Mirror of https://github.com/prowler-cloud/prowler.git, synced 2026-02-09 02:30:43 +00:00

Compare commits: 9 commits (PROWLER-96...review_met)
| Author | SHA1 | Date |
|---|---|---|
| | 07e75c1805 | |
| | 71d4a2a4f0 | |
| | ac013ec6fc | |
| | 4ebded6ab1 | |
| | 770269772a | |
| | ab18ddb81a | |
| | cda7f89091 | |
| | db7d0b3e51 | |
| | 89b2dad06f | |
.env (2 changes)

@@ -66,7 +66,7 @@ NEO4J_DBMS_SECURITY_PROCEDURES_ALLOWLIST=apoc.*
 NEO4J_DBMS_SECURITY_PROCEDURES_UNRESTRICTED=apoc.*
 NEO4J_DBMS_CONNECTOR_BOLT_LISTEN_ADDRESS=0.0.0.0:7687
 # Neo4j Prowler settings
-ATTACK_PATHS_FINDINGS_BATCH_SIZE=1000
+ATTACK_PATHS_BATCH_SIZE=1000
 
 # Celery-Prowler task settings
 TASK_RETRY_DELAY_SECONDS=0.1
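The renamed variable is the batch size consumed by the worker's batched graph deletion (see the `drop_subgraph` change below). A minimal sketch of reading and validating such a setting at startup; the helper and its default are illustrative, not the project's actual settings loader:

```python
import os

def read_batch_size(name: str = "ATTACK_PATHS_BATCH_SIZE", default: int = 1000) -> int:
    """Read a positive integer batch size from the environment (hypothetical helper)."""
    value = int(os.environ.get(name, str(default)))
    if value <= 0:
        raise ValueError(f"{name} must be a positive integer, got {value}")
    return value
```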
@@ -2,7 +2,7 @@
 
 All notable changes to the **Prowler API** are documented in this file.
 
-## [1.19.0] (Prowler UNRELEASED)
+## [1.19.0] (Prowler v5.18.0)
 
 ### 🚀 Added
 
@@ -18,10 +18,7 @@ All notable changes to the **Prowler API** are documented in this file.
 - Lazy-load providers and compliance data to reduce API/worker startup memory and time [(#9857)](https://github.com/prowler-cloud/prowler/pull/9857)
 - Attack Paths: Pinned Cartography to version `0.126.1`, adding AWS scans for SageMaker, CloudFront and Bedrock [(#9893)](https://github.com/prowler-cloud/prowler/issues/9893)
 - Remove unused indexes [(#9904)](https://github.com/prowler-cloud/prowler/pull/9904)
+- Attack Paths: Modified the behaviour of the Cartography scans to use the same Neo4j database per tenant, instead of individual databases per scans [(#9955)](https://github.com/prowler-cloud/prowler/pull/9955)
 
 ---
 
-## [1.18.2] (Prowler UNRELEASED)
-
-### 🐞 Fixed
-
@@ -1,10 +1,11 @@
-from api.attack_paths.query_definitions import (
+from api.attack_paths.queries import (
     AttackPathsQueryDefinition,
     AttackPathsQueryParameterDefinition,
     get_queries_for_provider,
     get_query_by_id,
 )
 
 
 __all__ = [
     "AttackPathsQueryDefinition",
     "AttackPathsQueryParameterDefinition",
@@ -1,15 +1,18 @@
 import atexit
 import logging
 import threading
 
 from contextlib import contextmanager
 from typing import Iterator
 from uuid import UUID
 
 import neo4j
 import neo4j.exceptions
 
 from django.conf import settings
 
 from api.attack_paths.retryable_session import RetryableSession
+from tasks.jobs.attack_paths.config import BATCH_SIZE, PROVIDER_RESOURCE_LABEL
 
 # Without this Celery goes crazy with Neo4j logging
 logging.getLogger("neo4j").setLevel(logging.ERROR)
@@ -83,7 +86,8 @@ def get_session(database: str | None = None) -> Iterator[RetryableSession]:
         yield session_wrapper
 
     except neo4j.exceptions.Neo4jError as exc:
-        raise GraphDatabaseQueryException(message=exc.message, code=exc.code)
+        message = exc.message if exc.message is not None else str(exc)
+        raise GraphDatabaseQueryException(message=message, code=exc.code)
 
     finally:
         if session_wrapper is not None:
@@ -105,24 +109,41 @@ def drop_database(database: str) -> None:
         session.run(query)
 
 
-def drop_subgraph(database: str, root_node_label: str, root_node_id: str) -> int:
-    query = """
-        MATCH (a:__ROOT_NODE_LABEL__ {id: $root_node_id})
-        CALL apoc.path.subgraphNodes(a, {})
-        YIELD node
-        DETACH DELETE node
-        RETURN COUNT(node) AS deleted_nodes_count
-    """.replace("__ROOT_NODE_LABEL__", root_node_label)
-    parameters = {"root_node_id": root_node_id}
-
-    with get_session(database) as session:
-        result = session.run(query, parameters)
-
-        try:
-            return result.single()["deleted_nodes_count"]
-        except neo4j.exceptions.ResultConsumedError:
-            return 0  # As there are no nodes to delete, the result is empty
+def drop_subgraph(database: str, provider_id: str) -> int:
+    """
+    Delete all nodes for a provider from the tenant database.
+
+    Uses batched deletion to avoid memory issues with large graphs.
+    Silently returns 0 if the database doesn't exist.
+    """
+    deleted_nodes = 0
+    parameters = {
+        "provider_id": provider_id,
+        "batch_size": BATCH_SIZE,
+    }
+
+    try:
+        with get_session(database) as session:
+            deleted_count = 1
+            while deleted_count > 0:
+                result = session.run(
+                    f"""
+                    MATCH (n:{PROVIDER_RESOURCE_LABEL} {{provider_id: $provider_id}})
+                    WITH n LIMIT $batch_size
+                    DETACH DELETE n
+                    RETURN COUNT(n) AS deleted_nodes_count
+                    """,
+                    parameters,
+                )
+                deleted_count = result.single().get("deleted_nodes_count", 0)
+                deleted_nodes += deleted_count
+    except GraphDatabaseQueryException as exc:
+        if exc.code == "Neo.ClientError.Database.DatabaseNotFound":
+            return 0
+        raise
+
+    return deleted_nodes
 
 
 def clear_cache(database: str) -> None:
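The new `drop_subgraph` repeats a `MATCH ... WITH n LIMIT $batch_size DETACH DELETE n` transaction until a pass deletes nothing, so each transaction touches at most `BATCH_SIZE` nodes. The same effect could be expressed with `apoc.periodic.iterate` (the `.env` above already allowlists `apoc.*`); a hedged sketch, not the code this PR ships — `ProviderResource` stands in for whatever `PROVIDER_RESOURCE_LABEL` resolves to:

```python
# Sketch only: batched deletion expressed with APOC instead of a Python loop.
# Run with the same $provider_id / $batch_size parameters as drop_subgraph.
BATCHED_DELETE_CYPHER = """
CALL apoc.periodic.iterate(
    'MATCH (n:ProviderResource {provider_id: $provider_id}) RETURN n',
    'DETACH DELETE n',
    {batchSize: $batch_size, params: {provider_id: $provider_id}}
)
YIELD batches, total
RETURN batches, total
"""
```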
@@ -137,12 +158,11 @@ def clear_cache(database: str) -> None:
 
 
 # Neo4j functions related to Prowler + Cartography
-DATABASE_NAME_TEMPLATE = "db-{attack_paths_scan_id}"
-
-
-def get_database_name(attack_paths_scan_id: UUID) -> str:
-    attack_paths_scan_id_str = str(attack_paths_scan_id).lower()
-    return DATABASE_NAME_TEMPLATE.format(attack_paths_scan_id=attack_paths_scan_id_str)
+def get_database_name(entity_id: str | UUID, temporary: bool = False) -> str:
+    prefix = "tmp-scan" if temporary else "tenant"
+    return f"db-{prefix}-{str(entity_id).lower()}"
 
 
 # Exceptions
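With the per-tenant scheme the two helpers compose: the tenant's database name is derived once and every provider cleanup runs against it. A short usage sketch, assuming `get_database_name` and `drop_subgraph` are imported from the module diffed above (its file path is not visible in this extract), with illustrative IDs:

```python
from uuid import UUID

tenant_id = UUID("00000000-0000-0000-0000-000000000001")  # hypothetical tenant
provider_id = "3fa85f64-5717-4562-b3fc-2c963f66afa6"       # hypothetical provider

database = get_database_name(tenant_id)                      # "db-tenant-0000..."
tmp_database = get_database_name(tenant_id, temporary=True)  # "db-tmp-scan-0000..."

deleted = drop_subgraph(database, provider_id)
print(f"Deleted {deleted} nodes for provider {provider_id} from {database}")
```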
api/src/backend/api/attack_paths/queries/__init__.py (new file, 16 lines)

@@ -0,0 +1,16 @@
from api.attack_paths.queries.types import (
    AttackPathsQueryDefinition,
    AttackPathsQueryParameterDefinition,
)
from api.attack_paths.queries.registry import (
    get_queries_for_provider,
    get_query_by_id,
)


__all__ = [
    "AttackPathsQueryDefinition",
    "AttackPathsQueryParameterDefinition",
    "get_queries_for_provider",
    "get_query_by_id",
]
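The package `__init__` re-exports the four public names, so callers import from `api.attack_paths.queries` without caring that the implementation is now split across `types.py`, `registry.py`, and `aws.py`. For example:

```python
from api.attack_paths.queries import get_queries_for_provider, get_query_by_id

for definition in get_queries_for_provider("aws"):
    print(definition.id, "-", definition.name)

query = get_query_by_id("aws-rds-instances")
assert query is not None and query.provider == "aws"
```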
api/src/backend/api/attack_paths/queries/aws.py (new file, 695 lines)

@@ -0,0 +1,695 @@
from api.attack_paths.queries.types import (
    AttackPathsQueryDefinition,
    AttackPathsQueryParameterDefinition,
)
from tasks.jobs.attack_paths.config import PROWLER_FINDING_LABEL


# Privilege Escalation Queries (based on pathfinding.cloud research)
# https://github.com/DataDog/pathfinding.cloud
# -------------------------------------------------------------------

AWS_INTERNET_EXPOSED_EC2_SENSITIVE_S3_ACCESS = AttackPathsQueryDefinition(
    id="aws-internet-exposed-ec2-sensitive-s3-access",
    name="Identify internet-exposed EC2 instances with sensitive S3 access",
    description="Detect EC2 instances with SSH exposed to the internet that can assume higher-privileged roles to read tagged sensitive S3 buckets despite bucket-level public access blocks.",
    provider="aws",
    cypher=f"""
        CALL apoc.create.vNode(['Internet'], {{id: 'Internet', name: 'Internet'}})
        YIELD node AS internet

        MATCH path_s3 = (aws:AWSAccount {{id: $provider_uid}})--(s3:S3Bucket)--(t:AWSTag)
        WHERE toLower(t.key) = toLower($tag_key) AND toLower(t.value) = toLower($tag_value)

        MATCH path_ec2 = (aws)--(ec2:EC2Instance)--(sg:EC2SecurityGroup)--(ipi:IpPermissionInbound)
        WHERE ec2.exposed_internet = true
            AND ipi.toport = 22

        MATCH path_role = (r:AWSRole)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
        WHERE ANY(x IN stmt.resource WHERE x CONTAINS s3.name)
            AND ANY(x IN stmt.action WHERE toLower(x) =~ 's3:(listbucket|getobject).*')

        MATCH path_assume_role = (ec2)-[p:STS_ASSUMEROLE_ALLOW*1..9]-(r:AWSRole)

        CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {{}}, ec2)
        YIELD rel AS can_access

        UNWIND nodes(path_s3) + nodes(path_ec2) + nodes(path_role) + nodes(path_assume_role) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path_s3, path_ec2, path_role, path_assume_role, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
    """,
    parameters=[
        AttackPathsQueryParameterDefinition(
            name="tag_key",
            label="Tag key",
            description="Tag key to filter the S3 bucket, e.g. DataClassification.",
            placeholder="DataClassification",
        ),
        AttackPathsQueryParameterDefinition(
            name="tag_value",
            label="Tag value",
            description="Tag value to filter the S3 bucket, e.g. Sensitive.",
            placeholder="Sensitive",
        ),
    ],
)
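Each definition bundles the Cypher text with metadata for its parameters, so executing one is just a matter of binding `$provider_uid` plus the declared parameters. A sketch of that glue code — the surrounding orchestration is assumed here, not taken from the PR:

```python
def run_definition(session, definition, provider_uid: str, user_params: dict) -> list:
    """Cast declared parameters, add provider_uid, and run the Cypher (sketch)."""
    params = {"provider_uid": provider_uid}
    for param in definition.parameters:
        # `cast` defaults to str on AttackPathsQueryParameterDefinition.
        params[param.name] = param.cast(user_params[param.name])
    return list(session.run(definition.cypher, params))

# e.g. run_definition(session, AWS_INTERNET_EXPOSED_EC2_SENSITIVE_S3_ACCESS,
#                     "123456789012", {"tag_key": "DataClassification", "tag_value": "Sensitive"})
```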

# Basic Resource Queries
# ----------------------

AWS_RDS_INSTANCES = AttackPathsQueryDefinition(
    id="aws-rds-instances",
    name="Identify provisioned RDS instances",
    description="List the selected AWS account alongside the RDS instances it owns.",
    provider="aws",
    cypher=f"""
        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(rds:RDSInstance)

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

AWS_RDS_UNENCRYPTED_STORAGE = AttackPathsQueryDefinition(
    id="aws-rds-unencrypted-storage",
    name="Identify RDS instances without storage encryption",
    description="Find RDS instances with storage encryption disabled within the selected account.",
    provider="aws",
    cypher=f"""
        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(rds:RDSInstance)
        WHERE rds.storage_encrypted = false

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

AWS_S3_ANONYMOUS_ACCESS_BUCKETS = AttackPathsQueryDefinition(
    id="aws-s3-anonymous-access-buckets",
    name="Identify S3 buckets with anonymous access",
    description="Find S3 buckets that allow anonymous access within the selected account.",
    provider="aws",
    cypher=f"""
        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(s3:S3Bucket)
        WHERE s3.anonymous_access = true

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

AWS_IAM_STATEMENTS_ALLOW_ALL_ACTIONS = AttackPathsQueryDefinition(
    id="aws-iam-statements-allow-all-actions",
    name="Identify IAM statements that allow all actions",
    description="Find IAM policy statements that allow all actions via '*' within the selected account.",
    provider="aws",
    cypher=f"""
        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(principal:AWSPrincipal)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
        WHERE stmt.effect = 'Allow'
            AND any(x IN stmt.action WHERE x = '*')

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

AWS_IAM_STATEMENTS_ALLOW_DELETE_POLICY = AttackPathsQueryDefinition(
    id="aws-iam-statements-allow-delete-policy",
    name="Identify IAM statements that allow iam:DeletePolicy",
    description="Find IAM policy statements that allow the iam:DeletePolicy action within the selected account.",
    provider="aws",
    cypher=f"""
        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(principal:AWSPrincipal)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
        WHERE stmt.effect = 'Allow'
            AND any(x IN stmt.action WHERE x = "iam:DeletePolicy")

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

AWS_IAM_STATEMENTS_ALLOW_CREATE_ACTIONS = AttackPathsQueryDefinition(
    id="aws-iam-statements-allow-create-actions",
    name="Identify IAM statements that allow create actions",
    description="Find IAM policy statements that allow actions containing 'create' within the selected account.",
    provider="aws",
    cypher=f"""
        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(principal:AWSPrincipal)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
        WHERE stmt.effect = "Allow"
            AND any(x IN stmt.action WHERE toLower(x) CONTAINS "create")

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)
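Every query in this section ends with the same tail: unwind the matched path, optionally attach failing Prowler findings, and return the path plus the collected findings and relationships. If that repetition ever becomes a maintenance burden, the tail could be factored into a shared fragment; a sketch of that possible refactor (not something this PR does):

```python
# Hypothetical shared suffix each simple query could append after its MATCH/WHERE
# block, formatted with the label: FINDINGS_TAIL.format(label=PROWLER_FINDING_LABEL)
FINDINGS_TAIL = """
    UNWIND nodes(path) as n
    OPTIONAL MATCH (n)-[pfr]-(pf:{label})
    WHERE pf.status = 'FAIL'

    RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
"""
```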
# Network Exposure Queries
# ------------------------

AWS_EC2_INSTANCES_INTERNET_EXPOSED = AttackPathsQueryDefinition(
    id="aws-ec2-instances-internet-exposed",
    name="Identify internet-exposed EC2 instances",
    description="Find EC2 instances flagged as exposed to the internet within the selected account.",
    provider="aws",
    cypher=f"""
        CALL apoc.create.vNode(['Internet'], {{id: 'Internet', name: 'Internet'}})
        YIELD node AS internet

        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(ec2:EC2Instance)
        WHERE ec2.exposed_internet = true

        CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {{}}, ec2)
        YIELD rel AS can_access

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
    """,
    parameters=[],
)

AWS_SECURITY_GROUPS_OPEN_INTERNET_FACING = AttackPathsQueryDefinition(
    id="aws-security-groups-open-internet-facing",
    name="Identify internet-facing resources with open security groups",
    description="Find internet-facing resources associated with security groups that allow inbound access from '0.0.0.0/0'.",
    provider="aws",
    cypher=f"""
        CALL apoc.create.vNode(['Internet'], {{id: 'Internet', name: 'Internet'}})
        YIELD node AS internet

        // Match EC2 instances that are internet-exposed with open security groups (0.0.0.0/0)
        MATCH path_ec2 = (aws:AWSAccount {{id: $provider_uid}})--(ec2:EC2Instance)--(sg:EC2SecurityGroup)--(ipi:IpPermissionInbound)--(ir:IpRange)
        WHERE ec2.exposed_internet = true
            AND ir.range = "0.0.0.0/0"

        CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {{}}, ec2)
        YIELD rel AS can_access

        UNWIND nodes(path_ec2) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path_ec2, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
    """,
    parameters=[],
)

AWS_CLASSIC_ELB_INTERNET_EXPOSED = AttackPathsQueryDefinition(
    id="aws-classic-elb-internet-exposed",
    name="Identify internet-exposed Classic Load Balancers",
    description="Find Classic Load Balancers exposed to the internet along with their listeners.",
    provider="aws",
    cypher=f"""
        CALL apoc.create.vNode(['Internet'], {{id: 'Internet', name: 'Internet'}})
        YIELD node AS internet

        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(elb:LoadBalancer)--(listener:ELBListener)
        WHERE elb.exposed_internet = true

        CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {{}}, elb)
        YIELD rel AS can_access

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
    """,
    parameters=[],
)

AWS_ELBV2_INTERNET_EXPOSED = AttackPathsQueryDefinition(
    id="aws-elbv2-internet-exposed",
    name="Identify internet-exposed ELBv2 load balancers",
    description="Find ELBv2 load balancers exposed to the internet along with their listeners.",
    provider="aws",
    cypher=f"""
        CALL apoc.create.vNode(['Internet'], {{id: 'Internet', name: 'Internet'}})
        YIELD node AS internet

        MATCH path = (aws:AWSAccount {{id: $provider_uid}})--(elbv2:LoadBalancerV2)--(listener:ELBV2Listener)
        WHERE elbv2.exposed_internet = true

        CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {{}}, elbv2)
        YIELD rel AS can_access

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
    """,
    parameters=[],
)

AWS_PUBLIC_IP_RESOURCE_LOOKUP = AttackPathsQueryDefinition(
    id="aws-public-ip-resource-lookup",
    name="Identify resources by public IP address",
    description="Given a public IP address, find the related AWS resource and its adjacent node within the selected account.",
    provider="aws",
    cypher=f"""
        CALL apoc.create.vNode(['Internet'], {{id: 'Internet', name: 'Internet'}})
        YIELD node AS internet

        CALL () {{
            MATCH path = (aws:AWSAccount {{id: $provider_uid}})-[r]-(x:EC2PrivateIp)-[q]-(y)
            WHERE x.public_ip = $ip
            RETURN path, x

            UNION MATCH path = (aws:AWSAccount {{id: $provider_uid}})-[r]-(x:EC2Instance)-[q]-(y)
            WHERE x.publicipaddress = $ip
            RETURN path, x

            UNION MATCH path = (aws:AWSAccount {{id: $provider_uid}})-[r]-(x:NetworkInterface)-[q]-(y)
            WHERE x.public_ip = $ip
            RETURN path, x

            UNION MATCH path = (aws:AWSAccount {{id: $provider_uid}})-[r]-(x:ElasticIPAddress)-[q]-(y)
            WHERE x.public_ip = $ip
            RETURN path, x
        }}

        WITH path, x, internet

        CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {{}}, x)
        YIELD rel AS can_access

        UNWIND nodes(path) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
    """,
    parameters=[
        AttackPathsQueryParameterDefinition(
            name="ip",
            label="IP address",
            description="Public IP address, e.g. 192.0.2.0.",
            placeholder="192.0.2.0",
        ),
    ],
)

AWS_IAM_PRIVESC_PASSROLE_EC2 = AttackPathsQueryDefinition(
    id="aws-iam-privesc-passrole-ec2",
    name="Privilege Escalation: iam:PassRole + ec2:RunInstances",
    description="Detect principals who can launch EC2 instances with privileged IAM roles attached. This allows gaining the permissions of the passed role by accessing the EC2 instance metadata service. This is a new-passrole escalation path (pathfinding.cloud: ec2-001).",
    provider="aws",
    cypher=f"""
        // Create a single shared virtual EC2 instance node
        CALL apoc.create.vNode(['EC2Instance'], {{
            id: 'potential-ec2-passrole',
            name: 'New EC2 Instance',
            description: 'Attacker-controlled EC2 with privileged role'
        }})
        YIELD node AS ec2_node

        // Create a single shared virtual escalation outcome node (styled like a finding)
        CALL apoc.create.vNode(['PrivilegeEscalation'], {{
            id: 'effective-administrator-passrole-ec2',
            check_title: 'Privilege Escalation',
            name: 'Effective Administrator',
            status: 'FAIL',
            severity: 'critical'
        }})
        YIELD node AS escalation_outcome

        WITH ec2_node, escalation_outcome

        // Find principals in the account
        MATCH path_principal = (aws:AWSAccount {{id: $provider_uid}})--(principal:AWSPrincipal)

        // Find statements granting iam:PassRole
        MATCH path_passrole = (principal)--(passrole_policy:AWSPolicy)--(stmt_passrole:AWSPolicyStatement)
        WHERE stmt_passrole.effect = 'Allow'
            AND any(action IN stmt_passrole.action WHERE
                toLower(action) = 'iam:passrole'
                OR toLower(action) = 'iam:*'
                OR action = '*'
            )

        // Find statements granting ec2:RunInstances
        MATCH path_ec2 = (principal)--(ec2_policy:AWSPolicy)--(stmt_ec2:AWSPolicyStatement)
        WHERE stmt_ec2.effect = 'Allow'
            AND any(action IN stmt_ec2.action WHERE
                toLower(action) = 'ec2:runinstances'
                OR toLower(action) = 'ec2:*'
                OR action = '*'
            )

        // Find roles that trust EC2 service (can be passed to EC2)
        MATCH path_target = (aws)--(target_role:AWSRole)
        WHERE target_role.arn CONTAINS $provider_uid
            // Check if principal can pass this role
            AND any(resource IN stmt_passrole.resource WHERE
                resource = '*'
                OR target_role.arn CONTAINS resource
                OR resource CONTAINS target_role.name
            )

        // Check if target role has elevated permissions (optional, for severity assessment)
        OPTIONAL MATCH (target_role)--(role_policy:AWSPolicy)--(role_stmt:AWSPolicyStatement)
        WHERE role_stmt.effect = 'Allow'
            AND (
                any(action IN role_stmt.action WHERE action = '*')
                OR any(action IN role_stmt.action WHERE toLower(action) = 'iam:*')
            )

        CALL apoc.create.vRelationship(principal, 'CAN_LAUNCH', {{
            via: 'ec2:RunInstances + iam:PassRole'
        }}, ec2_node)
        YIELD rel AS launch_rel

        CALL apoc.create.vRelationship(ec2_node, 'ASSUMES_ROLE', {{}}, target_role)
        YIELD rel AS assumes_rel

        CALL apoc.create.vRelationship(target_role, 'GRANTS_ACCESS', {{
            reference: 'https://pathfinding.cloud/paths/ec2-001'
        }}, escalation_outcome)
        YIELD rel AS grants_rel

        UNWIND nodes(path_principal) + nodes(path_passrole) + nodes(path_ec2) + nodes(path_target) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path_principal, path_passrole, path_ec2, path_target,
            ec2_node, escalation_outcome, launch_rel, assumes_rel, grants_rel,
            collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

# TODO: Add ProwlerFinding nodes
AWS_GLUE_PRIVESC_PASSROLE_DEV_ENDPOINT = AttackPathsQueryDefinition(
    id="aws-glue-privesc-passrole-dev-endpoint",
    name="Privilege Escalation: Glue Dev Endpoint with PassRole",
    description="Detect principals that can escalate privileges by passing a role to a Glue development endpoint. The attacker creates a dev endpoint with an arbitrary role attached, then accesses those credentials through the endpoint.",
    provider="aws",
    cypher="""
        CALL apoc.create.vNode(['PrivilegeEscalation'], {
            id: 'effective-administrator-glue',
            check_title: 'Privilege Escalation',
            name: 'Effective Administrator (Glue)',
            status: 'FAIL',
            severity: 'critical'
        })
        YIELD node AS escalation_outcome

        WITH escalation_outcome

        // Find principals in the account
        MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)

        // Principal can assume roles (up to 2 hops)
        OPTIONAL MATCH path_assume = (principal)-[:STS_ASSUMEROLE_ALLOW*0..2]->(acting_as:AWSRole)
        WITH escalation_outcome, principal, path_principal, path_assume,
            CASE WHEN path_assume IS NULL THEN principal ELSE acting_as END AS effective_principal

        // Find iam:PassRole permission
        MATCH path_passrole = (effective_principal)--(passrole_policy:AWSPolicy)--(passrole_stmt:AWSPolicyStatement)
        WHERE passrole_stmt.effect = 'Allow'
            AND any(action IN passrole_stmt.action WHERE toLower(action) = 'iam:passrole' OR action = '*')

        // Find Glue CreateDevEndpoint permission
        MATCH (effective_principal)--(glue_policy:AWSPolicy)--(glue_stmt:AWSPolicyStatement)
        WHERE glue_stmt.effect = 'Allow'
            AND any(action IN glue_stmt.action WHERE toLower(action) = 'glue:createdevendpoint' OR action = '*' OR toLower(action) = 'glue:*')

        // Find target role with elevated permissions
        MATCH (aws)--(target_role:AWSRole)--(target_policy:AWSPolicy)--(target_stmt:AWSPolicyStatement)
        WHERE target_stmt.effect = 'Allow'
            AND (
                any(action IN target_stmt.action WHERE action = '*')
                OR any(action IN target_stmt.action WHERE toLower(action) = 'iam:*')
            )

        // Deduplicate before creating virtual nodes
        WITH DISTINCT escalation_outcome, aws, principal, effective_principal, target_role

        // Create virtual Glue endpoint node (one per unique principal->target pair)
        CALL apoc.create.vNode(['GlueDevEndpoint'], {
            name: 'New Dev Endpoint',
            description: 'Glue endpoint with target role attached',
            id: effective_principal.arn + '->' + target_role.arn
        })
        YIELD node AS glue_endpoint

        CALL apoc.create.vRelationship(effective_principal, 'CREATES_ENDPOINT', {
            permissions: ['iam:PassRole', 'glue:CreateDevEndpoint'],
            technique: 'new-passrole'
        }, glue_endpoint)
        YIELD rel AS create_rel

        CALL apoc.create.vRelationship(glue_endpoint, 'RUNS_AS', {}, target_role)
        YIELD rel AS runs_rel

        CALL apoc.create.vRelationship(target_role, 'GRANTS_ACCESS', {
            reference: 'https://pathfinding.cloud/paths/glue-001'
        }, escalation_outcome)
        YIELD rel AS grants_rel

        // Re-match paths for visualization
        MATCH path_principal = (aws)--(principal)
        MATCH path_target = (aws)--(target_role)

        RETURN path_principal, path_target,
            glue_endpoint, escalation_outcome, create_rel, runs_rel, grants_rel
    """,
    parameters=[],
)

AWS_IAM_PRIVESC_ATTACH_ROLE_POLICY_ASSUME_ROLE = AttackPathsQueryDefinition(
    id="aws-iam-privesc-attach-role-policy-assume-role",
    name="Privilege Escalation: iam:AttachRolePolicy + sts:AssumeRole",
    description="Detect principals who can both attach policies to roles AND assume those roles. This two-step attack allows modifying a role's permissions then assuming it to gain elevated access. This is a principal-access escalation path (pathfinding.cloud: iam-014).",
    provider="aws",
    cypher=f"""
        // Create a virtual escalation outcome node (styled like a finding)
        CALL apoc.create.vNode(['PrivilegeEscalation'], {{
            id: 'effective-administrator',
            check_title: 'Privilege Escalation',
            name: 'Effective Administrator',
            status: 'FAIL',
            severity: 'critical'
        }})
        YIELD node AS admin_outcome

        WITH admin_outcome

        // Find principals in the account
        MATCH path_principal = (aws:AWSAccount {{id: $provider_uid}})--(principal:AWSPrincipal)

        // Find statements granting iam:AttachRolePolicy
        MATCH path_attach = (principal)--(attach_policy:AWSPolicy)--(stmt_attach:AWSPolicyStatement)
        WHERE stmt_attach.effect = 'Allow'
            AND any(action IN stmt_attach.action WHERE
                toLower(action) = 'iam:attachrolepolicy'
                OR toLower(action) = 'iam:*'
                OR action = '*'
            )

        // Find statements granting sts:AssumeRole
        MATCH path_assume = (principal)--(assume_policy:AWSPolicy)--(stmt_assume:AWSPolicyStatement)
        WHERE stmt_assume.effect = 'Allow'
            AND any(action IN stmt_assume.action WHERE
                toLower(action) = 'sts:assumerole'
                OR toLower(action) = 'sts:*'
                OR action = '*'
            )

        // Find target roles that the principal can both modify AND assume
        MATCH path_target = (aws)--(target_role:AWSRole)
        WHERE target_role.arn CONTAINS $provider_uid
            // Can attach policy to this role
            AND any(resource IN stmt_attach.resource WHERE
                resource = '*'
                OR target_role.arn CONTAINS resource
                OR resource CONTAINS target_role.name
            )
            // Can assume this role
            AND any(resource IN stmt_assume.resource WHERE
                resource = '*'
                OR target_role.arn CONTAINS resource
                OR resource CONTAINS target_role.name
            )

        // Deduplicate before creating virtual relationships
        WITH DISTINCT admin_outcome, aws, principal, target_role

        // Create virtual relationships showing the attack path
        CALL apoc.create.vRelationship(principal, 'CAN_MODIFY', {{
            via: 'iam:AttachRolePolicy'
        }}, target_role)
        YIELD rel AS modify_rel

        CALL apoc.create.vRelationship(target_role, 'LEADS_TO', {{
            technique: 'iam:AttachRolePolicy + sts:AssumeRole',
            via: 'sts:AssumeRole',
            reference: 'https://pathfinding.cloud/paths/iam-014'
        }}, admin_outcome)
        YIELD rel AS escalation_rel

        // Re-match paths for visualization
        MATCH path_principal = (aws)--(principal)
        MATCH path_target = (aws)--(target_role)

        UNWIND nodes(path_principal) + nodes(path_target) as n
        OPTIONAL MATCH (n)-[pfr]-(pf:{PROWLER_FINDING_LABEL})
        WHERE pf.status = 'FAIL'

        RETURN path_principal, path_target,
            admin_outcome, modify_rel, escalation_rel,
            collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
    """,
    parameters=[],
)

# TODO: Add ProwlerFinding nodes
AWS_BEDROCK_PRIVESC_PASSROLE_CODE_INTERPRETER = AttackPathsQueryDefinition(
    id="aws-bedrock-privesc-passrole-code-interpreter",
    name="Privilege Escalation: Bedrock Code Interpreter with PassRole",
    description="Detect principals that can escalate privileges by passing a role to a Bedrock AgentCore Code Interpreter. The attacker creates a code interpreter with an arbitrary role, then invokes it to execute code with those credentials.",
    provider="aws",
    cypher="""
        CALL apoc.create.vNode(['PrivilegeEscalation'], {
            id: 'effective-administrator-bedrock',
            check_title: 'Privilege Escalation',
            name: 'Effective Administrator (Bedrock)',
            status: 'FAIL',
            severity: 'critical'
        })
        YIELD node AS escalation_outcome

        WITH escalation_outcome

        // Find principals in the account
        MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)

        // Principal can assume roles (up to 2 hops)
        OPTIONAL MATCH path_assume = (principal)-[:STS_ASSUMEROLE_ALLOW*0..2]->(acting_as:AWSRole)
        WITH escalation_outcome, aws, principal, path_principal, path_assume,
            CASE WHEN path_assume IS NULL THEN principal ELSE acting_as END AS effective_principal

        // Find iam:PassRole permission
        MATCH path_passrole = (effective_principal)--(passrole_policy:AWSPolicy)--(passrole_stmt:AWSPolicyStatement)
        WHERE passrole_stmt.effect = 'Allow'
            AND any(action IN passrole_stmt.action WHERE toLower(action) = 'iam:passrole' OR action = '*')

        // Find Bedrock AgentCore permissions
        MATCH (effective_principal)--(bedrock_policy:AWSPolicy)--(bedrock_stmt:AWSPolicyStatement)
        WHERE bedrock_stmt.effect = 'Allow'
            AND (
                any(action IN bedrock_stmt.action WHERE toLower(action) = 'bedrock-agentcore:createcodeinterpreter' OR action = '*' OR toLower(action) = 'bedrock-agentcore:*')
            )
            AND (
                any(action IN bedrock_stmt.action WHERE toLower(action) = 'bedrock-agentcore:startsession' OR action = '*' OR toLower(action) = 'bedrock-agentcore:*')
            )
            AND (
                any(action IN bedrock_stmt.action WHERE toLower(action) = 'bedrock-agentcore:invoke' OR action = '*' OR toLower(action) = 'bedrock-agentcore:*')
            )

        // Find target roles with elevated permissions that could be passed
        MATCH (aws)--(target_role:AWSRole)--(target_policy:AWSPolicy)--(target_stmt:AWSPolicyStatement)
        WHERE target_stmt.effect = 'Allow'
            AND (
                any(action IN target_stmt.action WHERE action = '*')
                OR any(action IN target_stmt.action WHERE toLower(action) = 'iam:*')
            )

        // Deduplicate per (principal, target_role) pair
        WITH DISTINCT escalation_outcome, aws, principal, target_role

        // Group by principal, collect target_roles
        WITH escalation_outcome, aws, principal,
            collect(DISTINCT target_role) AS target_roles,
            count(DISTINCT target_role) AS target_count

        // Create single virtual Bedrock node per principal
        CALL apoc.create.vNode(['BedrockCodeInterpreter'], {
            name: 'New Code Interpreter',
            description: toString(target_count) + ' admin role(s) can be passed',
            id: principal.arn,
            target_role_count: target_count
        })
        YIELD node AS bedrock_agent

        // Connect from principal (not effective_principal) to keep graph connected
        CALL apoc.create.vRelationship(principal, 'CREATES_INTERPRETER', {
            permissions: ['iam:PassRole', 'bedrock-agentcore:CreateCodeInterpreter', 'bedrock-agentcore:StartSession', 'bedrock-agentcore:Invoke'],
            technique: 'new-passrole'
        }, bedrock_agent)
        YIELD rel AS create_rel

        // UNWIND target_roles to show which roles can be passed
        UNWIND target_roles AS target_role

        CALL apoc.create.vRelationship(bedrock_agent, 'PASSES_ROLE', {}, target_role)
        YIELD rel AS pass_rel

        CALL apoc.create.vRelationship(target_role, 'GRANTS_ACCESS', {
            reference: 'https://pathfinding.cloud/paths/bedrock-001'
        }, escalation_outcome)
        YIELD rel AS grants_rel

        // Re-match path for visualization
        MATCH path_principal = (aws)--(principal)

        RETURN path_principal,
            bedrock_agent, target_role, escalation_outcome, create_rel, pass_rel, grants_rel, target_count
    """,
    parameters=[],
)

# AWS Queries List
# ----------------

AWS_QUERIES: list[AttackPathsQueryDefinition] = [
    AWS_INTERNET_EXPOSED_EC2_SENSITIVE_S3_ACCESS,
    AWS_RDS_INSTANCES,
    AWS_RDS_UNENCRYPTED_STORAGE,
    AWS_S3_ANONYMOUS_ACCESS_BUCKETS,
    AWS_IAM_STATEMENTS_ALLOW_ALL_ACTIONS,
    AWS_IAM_STATEMENTS_ALLOW_DELETE_POLICY,
    AWS_IAM_STATEMENTS_ALLOW_CREATE_ACTIONS,
    AWS_EC2_INSTANCES_INTERNET_EXPOSED,
    AWS_SECURITY_GROUPS_OPEN_INTERNET_FACING,
    AWS_CLASSIC_ELB_INTERNET_EXPOSED,
    AWS_ELBV2_INTERNET_EXPOSED,
    AWS_PUBLIC_IP_RESOURCE_LOOKUP,
    AWS_IAM_PRIVESC_PASSROLE_EC2,
    AWS_GLUE_PRIVESC_PASSROLE_DEV_ENDPOINT,
    AWS_IAM_PRIVESC_ATTACH_ROLE_POLICY_ASSUME_ROLE,
    AWS_BEDROCK_PRIVESC_PASSROLE_CODE_INTERPRETER,
]
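`registry.py` (next file) flattens these lists into an ID-keyed dict, where a duplicate `id` would silently shadow an earlier definition. A small guard one could add in a unit test — illustrative, not part of the PR:

```python
from api.attack_paths.queries.aws import AWS_QUERIES

def assert_unique_query_ids(queries) -> None:
    """Fail fast if two query definitions share an id (sketch for a test)."""
    seen: set[str] = set()
    for query in queries:
        if query.id in seen:
            raise ValueError(f"duplicate attack-paths query id: {query.id}")
        seen.add(query.id)

assert_unique_query_ids(AWS_QUERIES)
```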
api/src/backend/api/attack_paths/queries/registry.py (new file, 25 lines)

@@ -0,0 +1,25 @@
from api.attack_paths.queries.types import AttackPathsQueryDefinition
from api.attack_paths.queries.aws import AWS_QUERIES


# Query definitions organized by provider
_QUERY_DEFINITIONS: dict[str, list[AttackPathsQueryDefinition]] = {
    "aws": AWS_QUERIES,
}

# Flat lookup by query ID for O(1) access
_QUERIES_BY_ID: dict[str, AttackPathsQueryDefinition] = {
    definition.id: definition
    for definitions in _QUERY_DEFINITIONS.values()
    for definition in definitions
}


def get_queries_for_provider(provider: str) -> list[AttackPathsQueryDefinition]:
    """Get all attack path queries for a specific provider."""
    return _QUERY_DEFINITIONS.get(provider, [])


def get_query_by_id(query_id: str) -> AttackPathsQueryDefinition | None:
    """Get a specific attack path query by its ID."""
    return _QUERIES_BY_ID.get(query_id)
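Both lookups are plain dict reads after import time: `_QUERY_DEFINITIONS` keys by provider and `_QUERIES_BY_ID` flattens everything for O(1) ID access, returning an empty list or `None` rather than raising on unknown keys. For example:

```python
from api.attack_paths.queries import registry

assert registry.get_queries_for_provider("azure") == []    # unknown provider
assert registry.get_query_by_id("does-not-exist") is None  # unknown query id

rds = registry.get_query_by_id("aws-rds-instances")
assert rds in registry.get_queries_for_provider("aws")
```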
api/src/backend/api/attack_paths/queries/types.py (new file, 29 lines)

@@ -0,0 +1,29 @@
from dataclasses import dataclass, field


@dataclass
class AttackPathsQueryParameterDefinition:
    """
    Metadata describing a parameter that must be provided to an Attack Paths query.
    """

    name: str
    label: str
    data_type: str = "string"
    cast: type = str
    description: str | None = None
    placeholder: str | None = None


@dataclass
class AttackPathsQueryDefinition:
    """
    Immutable representation of an Attack Path query.
    """

    id: str
    name: str
    description: str
    provider: str
    cypher: str
    parameters: list[AttackPathsQueryParameterDefinition] = field(default_factory=list)
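The `cast` field is what lets the API coerce raw request strings into the declared Python type before binding them as Cypher parameters. A hedged example with a non-string parameter — the `port` parameter is invented for illustration and does not appear in the PR:

```python
from api.attack_paths.queries.types import AttackPathsQueryParameterDefinition

port_param = AttackPathsQueryParameterDefinition(
    name="port",
    label="Port",
    data_type="integer",
    cast=int,
    description="TCP port to filter inbound rules on.",  # hypothetical parameter
    placeholder="22",
)

raw = "22"                    # value as received from an HTTP request
value = port_param.cast(raw)  # -> 22 (int), ready to bind into Cypher
```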
@@ -1,690 +0,0 @@
from dataclasses import dataclass, field


# Dataclasses for handling API's Attack Path query definitions and their parameters
@dataclass
class AttackPathsQueryParameterDefinition:
    """
    Metadata describing a parameter that must be provided to an Attack Paths query.
    """

    name: str
    label: str
    data_type: str = "string"
    cast: type = str
    description: str | None = None
    placeholder: str | None = None


@dataclass
class AttackPathsQueryDefinition:
    """
    Immutable representation of an Attack Path query.
    """

    id: str
    name: str
    description: str
    provider: str
    cypher: str
    parameters: list[AttackPathsQueryParameterDefinition] = field(default_factory=list)


# Accessor functions for API's Attack Paths query definitions
def get_queries_for_provider(provider: str) -> list[AttackPathsQueryDefinition]:
    return _QUERY_DEFINITIONS.get(provider, [])


def get_query_by_id(query_id: str) -> AttackPathsQueryDefinition | None:
    return _QUERIES_BY_ID.get(query_id)

# API's Attack Paths query definitions
_QUERY_DEFINITIONS: dict[str, list[AttackPathsQueryDefinition]] = {
    "aws": [
        # Custom query for detecting internet-exposed EC2 instances with sensitive S3 access
        AttackPathsQueryDefinition(
            id="aws-internet-exposed-ec2-sensitive-s3-access",
            name="Identify internet-exposed EC2 instances with sensitive S3 access",
            description="Detect EC2 instances with SSH exposed to the internet that can assume higher-privileged roles to read tagged sensitive S3 buckets despite bucket-level public access blocks.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['Internet'], {id: 'Internet', name: 'Internet'})
                YIELD node AS internet

                MATCH path_s3 = (aws:AWSAccount {id: $provider_uid})--(s3:S3Bucket)--(t:AWSTag)
                WHERE toLower(t.key) = toLower($tag_key) AND toLower(t.value) = toLower($tag_value)

                MATCH path_ec2 = (aws)--(ec2:EC2Instance)--(sg:EC2SecurityGroup)--(ipi:IpPermissionInbound)
                WHERE ec2.exposed_internet = true
                    AND ipi.toport = 22

                MATCH path_role = (r:AWSRole)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
                WHERE ANY(x IN stmt.resource WHERE x CONTAINS s3.name)
                    AND ANY(x IN stmt.action WHERE toLower(x) =~ 's3:(listbucket|getobject).*')

                MATCH path_assume_role = (ec2)-[p:STS_ASSUMEROLE_ALLOW*1..9]-(r:AWSRole)

                CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {}, ec2)
                YIELD rel AS can_access

                UNWIND nodes(path_s3) + nodes(path_ec2) + nodes(path_role) + nodes(path_assume_role) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path_s3, path_ec2, path_role, path_assume_role, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
            """,
            parameters=[
                AttackPathsQueryParameterDefinition(
                    name="tag_key",
                    label="Tag key",
                    description="Tag key to filter the S3 bucket, e.g. DataClassification.",
                    placeholder="DataClassification",
                ),
                AttackPathsQueryParameterDefinition(
                    name="tag_value",
                    label="Tag value",
                    description="Tag value to filter the S3 bucket, e.g. Sensitive.",
                    placeholder="Sensitive",
                ),
            ],
        ),
        # Regular Cartography Attack Paths queries
        AttackPathsQueryDefinition(
            id="aws-rds-instances",
            name="Identify provisioned RDS instances",
            description="List the selected AWS account alongside the RDS instances it owns.",
            provider="aws",
            cypher="""
                MATCH path = (aws:AWSAccount {id: $provider_uid})--(rds:RDSInstance)

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-rds-unencrypted-storage",
            name="Identify RDS instances without storage encryption",
            description="Find RDS instances with storage encryption disabled within the selected account.",
            provider="aws",
            cypher="""
                MATCH path = (aws:AWSAccount {id: $provider_uid})--(rds:RDSInstance)
                WHERE rds.storage_encrypted = false

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-s3-anonymous-access-buckets",
            name="Identify S3 buckets with anonymous access",
            description="Find S3 buckets that allow anonymous access within the selected account.",
            provider="aws",
            cypher="""
                MATCH path = (aws:AWSAccount {id: $provider_uid})--(s3:S3Bucket)
                WHERE s3.anonymous_access = true

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-iam-statements-allow-all-actions",
            name="Identify IAM statements that allow all actions",
            description="Find IAM policy statements that allow all actions via '*' within the selected account.",
            provider="aws",
            cypher="""
                MATCH path = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
                WHERE stmt.effect = 'Allow'
                    AND any(x IN stmt.action WHERE x = '*')

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-iam-statements-allow-delete-policy",
            name="Identify IAM statements that allow iam:DeletePolicy",
            description="Find IAM policy statements that allow the iam:DeletePolicy action within the selected account.",
            provider="aws",
            cypher="""
                MATCH path = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
                WHERE stmt.effect = 'Allow'
                    AND any(x IN stmt.action WHERE x = "iam:DeletePolicy")

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-iam-statements-allow-create-actions",
            name="Identify IAM statements that allow create actions",
            description="Find IAM policy statements that allow actions containing 'create' within the selected account.",
            provider="aws",
            cypher="""
                MATCH path = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)--(pol:AWSPolicy)--(stmt:AWSPolicyStatement)
                WHERE stmt.effect = "Allow"
                    AND any(x IN stmt.action WHERE toLower(x) CONTAINS "create")

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-ec2-instances-internet-exposed",
            name="Identify internet-exposed EC2 instances",
            description="Find EC2 instances flagged as exposed to the internet within the selected account.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['Internet'], {id: 'Internet', name: 'Internet'})
                YIELD node AS internet

                MATCH path = (aws:AWSAccount {id: $provider_uid})--(ec2:EC2Instance)
                WHERE ec2.exposed_internet = true

                CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {}, ec2)
                YIELD rel AS can_access

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-security-groups-open-internet-facing",
            name="Identify internet-facing resources with open security groups",
            description="Find internet-facing resources associated with security groups that allow inbound access from '0.0.0.0/0'.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['Internet'], {id: 'Internet', name: 'Internet'})
                YIELD node AS internet

                // Match EC2 instances that are internet-exposed with open security groups (0.0.0.0/0)
                MATCH path_ec2 = (aws:AWSAccount {id: $provider_uid})--(ec2:EC2Instance)--(sg:EC2SecurityGroup)--(ipi:IpPermissionInbound)--(ir:IpRange)
                WHERE ec2.exposed_internet = true
                    AND ir.range = "0.0.0.0/0"

                CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {}, ec2)
                YIELD rel AS can_access

                UNWIND nodes(path_ec2) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path_ec2, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-classic-elb-internet-exposed",
            name="Identify internet-exposed Classic Load Balancers",
            description="Find Classic Load Balancers exposed to the internet along with their listeners.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['Internet'], {id: 'Internet', name: 'Internet'})
                YIELD node AS internet

                MATCH path = (aws:AWSAccount {id: $provider_uid})--(elb:LoadBalancer)--(listener:ELBListener)
                WHERE elb.exposed_internet = true

                CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {}, elb)
                YIELD rel AS can_access

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-elbv2-internet-exposed",
            name="Identify internet-exposed ELBv2 load balancers",
            description="Find ELBv2 load balancers exposed to the internet along with their listeners.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['Internet'], {id: 'Internet', name: 'Internet'})
                YIELD node AS internet

                MATCH path = (aws:AWSAccount {id: $provider_uid})--(elbv2:LoadBalancerV2)--(listener:ELBV2Listener)
                WHERE elbv2.exposed_internet = true

                CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {}, elbv2)
                YIELD rel AS can_access

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-public-ip-resource-lookup",
            name="Identify resources by public IP address",
            description="Given a public IP address, find the related AWS resource and its adjacent node within the selected account.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['Internet'], {id: 'Internet', name: 'Internet'})
                YIELD node AS internet

                CALL () {
                    MATCH path = (aws:AWSAccount {id: $provider_uid})-[r]-(x:EC2PrivateIp)-[q]-(y)
                    WHERE x.public_ip = $ip
                    RETURN path, x

                    UNION MATCH path = (aws:AWSAccount {id: $provider_uid})-[r]-(x:EC2Instance)-[q]-(y)
                    WHERE x.publicipaddress = $ip
                    RETURN path, x

                    UNION MATCH path = (aws:AWSAccount {id: $provider_uid})-[r]-(x:NetworkInterface)-[q]-(y)
                    WHERE x.public_ip = $ip
                    RETURN path, x

                    UNION MATCH path = (aws:AWSAccount {id: $provider_uid})-[r]-(x:ElasticIPAddress)-[q]-(y)
                    WHERE x.public_ip = $ip
                    RETURN path, x
                }

                WITH path, x, internet

                CALL apoc.create.vRelationship(internet, 'CAN_ACCESS', {}, x)
                YIELD rel AS can_access

                UNWIND nodes(path) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path, collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr, internet, can_access
            """,
            parameters=[
                AttackPathsQueryParameterDefinition(
                    name="ip",
                    label="IP address",
                    description="Public IP address, e.g. 192.0.2.0.",
                    placeholder="192.0.2.0",
                ),
            ],
        ),
        # Privilege Escalation Queries (based on pathfinding.cloud research): https://github.com/DataDog/pathfinding.cloud
        AttackPathsQueryDefinition(
            id="aws-iam-privesc-passrole-ec2",
            name="Privilege Escalation: iam:PassRole + ec2:RunInstances",
            description="Detect principals who can launch EC2 instances with privileged IAM roles attached. This allows gaining the permissions of the passed role by accessing the EC2 instance metadata service. This is a new-passrole escalation path (pathfinding.cloud: ec2-001).",
            provider="aws",
            cypher="""
                // Create a single shared virtual EC2 instance node
                CALL apoc.create.vNode(['EC2Instance'], {
                    id: 'potential-ec2-passrole',
                    name: 'New EC2 Instance',
                    description: 'Attacker-controlled EC2 with privileged role'
                })
                YIELD node AS ec2_node

                // Create a single shared virtual escalation outcome node (styled like a finding)
                CALL apoc.create.vNode(['PrivilegeEscalation'], {
                    id: 'effective-administrator-passrole-ec2',
                    check_title: 'Privilege Escalation',
                    name: 'Effective Administrator',
                    status: 'FAIL',
                    severity: 'critical'
                })
                YIELD node AS escalation_outcome

                WITH ec2_node, escalation_outcome

                // Find principals in the account
                MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)

                // Find statements granting iam:PassRole
                MATCH path_passrole = (principal)--(passrole_policy:AWSPolicy)--(stmt_passrole:AWSPolicyStatement)
                WHERE stmt_passrole.effect = 'Allow'
                    AND any(action IN stmt_passrole.action WHERE
                        toLower(action) = 'iam:passrole'
                        OR toLower(action) = 'iam:*'
                        OR action = '*'
                    )

                // Find statements granting ec2:RunInstances
                MATCH path_ec2 = (principal)--(ec2_policy:AWSPolicy)--(stmt_ec2:AWSPolicyStatement)
                WHERE stmt_ec2.effect = 'Allow'
                    AND any(action IN stmt_ec2.action WHERE
                        toLower(action) = 'ec2:runinstances'
                        OR toLower(action) = 'ec2:*'
                        OR action = '*'
                    )

                // Find roles that trust EC2 service (can be passed to EC2)
                MATCH path_target = (aws)--(target_role:AWSRole)
                WHERE target_role.arn CONTAINS $provider_uid
                    // Check if principal can pass this role
                    AND any(resource IN stmt_passrole.resource WHERE
                        resource = '*'
                        OR target_role.arn CONTAINS resource
                        OR resource CONTAINS target_role.name
                    )

                // Check if target role has elevated permissions (optional, for severity assessment)
                OPTIONAL MATCH (target_role)--(role_policy:AWSPolicy)--(role_stmt:AWSPolicyStatement)
                WHERE role_stmt.effect = 'Allow'
                    AND (
                        any(action IN role_stmt.action WHERE action = '*')
                        OR any(action IN role_stmt.action WHERE toLower(action) = 'iam:*')
                    )

                CALL apoc.create.vRelationship(principal, 'CAN_LAUNCH', {
                    via: 'ec2:RunInstances + iam:PassRole'
                }, ec2_node)
                YIELD rel AS launch_rel

                CALL apoc.create.vRelationship(ec2_node, 'ASSUMES_ROLE', {}, target_role)
                YIELD rel AS assumes_rel

                CALL apoc.create.vRelationship(target_role, 'GRANTS_ACCESS', {
                    reference: 'https://pathfinding.cloud/paths/ec2-001'
                }, escalation_outcome)
                YIELD rel AS grants_rel

                UNWIND nodes(path_principal) + nodes(path_passrole) + nodes(path_ec2) + nodes(path_target) as n
                OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
                WHERE pf.status = 'FAIL'

                RETURN path_principal, path_passrole, path_ec2, path_target,
                    ec2_node, escalation_outcome, launch_rel, assumes_rel, grants_rel,
                    collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-glue-privesc-passrole-dev-endpoint",
            name="Privilege Escalation: Glue Dev Endpoint with PassRole",
            description="Detect principals that can escalate privileges by passing a role to a Glue development endpoint. The attacker creates a dev endpoint with an arbitrary role attached, then accesses those credentials through the endpoint.",
            provider="aws",
            cypher="""
                CALL apoc.create.vNode(['PrivilegeEscalation'], {
                    id: 'effective-administrator-glue',
                    check_title: 'Privilege Escalation',
                    name: 'Effective Administrator (Glue)',
                    status: 'FAIL',
                    severity: 'critical'
                })
                YIELD node AS escalation_outcome

                WITH escalation_outcome

                // Find principals in the account
                MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)

                // Principal can assume roles (up to 2 hops)
                OPTIONAL MATCH path_assume = (principal)-[:STS_ASSUMEROLE_ALLOW*0..2]->(acting_as:AWSRole)
                WITH escalation_outcome, principal, path_principal, path_assume,
                    CASE WHEN path_assume IS NULL THEN principal ELSE acting_as END AS effective_principal

                // Find iam:PassRole permission
                MATCH path_passrole = (effective_principal)--(passrole_policy:AWSPolicy)--(passrole_stmt:AWSPolicyStatement)
                WHERE passrole_stmt.effect = 'Allow'
                    AND any(action IN passrole_stmt.action WHERE toLower(action) = 'iam:passrole' OR action = '*')

                // Find Glue CreateDevEndpoint permission
                MATCH (effective_principal)--(glue_policy:AWSPolicy)--(glue_stmt:AWSPolicyStatement)
                WHERE glue_stmt.effect = 'Allow'
                    AND any(action IN glue_stmt.action WHERE toLower(action) = 'glue:createdevendpoint' OR action = '*' OR toLower(action) = 'glue:*')

                // Find target role with elevated permissions
                MATCH (aws)--(target_role:AWSRole)--(target_policy:AWSPolicy)--(target_stmt:AWSPolicyStatement)
                WHERE target_stmt.effect = 'Allow'
                    AND (
                        any(action IN target_stmt.action WHERE action = '*')
                        OR any(action IN target_stmt.action WHERE toLower(action) = 'iam:*')
                    )

                // Deduplicate before creating virtual nodes
                WITH DISTINCT escalation_outcome, aws, principal, effective_principal, target_role

                // Create virtual Glue endpoint node (one per unique principal->target pair)
                CALL apoc.create.vNode(['GlueDevEndpoint'], {
                    name: 'New Dev Endpoint',
                    description: 'Glue endpoint with target role attached',
                    id: effective_principal.arn + '->' + target_role.arn
                })
                YIELD node AS glue_endpoint

                CALL apoc.create.vRelationship(effective_principal, 'CREATES_ENDPOINT', {
                    permissions: ['iam:PassRole', 'glue:CreateDevEndpoint'],
                    technique: 'new-passrole'
                }, glue_endpoint)
                YIELD rel AS create_rel

                CALL apoc.create.vRelationship(glue_endpoint, 'RUNS_AS', {}, target_role)
                YIELD rel AS runs_rel

                CALL apoc.create.vRelationship(target_role, 'GRANTS_ACCESS', {
                    reference: 'https://pathfinding.cloud/paths/glue-001'
                }, escalation_outcome)
                YIELD rel AS grants_rel

                // Re-match paths for visualization
                MATCH path_principal = (aws)--(principal)
                MATCH path_target = (aws)--(target_role)

                RETURN path_principal, path_target,
                    glue_endpoint, escalation_outcome, create_rel, runs_rel, grants_rel
            """,
            parameters=[],
        ),
        AttackPathsQueryDefinition(
            id="aws-iam-privesc-attach-role-policy-assume-role",
            name="Privilege Escalation: iam:AttachRolePolicy + sts:AssumeRole",
            description="Detect principals who can both attach policies to roles AND assume those roles. This two-step attack allows modifying a role's permissions then assuming it to gain elevated access. This is a principal-access escalation path (pathfinding.cloud: iam-014).",
            provider="aws",
            cypher="""
                // Create a virtual escalation outcome node (styled like a finding)
                CALL apoc.create.vNode(['PrivilegeEscalation'], {
                    id: 'effective-administrator',
                    check_title: 'Privilege Escalation',
                    name: 'Effective Administrator',
                    status: 'FAIL',
                    severity: 'critical'
                })
                YIELD node AS admin_outcome

                WITH admin_outcome

                // Find principals in the account
                MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)

                // Find statements granting iam:AttachRolePolicy
                MATCH path_attach = (principal)--(attach_policy:AWSPolicy)--(stmt_attach:AWSPolicyStatement)
|
||||
WHERE stmt_attach.effect = 'Allow'
|
||||
AND any(action IN stmt_attach.action WHERE
|
||||
toLower(action) = 'iam:attachrolepolicy'
|
||||
OR toLower(action) = 'iam:*'
|
||||
OR action = '*'
|
||||
)
|
||||
|
||||
// Find statements granting sts:AssumeRole
|
||||
MATCH path_assume = (principal)--(assume_policy:AWSPolicy)--(stmt_assume:AWSPolicyStatement)
|
||||
WHERE stmt_assume.effect = 'Allow'
|
||||
AND any(action IN stmt_assume.action WHERE
|
||||
toLower(action) = 'sts:assumerole'
|
||||
OR toLower(action) = 'sts:*'
|
||||
OR action = '*'
|
||||
)
|
||||
|
||||
// Find target roles that the principal can both modify AND assume
|
||||
MATCH path_target = (aws)--(target_role:AWSRole)
|
||||
WHERE target_role.arn CONTAINS $provider_uid
|
||||
// Can attach policy to this role
|
||||
AND any(resource IN stmt_attach.resource WHERE
|
||||
resource = '*'
|
||||
OR target_role.arn CONTAINS resource
|
||||
OR resource CONTAINS target_role.name
|
||||
)
|
||||
// Can assume this role
|
||||
AND any(resource IN stmt_assume.resource WHERE
|
||||
resource = '*'
|
||||
OR target_role.arn CONTAINS resource
|
||||
OR resource CONTAINS target_role.name
|
||||
)
|
||||
|
||||
// Deduplicate before creating virtual relationships
|
||||
WITH DISTINCT admin_outcome, aws, principal, target_role
|
||||
|
||||
// Create virtual relationships showing the attack path
|
||||
CALL apoc.create.vRelationship(principal, 'CAN_MODIFY', {
|
||||
via: 'iam:AttachRolePolicy'
|
||||
}, target_role)
|
||||
YIELD rel AS modify_rel
|
||||
|
||||
CALL apoc.create.vRelationship(target_role, 'LEADS_TO', {
|
||||
technique: 'iam:AttachRolePolicy + sts:AssumeRole',
|
||||
via: 'sts:AssumeRole',
|
||||
reference: 'https://pathfinding.cloud/paths/iam-014'
|
||||
}, admin_outcome)
|
||||
YIELD rel AS escalation_rel
|
||||
|
||||
// Re-match paths for visualization
|
||||
MATCH path_principal = (aws)--(principal)
|
||||
MATCH path_target = (aws)--(target_role)
|
||||
|
||||
UNWIND nodes(path_principal) + nodes(path_target) as n
|
||||
OPTIONAL MATCH (n)-[pfr]-(pf:ProwlerFinding)
|
||||
WHERE pf.status = 'FAIL'
|
||||
|
||||
RETURN path_principal, path_target,
|
||||
admin_outcome, modify_rel, escalation_rel,
|
||||
collect(DISTINCT pf) as dpf, collect(DISTINCT pfr) as dpfr
|
||||
""",
|
||||
parameters=[],
|
||||
),
|
||||
AttackPathsQueryDefinition(
|
||||
id="aws-bedrock-privesc-passrole-code-interpreter",
|
||||
name="Privilege Escalation: Bedrock Code Interpreter with PassRole",
|
||||
description="Detect principals that can escalate privileges by passing a role to a Bedrock AgentCore Code Interpreter. The attacker creates a code interpreter with an arbitrary role, then invokes it to execute code with those credentials.",
|
||||
provider="aws",
|
||||
cypher="""
|
||||
CALL apoc.create.vNode(['PrivilegeEscalation'], {
|
||||
id: 'effective-administrator-bedrock',
|
||||
check_title: 'Privilege Escalation',
|
||||
name: 'Effective Administrator (Bedrock)',
|
||||
status: 'FAIL',
|
||||
severity: 'critical'
|
||||
})
|
||||
YIELD node AS escalation_outcome
|
||||
|
||||
WITH escalation_outcome
|
||||
|
||||
// Find principals in the account
|
||||
MATCH path_principal = (aws:AWSAccount {id: $provider_uid})--(principal:AWSPrincipal)
|
||||
|
||||
// Principal can assume roles (up to 2 hops)
|
||||
OPTIONAL MATCH path_assume = (principal)-[:STS_ASSUMEROLE_ALLOW*0..2]->(acting_as:AWSRole)
|
||||
WITH escalation_outcome, aws, principal, path_principal, path_assume,
|
||||
CASE WHEN path_assume IS NULL THEN principal ELSE acting_as END AS effective_principal
|
||||
|
||||
// Find iam:PassRole permission
|
||||
MATCH path_passrole = (effective_principal)--(passrole_policy:AWSPolicy)--(passrole_stmt:AWSPolicyStatement)
|
||||
WHERE passrole_stmt.effect = 'Allow'
|
||||
AND any(action IN passrole_stmt.action WHERE toLower(action) = 'iam:passrole' OR action = '*')
|
||||
|
||||
// Find Bedrock AgentCore permissions
|
||||
MATCH (effective_principal)--(bedrock_policy:AWSPolicy)--(bedrock_stmt:AWSPolicyStatement)
|
||||
WHERE bedrock_stmt.effect = 'Allow'
|
||||
AND (
|
||||
any(action IN bedrock_stmt.action WHERE toLower(action) = 'bedrock-agentcore:createcodeinterpreter' OR action = '*' OR toLower(action) = 'bedrock-agentcore:*')
|
||||
)
|
||||
AND (
|
||||
any(action IN bedrock_stmt.action WHERE toLower(action) = 'bedrock-agentcore:startsession' OR action = '*' OR toLower(action) = 'bedrock-agentcore:*')
|
||||
)
|
||||
AND (
|
||||
any(action IN bedrock_stmt.action WHERE toLower(action) = 'bedrock-agentcore:invoke' OR action = '*' OR toLower(action) = 'bedrock-agentcore:*')
|
||||
)
|
||||
|
||||
// Find target roles with elevated permissions that could be passed
|
||||
MATCH (aws)--(target_role:AWSRole)--(target_policy:AWSPolicy)--(target_stmt:AWSPolicyStatement)
|
||||
WHERE target_stmt.effect = 'Allow'
|
||||
AND (
|
||||
any(action IN target_stmt.action WHERE action = '*')
|
||||
OR any(action IN target_stmt.action WHERE toLower(action) = 'iam:*')
|
||||
)
|
||||
|
||||
// Deduplicate per (principal, target_role) pair
|
||||
WITH DISTINCT escalation_outcome, aws, principal, target_role
|
||||
|
||||
// Group by principal, collect target_roles
|
||||
WITH escalation_outcome, aws, principal,
|
||||
collect(DISTINCT target_role) AS target_roles,
|
||||
count(DISTINCT target_role) AS target_count
|
||||
|
||||
// Create single virtual Bedrock node per principal
|
||||
CALL apoc.create.vNode(['BedrockCodeInterpreter'], {
|
||||
name: 'New Code Interpreter',
|
||||
description: toString(target_count) + ' admin role(s) can be passed',
|
||||
id: principal.arn,
|
||||
target_role_count: target_count
|
||||
})
|
||||
YIELD node AS bedrock_agent
|
||||
|
||||
// Connect from principal (not effective_principal) to keep graph connected
|
||||
CALL apoc.create.vRelationship(principal, 'CREATES_INTERPRETER', {
|
||||
permissions: ['iam:PassRole', 'bedrock-agentcore:CreateCodeInterpreter', 'bedrock-agentcore:StartSession', 'bedrock-agentcore:Invoke'],
|
||||
technique: 'new-passrole'
|
||||
}, bedrock_agent)
|
||||
YIELD rel AS create_rel
|
||||
|
||||
// UNWIND target_roles to show which roles can be passed
|
||||
UNWIND target_roles AS target_role
|
||||
|
||||
CALL apoc.create.vRelationship(bedrock_agent, 'PASSES_ROLE', {}, target_role)
|
||||
YIELD rel AS pass_rel
|
||||
|
||||
CALL apoc.create.vRelationship(target_role, 'GRANTS_ACCESS', {
|
||||
reference: 'https://pathfinding.cloud/paths/bedrock-001'
|
||||
}, escalation_outcome)
|
||||
YIELD rel AS grants_rel
|
||||
|
||||
// Re-match path for visualization
|
||||
MATCH path_principal = (aws)--(principal)
|
||||
|
||||
RETURN path_principal,
|
||||
bedrock_agent, target_role, escalation_outcome, create_rel, pass_rel, grants_rel, target_count
|
||||
""",
|
||||
parameters=[],
|
||||
),
|
||||
],
|
||||
}
|
||||
|
||||
_QUERIES_BY_ID: dict[str, AttackPathsQueryDefinition] = {
|
||||
definition.id: definition
|
||||
for definitions in _QUERY_DEFINITIONS.values()
|
||||
for definition in definitions
|
||||
}
|
||||
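# Illustrative sketch (not part of the diff): with the registry above, the
# module-level accessors imported in `api/attack_paths/__init__.py` can be
# thin dict lookups. The exact bodies in the real module may differ.
def get_query_by_id(query_id: str) -> AttackPathsQueryDefinition | None:
    """Resolve a query definition by its stable id, e.g. 'aws-glue-privesc-passrole-dev-endpoint'."""
    return _QUERIES_BY_ID.get(query_id)


def get_queries_for_provider(provider: str) -> list[AttackPathsQueryDefinition]:
    """Return the catalog of Attack Paths queries for one provider type."""
    return list(_QUERY_DEFINITIONS.get(provider, []))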
@@ -1,12 +1,13 @@
import logging

from typing import Any
from typing import Any, Iterable

from rest_framework.exceptions import APIException, ValidationError

from api.attack_paths import database as graph_database, AttackPathsQueryDefinition
from api.models import AttackPathsScan
from config.custom_logging import BackendLogger
from tasks.jobs.attack_paths.config import INTERNAL_LABELS

logger = logging.getLogger(BackendLogger.API)

@@ -101,7 +102,7 @@ def _serialize_graph(graph):
        nodes.append(
            {
                "id": node.element_id,
                "labels": list(node.labels),
                "labels": _filter_labels(node.labels),
                "properties": _serialize_properties(node._properties),
            },
        )
@@ -124,6 +125,10 @@ def _serialize_graph(graph):
    }


def _filter_labels(labels: Iterable[str]) -> list[str]:
    return [label for label in labels if label not in INTERNAL_LABELS]


def _serialize_properties(properties: dict[str, Any]) -> dict[str, Any]:
    """Convert Neo4j property values into JSON-serializable primitives."""


@@ -2287,7 +2287,7 @@ class TaskViewSet(BaseRLSViewSet):
        ),
        attack_paths_queries=extend_schema(
            tags=["Attack Paths"],
            summary="List attack paths queries",
            summary="List Attack Paths queries",
            description="Retrieve the catalog of Attack Paths queries available for this Attack Paths scan.",
            responses={
                200: OpenApiResponse(AttackPathsQuerySerializer(many=True)),
@@ -2307,7 +2307,7 @@ class TaskViewSet(BaseRLSViewSet):
                description="Bad request (e.g., Unknown Attack Paths query for the selected provider)"
            ),
            404: OpenApiResponse(
                description="No attack paths found for the given query and parameters"
                description="No Attack Paths found for the given query and parameters"
            ),
            500: OpenApiResponse(
                description="Attack Paths query execution failed due to a database error"
86
api/src/backend/tasks/jobs/attack_paths/config.py
Normal file
@@ -0,0 +1,86 @@
from dataclasses import dataclass
from typing import Callable

from config.env import env

from tasks.jobs.attack_paths import aws


# Batch size for Neo4j operations
BATCH_SIZE = env.int("ATTACK_PATHS_BATCH_SIZE", 1000)

# Neo4j internal labels (Prowler-specific, not provider-specific)
# - `ProwlerFinding`: Label for finding nodes created by Prowler and linked to cloud resources.
# - `ProviderResource`: Added to ALL synced nodes for provider isolation and drop/query ops.
PROWLER_FINDING_LABEL = "ProwlerFinding"
PROVIDER_RESOURCE_LABEL = "ProviderResource"


@dataclass(frozen=True)
class ProviderConfig:
    """Configuration for a cloud provider's Attack Paths integration."""

    name: str
    root_node_label: str  # e.g., "AWSAccount"
    uid_field: str  # e.g., "arn"
    # Label for resources connected to the account node, enabling indexed finding lookups.
    resource_label: str  # e.g., "AWSResource"
    ingestion_function: Callable


# Provider Configurations
# -----------------------

AWS_CONFIG = ProviderConfig(
    name="aws",
    root_node_label="AWSAccount",
    uid_field="arn",
    resource_label="AWSResource",
    ingestion_function=aws.start_aws_ingestion,
)

PROVIDER_CONFIGS: dict[str, ProviderConfig] = {
    "aws": AWS_CONFIG,
}

# Labels added by Prowler that should be filtered from API responses
# Derived from provider configs + common internal labels
INTERNAL_LABELS: list[str] = [
    "Tenant",
    PROVIDER_RESOURCE_LABEL,
    # Add all provider-specific resource labels
    *[config.resource_label for config in PROVIDER_CONFIGS.values()],
]


# Provider Config Accessors
# -------------------------


def is_provider_available(provider_type: str) -> bool:
    """Check if a provider type is available for Attack Paths scans."""
    return provider_type in PROVIDER_CONFIGS


def get_cartography_ingestion_function(provider_type: str) -> Callable | None:
    """Get the Cartography ingestion function for a provider type."""
    config = PROVIDER_CONFIGS.get(provider_type)
    return config.ingestion_function if config else None


def get_root_node_label(provider_type: str) -> str:
    """Get the root node label for a provider type (e.g., AWSAccount)."""
    config = PROVIDER_CONFIGS.get(provider_type)
    return config.root_node_label if config else "UnknownProviderAccount"


def get_node_uid_field(provider_type: str) -> str:
    """Get the UID field for a provider type (e.g., arn for AWS)."""
    config = PROVIDER_CONFIGS.get(provider_type)
    return config.uid_field if config else "UnknownProviderUID"


def get_provider_resource_label(provider_type: str) -> str:
    """Get the resource label for a provider type (e.g., `AWSResource`)."""
    config = PROVIDER_CONFIGS.get(provider_type)
    return config.resource_label if config else "UnknownProviderResource"
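# Usage sketch (illustrative, not part of the diff): the accessors above return
# "Unknown..." sentinel values rather than raising for unregistered providers,
# so template rendering never fails outright on an unsupported provider type.
assert get_root_node_label("aws") == "AWSAccount"
assert get_node_uid_field("aws") == "arn"
assert get_root_node_label("gcp") == "UnknownProviderAccount"  # "gcp" is not registered (yet)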
@@ -1,7 +1,6 @@
from datetime import datetime, timezone
from typing import Any

from django.db.models import Q
from cartography.config import Config as CartographyConfig

from api.db_utils import rls_transaction
@@ -10,7 +9,7 @@ from api.models import (
    Provider as ProwlerAPIProvider,
    StateChoices,
)
from tasks.jobs.attack_paths.providers import is_provider_available
from tasks.jobs.attack_paths.config import is_provider_available


def can_provider_run_attack_paths_scan(tenant_id: str, provider_id: int) -> bool:
@@ -145,24 +144,3 @@ def update_old_attack_paths_scan(
    with rls_transaction(old_attack_paths_scan.tenant_id):
        old_attack_paths_scan.is_graph_database_deleted = True
        old_attack_paths_scan.save(update_fields=["is_graph_database_deleted"])


def get_provider_graph_database_names(tenant_id: str, provider_id: str) -> list[str]:
    """
    Return existing graph database names for a tenant/provider.

    Note: to access the `AttackPathsScan` we need to use the `all_objects` manager because the provider is soft-deleted.
    """
    with rls_transaction(tenant_id):
        graph_databases_names_qs = (
            ProwlerAPIAttackPathsScan.all_objects.filter(
                ~Q(graph_database=""),
                graph_database__isnull=False,
                provider_id=provider_id,
                is_graph_database_deleted=False,
            )
            .values_list("graph_database", flat=True)
            .distinct()
        )

        return list(graph_databases_names_qs)
355
api/src/backend/tasks/jobs/attack_paths/findings.py
Normal file
@@ -0,0 +1,355 @@
"""
Prowler findings ingestion into Neo4j graph.

This module handles:
- Adding resource labels to Cartography nodes for efficient lookups
- Loading Prowler findings into the graph
- Linking findings to resources
- Cleaning up stale findings
"""

from collections import defaultdict
from dataclasses import asdict, dataclass, fields
from typing import Any, Generator
from uuid import UUID

import neo4j

from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Finding as FindingModel
from api.models import Provider, ResourceFindingMapping
from prowler.config import config as ProwlerConfig
from tasks.jobs.attack_paths.config import (
    BATCH_SIZE,
    get_node_uid_field,
    get_provider_resource_label,
    get_root_node_label,
)
from tasks.jobs.attack_paths.indexes import IndexType, create_indexes
from tasks.jobs.attack_paths.queries import (
    ADD_RESOURCE_LABEL_TEMPLATE,
    CLEANUP_FINDINGS_TEMPLATE,
    INSERT_FINDING_TEMPLATE,
    render_cypher_template,
)

logger = get_task_logger(__name__)


# Type Definitions
# -----------------

# Maps dataclass field names to Django ORM query field names
_DB_FIELD_MAP: dict[str, str] = {
    "check_title": "check_metadata__checktitle",
}


@dataclass(slots=True)
class Finding:
    """
    Finding data for Neo4j ingestion.

    Can be created from a Django .values() query result using from_db_record().
    """

    id: str
    uid: str
    inserted_at: str
    updated_at: str
    first_seen_at: str
    scan_id: str
    delta: str
    status: str
    status_extended: str
    severity: str
    check_id: str
    check_title: str
    muted: bool
    muted_reason: str | None
    resource_uid: str | None = None

    @classmethod
    def get_db_query_fields(cls) -> tuple[str, ...]:
        """Get field names for Django .values() query."""
        return tuple(
            _DB_FIELD_MAP.get(f.name, f.name)
            for f in fields(cls)
            if f.name != "resource_uid"
        )

    @classmethod
    def from_db_record(cls, record: dict[str, Any], resource_uid: str) -> "Finding":
        """Create a Finding from a Django .values() query result."""
        return cls(
            id=str(record["id"]),
            uid=record["uid"],
            inserted_at=record["inserted_at"],
            updated_at=record["updated_at"],
            first_seen_at=record["first_seen_at"],
            scan_id=str(record["scan_id"]),
            delta=record["delta"],
            status=record["status"],
            status_extended=record["status_extended"],
            severity=record["severity"],
            check_id=str(record["check_id"]),
            check_title=record["check_metadata__checktitle"],
            muted=record["muted"],
            muted_reason=record["muted_reason"],
            resource_uid=resource_uid,
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert to dict for Neo4j ingestion."""
        return asdict(self)
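# Illustrative round trip (hypothetical values, not part of the module):
# get_db_query_fields() swaps `check_title` for the ORM lookup path
# `check_metadata__checktitle`, so a .values() row's keys line up with what
# from_db_record() expects.
#
# record = {
#     "id": "6f0e...", "uid": "prowler-aws-s3_bucket_public_access-...",
#     "check_metadata__checktitle": "Ensure S3 buckets are not publicly accessible",
#     ...,  # remaining keys come from Finding.get_db_query_fields()
# }
# finding = Finding.from_db_record(record, resource_uid="arn:aws:s3:::my-bucket")
# finding.to_dict()  # flat dict, ready for UNWIND in the insert template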
# Public API
# ----------


def create_findings_indexes(neo4j_session: neo4j.Session) -> None:
    """Create indexes for Prowler findings and resource lookups."""
    create_indexes(neo4j_session, IndexType.FINDINGS)


def analysis(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    scan_id: str,
    config: CartographyConfig,
) -> None:
    """
    Main entry point for Prowler findings analysis.

    Adds resource labels, loads findings, and cleans up stale data.
    """
    add_resource_label(
        neo4j_session, prowler_api_provider.provider, str(prowler_api_provider.uid)
    )
    findings_data = stream_findings_with_resources(prowler_api_provider, scan_id)
    load_findings(neo4j_session, findings_data, prowler_api_provider, config)
    cleanup_findings(neo4j_session, prowler_api_provider, config)


def add_resource_label(
    neo4j_session: neo4j.Session, provider_type: str, provider_uid: str
) -> int:
    """
    Add a common resource label to all nodes connected to the provider account.

    This enables index usage for resource lookups in the findings query,
    since Cartography nodes don't have a common parent label.

    Returns the total number of nodes labeled.
    """
    query = render_cypher_template(
        ADD_RESOURCE_LABEL_TEMPLATE,
        {
            "__ROOT_LABEL__": get_root_node_label(provider_type),
            "__RESOURCE_LABEL__": get_provider_resource_label(provider_type),
        },
    )

    logger.info(
        f"Adding {get_provider_resource_label(provider_type)} label to all resources for {provider_uid}"
    )

    total_labeled = 0
    labeled_count = 1

    while labeled_count > 0:
        result = neo4j_session.run(
            query,
            {"provider_uid": provider_uid, "batch_size": BATCH_SIZE},
        )
        labeled_count = result.single().get("labeled_count", 0)
        total_labeled += labeled_count

        if labeled_count > 0:
            logger.info(
                f"Labeled {total_labeled} nodes with {get_provider_resource_label(provider_type)}"
            )

    return total_labeled


def load_findings(
    neo4j_session: neo4j.Session,
    findings_batches: Generator[list[Finding], None, None],
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    """Load Prowler findings into the graph, linking them to resources."""
    query = render_cypher_template(
        INSERT_FINDING_TEMPLATE,
        {
            "__ROOT_NODE_LABEL__": get_root_node_label(prowler_api_provider.provider),
            "__NODE_UID_FIELD__": get_node_uid_field(prowler_api_provider.provider),
            "__RESOURCE_LABEL__": get_provider_resource_label(
                prowler_api_provider.provider
            ),
        },
    )

    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "prowler_version": ProwlerConfig.prowler_version,
    }

    batch_num = 0
    total_records = 0
    for batch in findings_batches:
        batch_num += 1
        batch_size = len(batch)
        total_records += batch_size

        parameters["findings_data"] = [f.to_dict() for f in batch]

        logger.info(f"Loading findings batch {batch_num} ({batch_size} records)")
        neo4j_session.run(query, parameters)

    logger.info(f"Finished loading {total_records} records in {batch_num} batches")


def cleanup_findings(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    """Remove stale findings (classic Cartography behaviour)."""
    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "batch_size": BATCH_SIZE,
    }

    batch = 1
    deleted_count = 1
    while deleted_count > 0:
        logger.info(f"Cleaning findings batch {batch}")

        result = neo4j_session.run(CLEANUP_FINDINGS_TEMPLATE, parameters)

        deleted_count = result.single().get("deleted_findings_count", 0)
        batch += 1


# Findings Streaming (Generator-based)
# -------------------------------------


def stream_findings_with_resources(
    prowler_api_provider: Provider,
    scan_id: str,
) -> Generator[list[Finding], None, None]:
    """
    Stream findings with their associated resources in batches.

    Uses keyset pagination for efficient traversal of large datasets.
    Memory efficient: yields one batch at a time, never holds all findings in memory.
    """
    logger.info(
        f"Starting findings stream for scan {scan_id} "
        f"(tenant {prowler_api_provider.tenant_id}) with batch size {BATCH_SIZE}"
    )

    tenant_id = prowler_api_provider.tenant_id
    for batch in _paginate_findings(tenant_id, scan_id):
        enriched = _enrich_batch_with_resources(batch, tenant_id)
        if enriched:
            yield enriched

    logger.info(f"Finished streaming findings for scan {scan_id}")


def _paginate_findings(
    tenant_id: str,
    scan_id: str,
) -> Generator[list[dict[str, Any]], None, None]:
    """
    Paginate through findings using keyset pagination.

    Each iteration fetches one batch within its own RLS transaction,
    preventing long-held database connections.
    """
    last_id = None
    iteration = 0

    while True:
        iteration += 1
        batch = _fetch_findings_batch(tenant_id, scan_id, last_id)

        logger.info(f"Iteration #{iteration}: fetched {len(batch)} findings")

        if not batch:
            break

        last_id = batch[-1]["id"]
        yield batch


def _fetch_findings_batch(
    tenant_id: str,
    scan_id: str,
    after_id: UUID | None,
) -> list[dict[str, Any]]:
    """
    Fetch a single batch of findings from the database.

    Uses read replica and RLS-scoped transaction.
    """
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        # Use all_objects to avoid the ActiveProviderManager's implicit JOIN
        # through Scan -> Provider (to check is_deleted=False).
        # The provider is already validated as active in this context.
        qs = FindingModel.all_objects.filter(scan_id=scan_id).order_by("id")

        if after_id is not None:
            qs = qs.filter(id__gt=after_id)

        return list(qs.values(*Finding.get_db_query_fields())[:BATCH_SIZE])
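# Sketch of the keyset pagination above (illustrative, hypothetical table name):
# instead of OFFSET, which rescans every skipped row, each batch filters on the
# last id already seen and walks the primary-key index directly.
#
#   SELECT ... FROM findings
#   WHERE scan_id = %s AND id > %s   -- `id > last_id` is the keyset predicate
#   ORDER BY id
#   LIMIT 1000;                      -- BATCH_SIZE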
# Batch Enrichment
# -----------------


def _enrich_batch_with_resources(
    findings_batch: list[dict[str, Any]],
    tenant_id: str,
) -> list[Finding]:
    """
    Enrich findings with their resource UIDs.

    One finding with N resources becomes N output records.
    Findings without resources are skipped.
    """
    finding_ids = [f["id"] for f in findings_batch]
    resource_map = _build_finding_resource_map(finding_ids, tenant_id)

    return [
        Finding.from_db_record(finding, resource_uid)
        for finding in findings_batch
        for resource_uid in resource_map.get(finding["id"], [])
    ]


def _build_finding_resource_map(
    finding_ids: list[UUID], tenant_id: str
) -> dict[UUID, list[str]]:
    """Build mapping from finding_id to list of resource UIDs."""
    with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
        resource_mappings = ResourceFindingMapping.objects.filter(
            finding_id__in=finding_ids
        ).values_list("finding_id", "resource__uid")

        result = defaultdict(list)
        for finding_id, resource_uid in resource_mappings:
            result[finding_id].append(resource_uid)
        return result
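# Fan-out example (illustrative, hypothetical values): one finding mapped to
# two resources becomes two records, while unmapped findings are dropped.
_demo_map = {"f1": ["arn:aws:s3:::bucket-a", "arn:aws:s3:::bucket-b"], "f2": []}
_demo_batch = [{"id": "f1"}, {"id": "f2"}]
_pairs = [(f["id"], uid) for f in _demo_batch for uid in _demo_map.get(f["id"], [])]
assert _pairs == [("f1", "arn:aws:s3:::bucket-a"), ("f1", "arn:aws:s3:::bucket-b")]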
64
api/src/backend/tasks/jobs/attack_paths/indexes.py
Normal file
@@ -0,0 +1,64 @@
from enum import Enum

import neo4j

from cartography.client.core.tx import run_write_query
from celery.utils.log import get_task_logger

from tasks.jobs.attack_paths.config import (
    PROWLER_FINDING_LABEL,
    PROVIDER_RESOURCE_LABEL,
)

logger = get_task_logger(__name__)


class IndexType(Enum):
    """Types of indexes that can be created."""

    FINDINGS = "findings"
    SYNC = "sync"


# Indexes for Prowler findings and resource lookups
FINDINGS_INDEX_STATEMENTS = [
    # Resources indexes for quick Prowler Finding lookups
    "CREATE INDEX aws_resource_arn IF NOT EXISTS FOR (n:AWSResource) ON (n.arn);",
    "CREATE INDEX aws_resource_id IF NOT EXISTS FOR (n:AWSResource) ON (n.id);",
    # Prowler Finding indexes
    f"CREATE INDEX prowler_finding_id IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.id);",
    f"CREATE INDEX prowler_finding_provider_uid IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.provider_uid);",
    f"CREATE INDEX prowler_finding_lastupdated IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.lastupdated);",
    f"CREATE INDEX prowler_finding_status IF NOT EXISTS FOR (n:{PROWLER_FINDING_LABEL}) ON (n.status);",
]

# Indexes for provider resource sync operations
SYNC_INDEX_STATEMENTS = [
    f"CREATE INDEX provider_element_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n.provider_element_id);",
    f"CREATE INDEX provider_resource_provider_id IF NOT EXISTS FOR (n:{PROVIDER_RESOURCE_LABEL}) ON (n.provider_id);",
]


def create_indexes(neo4j_session: neo4j.Session, index_type: IndexType) -> None:
    """
    Create indexes for the specified type.

    Args:
        `neo4j_session`: The Neo4j session to use
        `index_type`: The type of indexes to create (FINDINGS or SYNC)
    """
    if index_type == IndexType.FINDINGS:
        logger.info("Creating indexes for Prowler Findings node types")
        for statement in FINDINGS_INDEX_STATEMENTS:
            run_write_query(neo4j_session, statement)

    elif index_type == IndexType.SYNC:
        logger.info("Ensuring ProviderResource indexes exist")
        for statement in SYNC_INDEX_STATEMENTS:
            neo4j_session.run(statement)


def create_all_indexes(neo4j_session: neo4j.Session) -> None:
    """Create all indexes (both findings and sync)."""
    create_indexes(neo4j_session, IndexType.FINDINGS)
    create_indexes(neo4j_session, IndexType.SYNC)
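# Usage sketch (illustrative): every statement uses IF NOT EXISTS, so index
# creation is idempotent and safe to repeat on every scan. The database name
# below is hypothetical.
# with graph_database.get_session("tenant-graph-db") as session:
#     create_all_indexes(session)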
@@ -1,23 +0,0 @@
AVAILABLE_PROVIDERS: list[str] = [
    "aws",
]

ROOT_NODE_LABELS: dict[str, str] = {
    "aws": "AWSAccount",
}

NODE_UID_FIELDS: dict[str, str] = {
    "aws": "arn",
}


def is_provider_available(provider_type: str) -> bool:
    return provider_type in AVAILABLE_PROVIDERS


def get_root_node_label(provider_type: str) -> str:
    return ROOT_NODE_LABELS.get(provider_type, "UnknownProviderAccount")


def get_node_uid_field(provider_type: str) -> str:
    return NODE_UID_FIELDS.get(provider_type, "UnknownProviderUID")
@@ -1,290 +0,0 @@
from collections import defaultdict
from typing import Generator

import neo4j
from cartography.client.core.tx import run_write_query
from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger
from config.env import env
from tasks.jobs.attack_paths.providers import get_node_uid_field, get_root_node_label

from api.db_router import READ_REPLICA_ALIAS
from api.db_utils import rls_transaction
from api.models import Finding, Provider, ResourceFindingMapping
from prowler.config import config as ProwlerConfig

logger = get_task_logger(__name__)

BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 1000)

INDEX_STATEMENTS = [
    "CREATE INDEX prowler_finding_id IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.id);",
    "CREATE INDEX prowler_finding_provider_uid IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.provider_uid);",
    "CREATE INDEX prowler_finding_lastupdated IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.lastupdated);",
    "CREATE INDEX prowler_finding_check_id IF NOT EXISTS FOR (n:ProwlerFinding) ON (n.status);",
]

INSERT_STATEMENT_TEMPLATE = """
MATCH (account:__ROOT_NODE_LABEL__ {id: $provider_uid})
UNWIND $findings_data AS finding_data

OPTIONAL MATCH (account)-->(resource_by_uid)
WHERE resource_by_uid.__NODE_UID_FIELD__ = finding_data.resource_uid
WITH account, finding_data, resource_by_uid

OPTIONAL MATCH (account)-->(resource_by_id)
WHERE resource_by_uid IS NULL
AND resource_by_id.id = finding_data.resource_uid
WITH account, finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WHERE resource IS NOT NULL

MERGE (finding:ProwlerFinding {id: finding_data.id})
ON CREATE SET
    finding.id = finding_data.id,
    finding.uid = finding_data.uid,
    finding.inserted_at = finding_data.inserted_at,
    finding.updated_at = finding_data.updated_at,
    finding.first_seen_at = finding_data.first_seen_at,
    finding.scan_id = finding_data.scan_id,
    finding.delta = finding_data.delta,
    finding.status = finding_data.status,
    finding.status_extended = finding_data.status_extended,
    finding.severity = finding_data.severity,
    finding.check_id = finding_data.check_id,
    finding.check_title = finding_data.check_title,
    finding.muted = finding_data.muted,
    finding.muted_reason = finding_data.muted_reason,
    finding.provider_uid = $provider_uid,
    finding.firstseen = timestamp(),
    finding.lastupdated = $last_updated,
    finding._module_name = 'cartography:prowler',
    finding._module_version = $prowler_version
ON MATCH SET
    finding.status = finding_data.status,
    finding.status_extended = finding_data.status_extended,
    finding.lastupdated = $last_updated

MERGE (resource)-[rel:HAS_FINDING]->(finding)
ON CREATE SET
    rel.provider_uid = $provider_uid,
    rel.firstseen = timestamp(),
    rel.lastupdated = $last_updated,
    rel._module_name = 'cartography:prowler',
    rel._module_version = $prowler_version
ON MATCH SET
    rel.lastupdated = $last_updated
"""

CLEANUP_STATEMENT = """
MATCH (finding:ProwlerFinding {provider_uid: $provider_uid})
WHERE finding.lastupdated < $last_updated

WITH finding LIMIT $batch_size

DETACH DELETE finding

RETURN COUNT(finding) AS deleted_findings_count
"""


def create_indexes(neo4j_session: neo4j.Session) -> None:
    """
    Code based on Cartography, specifically on `cartography.intel.create_indexes.run`.
    """

    logger.info("Creating indexes for Prowler Findings node types")
    for statement in INDEX_STATEMENTS:
        run_write_query(neo4j_session, statement)


def analysis(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    scan_id: str,
    config: CartographyConfig,
) -> None:
    findings_data = get_provider_last_scan_findings(prowler_api_provider, scan_id)
    load_findings(neo4j_session, findings_data, prowler_api_provider, config)
    cleanup_findings(neo4j_session, prowler_api_provider, config)


def get_provider_last_scan_findings(
    prowler_api_provider: Provider,
    scan_id: str,
) -> Generator[list[dict[str, str]], None, None]:
    """
    Generator that yields batches of finding-resource pairs.

    Two-step query approach per batch:
    1. Paginate findings for scan (single table, indexed by scan_id)
    2. Batch-fetch resource UIDs via mapping table (single join)
    3. Merge and yield flat structure for Neo4j

    Memory efficient: never holds more than BATCH_SIZE findings in memory.
    """

    logger.info(
        f"Starting findings fetch for scan {scan_id} (tenant {prowler_api_provider.tenant_id}) with batch size {BATCH_SIZE}"
    )

    iteration = 0
    last_id = None

    while True:
        iteration += 1

        with rls_transaction(prowler_api_provider.tenant_id, using=READ_REPLICA_ALIAS):
            # Use all_objects to avoid the ActiveProviderManager's implicit JOIN
            # through Scan -> Provider (to check is_deleted=False).
            # The provider is already validated as active in this context.
            qs = Finding.all_objects.filter(scan_id=scan_id).order_by("id")
            if last_id is not None:
                qs = qs.filter(id__gt=last_id)

            findings_batch = list(
                qs.values(
                    "id",
                    "uid",
                    "inserted_at",
                    "updated_at",
                    "first_seen_at",
                    "scan_id",
                    "delta",
                    "status",
                    "status_extended",
                    "severity",
                    "check_id",
                    "check_metadata__checktitle",
                    "muted",
                    "muted_reason",
                )[:BATCH_SIZE]
            )

            logger.info(
                f"Iteration #{iteration} fetched {len(findings_batch)} findings"
            )

            if not findings_batch:
                logger.info(
                    f"No findings returned for iteration #{iteration}; stopping pagination"
                )
                break

            last_id = findings_batch[-1]["id"]
            enriched_batch = _enrich_and_flatten_batch(findings_batch)

        # Yield outside the transaction
        if enriched_batch:
            yield enriched_batch

    logger.info(f"Finished fetching findings for scan {scan_id}")


def _enrich_and_flatten_batch(
    findings_batch: list[dict],
) -> list[dict[str, str]]:
    """
    Fetch resource UIDs for a batch of findings and return flat structure.

    One finding with 3 resources becomes 3 dicts (same output format as before).
    Must be called within an RLS transaction context.
    """
    finding_ids = [f["id"] for f in findings_batch]

    # Single join: mapping -> resource
    resource_mappings = ResourceFindingMapping.objects.filter(
        finding_id__in=finding_ids
    ).values_list("finding_id", "resource__uid")

    # Build finding_id -> [resource_uids] mapping
    finding_resources = defaultdict(list)
    for finding_id, resource_uid in resource_mappings:
        finding_resources[finding_id].append(resource_uid)

    # Flatten: one dict per (finding, resource) pair
    results = []
    for f in findings_batch:
        resource_uids = finding_resources.get(f["id"], [])

        if not resource_uids:
            continue

        for resource_uid in resource_uids:
            results.append(
                {
                    "resource_uid": str(resource_uid),
                    "id": str(f["id"]),
                    "uid": f["uid"],
                    "inserted_at": f["inserted_at"],
                    "updated_at": f["updated_at"],
                    "first_seen_at": f["first_seen_at"],
                    "scan_id": str(f["scan_id"]),
                    "delta": f["delta"],
                    "status": f["status"],
                    "status_extended": f["status_extended"],
                    "severity": f["severity"],
                    "check_id": str(f["check_id"]),
                    "check_title": f["check_metadata__checktitle"],
                    "muted": f["muted"],
                    "muted_reason": f["muted_reason"],
                }
            )

    return results


def load_findings(
    neo4j_session: neo4j.Session,
    findings_batches: Generator[list[dict[str, str]], None, None],
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    replacements = {
        "__ROOT_NODE_LABEL__": get_root_node_label(prowler_api_provider.provider),
        "__NODE_UID_FIELD__": get_node_uid_field(prowler_api_provider.provider),
    }
    query = INSERT_STATEMENT_TEMPLATE
    for replace_key, replace_value in replacements.items():
        query = query.replace(replace_key, replace_value)

    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "prowler_version": ProwlerConfig.prowler_version,
    }

    batch_num = 0
    total_records = 0
    for batch in findings_batches:
        batch_num += 1
        batch_size = len(batch)
        total_records += batch_size

        parameters["findings_data"] = batch

        logger.info(f"Loading findings batch {batch_num} ({batch_size} records)")
        neo4j_session.run(query, parameters)

    logger.info(f"Finished loading {total_records} records in {batch_num} batches")


def cleanup_findings(
    neo4j_session: neo4j.Session,
    prowler_api_provider: Provider,
    config: CartographyConfig,
) -> None:
    parameters = {
        "provider_uid": str(prowler_api_provider.uid),
        "last_updated": config.update_tag,
        "batch_size": BATCH_SIZE,
    }

    batch = 1
    deleted_count = 1
    while deleted_count > 0:
        logger.info(f"Cleaning findings batch {batch}")

        result = neo4j_session.run(CLEANUP_STATEMENT, parameters)

        deleted_count = result.single().get("deleted_findings_count", 0)
        batch += 1
134
api/src/backend/tasks/jobs/attack_paths/queries.py
Normal file
@@ -0,0 +1,134 @@
# Cypher query templates for Attack Paths operations
from tasks.jobs.attack_paths.config import (
    PROWLER_FINDING_LABEL,
    PROVIDER_RESOURCE_LABEL,
)


def render_cypher_template(template: str, replacements: dict[str, str]) -> str:
    """
    Render a Cypher query template by replacing placeholders.

    Placeholders use `__DOUBLE_UNDERSCORE__` format to avoid conflicts
    with Cypher syntax.
    """
    query = template
    for placeholder, value in replacements.items():
        query = query.replace(placeholder, value)
    return query
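# Example (illustrative): labels cannot be bound as Cypher query parameters, so
# structural pieces are spliced in via templates while values stay parameterized.
# render_cypher_template(
#     "MATCH (a:__ROOT_NODE_LABEL__ {id: $provider_uid}) RETURN a",
#     {"__ROOT_NODE_LABEL__": "AWSAccount"},
# )
# -> "MATCH (a:AWSAccount {id: $provider_uid}) RETURN a"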
# Findings queries (used by findings.py)
# ---------------------------------------

ADD_RESOURCE_LABEL_TEMPLATE = """
MATCH (account:__ROOT_LABEL__ {id: $provider_uid})-->(r)
WHERE NOT r:__ROOT_LABEL__ AND NOT r:__RESOURCE_LABEL__
WITH r LIMIT $batch_size
SET r:__RESOURCE_LABEL__
RETURN COUNT(r) AS labeled_count
"""

INSERT_FINDING_TEMPLATE = f"""
MATCH (account:__ROOT_NODE_LABEL__ {{id: $provider_uid}})
UNWIND $findings_data AS finding_data

OPTIONAL MATCH (account)-->(resource_by_uid:__RESOURCE_LABEL__)
WHERE resource_by_uid.__NODE_UID_FIELD__ = finding_data.resource_uid
WITH account, finding_data, resource_by_uid

OPTIONAL MATCH (account)-->(resource_by_id:__RESOURCE_LABEL__)
WHERE resource_by_uid IS NULL
AND resource_by_id.id = finding_data.resource_uid
WITH account, finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WHERE resource IS NOT NULL

MERGE (finding:{PROWLER_FINDING_LABEL} {{id: finding_data.id}})
ON CREATE SET
    finding.id = finding_data.id,
    finding.uid = finding_data.uid,
    finding.inserted_at = finding_data.inserted_at,
    finding.updated_at = finding_data.updated_at,
    finding.first_seen_at = finding_data.first_seen_at,
    finding.scan_id = finding_data.scan_id,
    finding.delta = finding_data.delta,
    finding.status = finding_data.status,
    finding.status_extended = finding_data.status_extended,
    finding.severity = finding_data.severity,
    finding.check_id = finding_data.check_id,
    finding.check_title = finding_data.check_title,
    finding.muted = finding_data.muted,
    finding.muted_reason = finding_data.muted_reason,
    finding.provider_uid = $provider_uid,
    finding.firstseen = timestamp(),
    finding.lastupdated = $last_updated,
    finding._module_name = 'cartography:prowler',
    finding._module_version = $prowler_version
ON MATCH SET
    finding.status = finding_data.status,
    finding.status_extended = finding_data.status_extended,
    finding.lastupdated = $last_updated

MERGE (resource)-[rel:HAS_FINDING]->(finding)
ON CREATE SET
    rel.provider_uid = $provider_uid,
    rel.firstseen = timestamp(),
    rel.lastupdated = $last_updated,
    rel._module_name = 'cartography:prowler',
    rel._module_version = $prowler_version
ON MATCH SET
    rel.lastupdated = $last_updated
"""

CLEANUP_FINDINGS_TEMPLATE = f"""
MATCH (finding:{PROWLER_FINDING_LABEL} {{provider_uid: $provider_uid}})
WHERE finding.lastupdated < $last_updated

WITH finding LIMIT $batch_size

DETACH DELETE finding

RETURN COUNT(finding) AS deleted_findings_count
"""

# Sync queries (used by sync.py)
# -------------------------------

NODE_FETCH_QUERY = """
MATCH (n)
WHERE id(n) > $last_id
RETURN id(n) AS internal_id,
    elementId(n) AS element_id,
    labels(n) AS labels,
    properties(n) AS props
ORDER BY internal_id
LIMIT $batch_size
"""

RELATIONSHIPS_FETCH_QUERY = """
MATCH ()-[r]->()
WHERE id(r) > $last_id
RETURN id(r) AS internal_id,
    type(r) AS rel_type,
    elementId(startNode(r)) AS start_element_id,
    elementId(endNode(r)) AS end_element_id,
    properties(r) AS props
ORDER BY internal_id
LIMIT $batch_size
"""

NODE_SYNC_TEMPLATE = """
UNWIND $rows AS row
MERGE (n:__NODE_LABELS__ {provider_element_id: row.provider_element_id})
SET n += row.props
SET n.provider_id = $provider_id
"""

RELATIONSHIP_SYNC_TEMPLATE = f"""
UNWIND $rows AS row
MATCH (s:{PROVIDER_RESOURCE_LABEL} {{provider_element_id: row.start_element_id}})
MATCH (t:{PROVIDER_RESOURCE_LABEL} {{provider_element_id: row.end_element_id}})
MERGE (s)-[r:__REL_TYPE__ {{provider_element_id: row.provider_element_id}}]->(t)
SET r += row.props
SET r.provider_id = $provider_id
"""
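# Rendering sketch (illustrative): __NODE_LABELS__ and __REL_TYPE__ are spliced
# in per batch, since labels and relationship types cannot be parameterized in
# Cypher. The colon-joined label string below is an assumption about how
# sync.py joins a node's labels.
# node_query = render_cypher_template(
#     NODE_SYNC_TEMPLATE, {"__NODE_LABELS__": "AWSRole:AWSPrincipal:ProviderResource"}
# )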
@@ -1,8 +1,7 @@
|
||||
import logging
|
||||
import time
|
||||
import asyncio
|
||||
|
||||
from typing import Any, Callable
|
||||
from typing import Any
|
||||
|
||||
from cartography.config import Config as CartographyConfig
|
||||
from cartography.intel import analysis as cartography_analysis
|
||||
@@ -17,7 +16,8 @@ from api.models import (
|
||||
StateChoices,
|
||||
)
|
||||
from api.utils import initialize_prowler_provider
|
||||
from tasks.jobs.attack_paths import aws, db_utils, prowler, utils
|
||||
from tasks.jobs.attack_paths import db_utils, findings, sync, utils
|
||||
from tasks.jobs.attack_paths.config import get_cartography_ingestion_function
|
||||
|
||||
# Without this Celery goes crazy with Cartography logging
|
||||
logging.getLogger("cartography").setLevel(logging.ERROR)
|
||||
@@ -25,14 +25,6 @@ logging.getLogger("neo4j").propagate = False
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
CARTOGRAPHY_INGESTION_FUNCTIONS: dict[str, Callable] = {
|
||||
"aws": aws.start_aws_ingestion,
|
||||
}
|
||||
|
||||
|
||||
def get_cartography_ingestion_function(provider_type: str) -> Callable | None:
|
||||
return CARTOGRAPHY_INGESTION_FUNCTIONS.get(provider_type)
|
||||
|
||||
|
||||
def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
|
||||
"""
|
||||
@@ -76,22 +68,36 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
|
||||
tenant_id, scan_id, prowler_api_provider.id
|
||||
)
|
||||
|
||||
tmp_database_name = graph_database.get_database_name(
|
||||
attack_paths_scan.id, temporary=True
|
||||
)
|
||||
tenant_database_name = graph_database.get_database_name(
|
||||
prowler_api_provider.tenant_id
|
||||
)
|
||||
|
||||
# While creating the Cartography configuration, attributes `neo4j_user` and `neo4j_password` are not really needed in this config object
|
||||
cartography_config = CartographyConfig(
|
||||
tmp_cartography_config = CartographyConfig(
|
||||
neo4j_uri=graph_database.get_uri(),
|
||||
neo4j_database=graph_database.get_database_name(attack_paths_scan.id),
|
||||
neo4j_database=tmp_database_name,
|
||||
update_tag=int(time.time()),
|
||||
)
|
||||
tenant_cartography_config = CartographyConfig(
|
||||
neo4j_uri=tmp_cartography_config.neo4j_uri,
|
||||
neo4j_database=tenant_database_name,
|
||||
update_tag=tmp_cartography_config.update_tag,
|
||||
)
|
||||
|
||||
# Starting the Attack Paths scan
|
||||
db_utils.starting_attack_paths_scan(attack_paths_scan, task_id, cartography_config)
|
||||
db_utils.starting_attack_paths_scan(
|
||||
attack_paths_scan, task_id, tenant_cartography_config
|
||||
)
|
||||
|
||||
try:
|
||||
logger.info(
|
||||
f"Creating Neo4j database {cartography_config.neo4j_database} for tenant {prowler_api_provider.tenant_id}"
|
||||
f"Creating Neo4j database {tmp_cartography_config.neo4j_database} for tenant {prowler_api_provider.tenant_id}"
|
||||
)
|
||||
|
||||
graph_database.create_database(cartography_config.neo4j_database)
|
||||
graph_database.create_database(tmp_cartography_config.neo4j_database)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 1)
|
||||
|
||||
logger.info(
|
||||
@@ -99,18 +105,18 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
|
||||
f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
|
||||
)
|
||||
with graph_database.get_session(
|
||||
cartography_config.neo4j_database
|
||||
) as neo4j_session:
|
||||
tmp_cartography_config.neo4j_database
|
||||
) as tmp_neo4j_session:
|
||||
# Indexes creation
|
||||
cartography_create_indexes.run(neo4j_session, cartography_config)
|
||||
prowler.create_indexes(neo4j_session)
|
||||
cartography_create_indexes.run(tmp_neo4j_session, tmp_cartography_config)
|
||||
findings.create_findings_indexes(tmp_neo4j_session)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 2)
|
||||
|
||||
# The real scan, where iterates over cloud services
|
||||
ingestion_exceptions = _call_within_event_loop(
|
||||
ingestion_exceptions = utils.call_within_event_loop(
|
||||
cartography_ingestion_function,
|
||||
neo4j_session,
|
||||
cartography_config,
|
||||
tmp_neo4j_session,
|
||||
tmp_cartography_config,
|
||||
prowler_api_provider,
|
||||
prowler_sdk_provider,
|
||||
attack_paths_scan,
|
||||
@@ -120,43 +126,92 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
|
||||
logger.info(
|
||||
f"Syncing Cartography ontology for AWS account {prowler_api_provider.uid}"
|
||||
)
|
||||
cartography_ontology.run(neo4j_session, cartography_config)
|
||||
cartography_ontology.run(tmp_neo4j_session, tmp_cartography_config)
|
||||
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)
|
||||
|
||||
logger.info(
|
||||
f"Syncing Cartography analysis for AWS account {prowler_api_provider.uid}"
|
||||
)
|
cartography_analysis.run(neo4j_session, cartography_config)
cartography_analysis.run(tmp_neo4j_session, tmp_cartography_config)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)

# Adding Prowler nodes and relationships
logger.info(
    f"Syncing Prowler analysis for AWS account {prowler_api_provider.uid}"
)
prowler.analysis(
    neo4j_session, prowler_api_provider, scan_id, cartography_config
findings.analysis(
    tmp_neo4j_session, prowler_api_provider, scan_id, tmp_cartography_config
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 97)

logger.info(
    f"Clearing Neo4j cache for database {cartography_config.neo4j_database}"
    f"Clearing Neo4j cache for database {tmp_cartography_config.neo4j_database}"
)
graph_database.clear_cache(cartography_config.neo4j_database)
graph_database.clear_cache(tmp_cartography_config.neo4j_database)

logger.info(
    f"Ensuring tenant database {tenant_database_name}, and its indexes, exists for tenant {prowler_api_provider.tenant_id}"
)
graph_database.create_database(tenant_database_name)
with graph_database.get_session(tenant_database_name) as tenant_neo4j_session:
    cartography_create_indexes.run(
        tenant_neo4j_session, tenant_cartography_config
    )
    findings.create_findings_indexes(tenant_neo4j_session)
    sync.create_sync_indexes(tenant_neo4j_session)

logger.info(f"Deleting existing provider graph in {tenant_database_name}")
graph_database.drop_subgraph(
    database=tenant_database_name,
    provider_id=str(prowler_api_provider.id),
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 98)

logger.info(
    f"Syncing graph from {tmp_database_name} into {tenant_database_name}"
)
sync.sync_graph(
    source_database=tmp_database_name,
    target_database=tenant_database_name,
    provider_id=str(prowler_api_provider.id),
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 99)

logger.info(f"Clearing Neo4j cache for database {tenant_database_name}")
graph_database.clear_cache(tenant_database_name)

logger.info(
    f"Completed Cartography ({attack_paths_scan.id}) for "
    f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
)

# Handling databases changes
# TODO
# This piece of code deletes old Neo4j databases for this tenant's provider.
# When we have cleaned all of these databases we need to:
# - Delete this block
# - Delete the `get_old_attack_paths_scans` & `update_old_attack_paths_scan` functions from `db_utils`
# - Remove `graph_database` & `is_graph_database_deleted` from the AttackPathsScan model:
#   - Check indexes
#   - Create migration
#   - Remove the use of `attack_paths_scan.graph_database` in `views` and `views_helpers`
#   - Tests
old_attack_paths_scans = db_utils.get_old_attack_paths_scans(
    prowler_api_provider.tenant_id,
    prowler_api_provider.id,
    attack_paths_scan.id,
)
for old_attack_paths_scan in old_attack_paths_scans:
    graph_database.drop_database(old_attack_paths_scan.graph_database)
    old_graph_database = old_attack_paths_scan.graph_database
    if old_graph_database and old_graph_database != tenant_database_name:
        logger.info(
            f"Dropping old Neo4j database {old_graph_database} for provider {prowler_api_provider.id}"
        )
        graph_database.drop_database(old_graph_database)
    db_utils.update_old_attack_paths_scan(old_attack_paths_scan)

logger.info(f"Dropping temporary Neo4j database {tmp_database_name}")
graph_database.drop_database(tmp_database_name)

db_utils.finish_attack_paths_scan(
    attack_paths_scan, StateChoices.COMPLETED, ingestion_exceptions
)
@@ -168,30 +223,8 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
            ingestion_exceptions["global_cartography_error"] = exception_message

        # Handling databases changes
        graph_database.drop_database(cartography_config.neo4j_database)
        graph_database.drop_database(tmp_cartography_config.neo4j_database)
        db_utils.finish_attack_paths_scan(
            attack_paths_scan, StateChoices.FAILED, ingestion_exceptions
        )
        raise


def _call_within_event_loop(fn, *args, **kwargs):
    """
    Cartography needs a running event loop, so assuming there is none (Celery task or even regular DRF endpoint),
    let's create a new one and set it as the current event loop for this thread.
    """

    loop = asyncio.new_event_loop()
    try:
        asyncio.set_event_loop(loop)
        return fn(*args, **kwargs)

    finally:
        try:
            loop.run_until_complete(loop.shutdown_asyncgens())

        except Exception as e:
            logger.warning(f"Failed to shutdown async generators cleanly: {e}")

        loop.close()
        asyncio.set_event_loop(None)
202
api/src/backend/tasks/jobs/attack_paths/sync.py
Normal file
@@ -0,0 +1,202 @@
"""
Graph sync operations for Attack Paths.

This module handles syncing graph data from temporary scan databases
to the tenant database, adding provider isolation labels and properties.
"""

from collections import defaultdict
from typing import Any

from celery.utils.log import get_task_logger

from api.attack_paths import database as graph_database
from tasks.jobs.attack_paths.config import BATCH_SIZE, PROVIDER_RESOURCE_LABEL
from tasks.jobs.attack_paths.indexes import IndexType, create_indexes
from tasks.jobs.attack_paths.queries import (
    NODE_FETCH_QUERY,
    NODE_SYNC_TEMPLATE,
    RELATIONSHIP_SYNC_TEMPLATE,
    RELATIONSHIPS_FETCH_QUERY,
    render_cypher_template,
)

logger = get_task_logger(__name__)


def create_sync_indexes(neo4j_session) -> None:
    """Create indexes for provider resource sync operations."""
    create_indexes(neo4j_session, IndexType.SYNC)


def sync_graph(
    source_database: str,
    target_database: str,
    provider_id: str,
) -> dict[str, int]:
    """
    Sync all nodes and relationships from source to target database.

    Args:
        `source_database`: The temporary scan database
        `target_database`: The tenant database
        `provider_id`: The provider ID for isolation

    Returns:
        Dict with counts of synced nodes and relationships
    """
    nodes_synced = sync_nodes(
        source_database,
        target_database,
        provider_id,
    )
    relationships_synced = sync_relationships(
        source_database,
        target_database,
        provider_id,
    )

    return {
        "nodes": nodes_synced,
        "relationships": relationships_synced,
    }


def sync_nodes(
    source_database: str,
    target_database: str,
    provider_id: str,
) -> int:
    """
    Sync nodes from source to target database.

    Adds `ProviderResource` label and `provider_id` property to all nodes.
    """
    last_id = -1
    total_synced = 0

    with (
        graph_database.get_session(source_database) as source_session,
        graph_database.get_session(target_database) as target_session,
    ):
        while True:
            rows = list(
                source_session.run(
                    NODE_FETCH_QUERY,
                    {"last_id": last_id, "batch_size": BATCH_SIZE},
                )
            )

            if not rows:
                break

            last_id = rows[-1]["internal_id"]

            grouped: dict[tuple[str, ...], list[dict[str, Any]]] = defaultdict(list)
            for row in rows:
                labels = tuple(sorted(set(row["labels"] or [])))
                props = dict(row["props"] or {})
                _strip_internal_properties(props)
                provider_element_id = f"{provider_id}:{row['element_id']}"
                grouped[labels].append(
                    {
                        "provider_element_id": provider_element_id,
                        "props": props,
                    }
                )

            for labels, batch in grouped.items():
                label_set = set(labels)
                label_set.add(PROVIDER_RESOURCE_LABEL)
                node_labels = ":".join(f"`{label}`" for label in sorted(label_set))

                query = render_cypher_template(
                    NODE_SYNC_TEMPLATE, {"__NODE_LABELS__": node_labels}
                )
                target_session.run(
                    query,
                    {
                        "rows": batch,
                        "provider_id": provider_id,
                    },
                )

            total_synced += len(rows)
            logger.info(
                f"Synced {total_synced} nodes from {source_database} to {target_database}"
            )

    return total_synced


def sync_relationships(
    source_database: str,
    target_database: str,
    provider_id: str,
) -> int:
    """
    Sync relationships from source to target database.

    Adds `provider_id` property to all relationships.
    """
    last_id = -1
    total_synced = 0

    with (
        graph_database.get_session(source_database) as source_session,
        graph_database.get_session(target_database) as target_session,
    ):
        while True:
            rows = list(
                source_session.run(
                    RELATIONSHIPS_FETCH_QUERY,
                    {"last_id": last_id, "batch_size": BATCH_SIZE},
                )
            )

            if not rows:
                break

            last_id = rows[-1]["internal_id"]

            grouped: dict[str, list[dict[str, Any]]] = defaultdict(list)
            for row in rows:
                props = dict(row["props"] or {})
                _strip_internal_properties(props)
                rel_type = row["rel_type"]
                grouped[rel_type].append(
                    {
                        "start_element_id": f"{provider_id}:{row['start_element_id']}",
                        "end_element_id": f"{provider_id}:{row['end_element_id']}",
                        "provider_element_id": f"{provider_id}:{rel_type}:{row['internal_id']}",
                        "props": props,
                    }
                )

            for rel_type, batch in grouped.items():
                query = render_cypher_template(
                    RELATIONSHIP_SYNC_TEMPLATE, {"__REL_TYPE__": rel_type}
                )
                target_session.run(
                    query,
                    {
                        "rows": batch,
                        "provider_id": provider_id,
                    },
                )

            total_synced += len(rows)
            logger.info(
                f"Synced {total_synced} relationships from {source_database} to {target_database}"
            )

    return total_synced


def _strip_internal_properties(props: dict[str, Any]) -> None:
    """Remove internal properties that shouldn't be copied during sync."""
    for key in [
        "provider_element_id",
        "provider_id",
    ]:
        props.pop(key, None)
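For orientation, here is a minimal sketch of how this module is meant to be driven, based only on the functions defined above (the database names and provider UUID are illustrative placeholders, not values from the codebase):

```python
# Hedged usage sketch: copy one provider's graph from its temporary scan
# database into the shared per-tenant database.
from api.attack_paths import database as graph_database
from tasks.jobs.attack_paths import sync

tmp_database_name = "tmp-scan-db"   # hypothetical temporary scan database
tenant_database_name = "tenant-db"  # hypothetical per-tenant database
provider_id = "11111111-2222-3333-4444-555555555555"  # hypothetical provider UUID

# Indexes on the sync keys keep the upserts in the target database fast.
with graph_database.get_session(tenant_database_name) as session:
    sync.create_sync_indexes(session)

counts = sync.sync_graph(
    source_database=tmp_database_name,
    target_database=tenant_database_name,
    provider_id=provider_id,
)
print(f"Synced {counts['nodes']} nodes, {counts['relationships']} relationships")
```

Note the keyset-pagination pattern in `sync_nodes` and `sync_relationships`: each fetch resumes from the last internal id seen (`last_id`) and reads at most `BATCH_SIZE` rows, so memory stays bounded regardless of graph size.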
@@ -1,10 +1,40 @@
import asyncio
import traceback

from datetime import datetime, timezone

from celery.utils.log import get_task_logger

logger = get_task_logger(__name__)


def stringify_exception(exception: Exception, context: str) -> str:
    """Format an exception with timestamp and traceback for logging."""
    timestamp = datetime.now(tz=timezone.utc)
    exception_traceback = traceback.TracebackException.from_exception(exception)
    traceback_string = "".join(exception_traceback.format())
    return f"{timestamp} - {context}\n{traceback_string}"


def call_within_event_loop(fn, *args, **kwargs):
    """
    Execute a function within a new event loop.

    Cartography needs a running event loop, so assuming there is none
    (Celery task or even regular DRF endpoint), this creates a new one
    and sets it as the current event loop for this thread.
    """
    loop = asyncio.new_event_loop()
    try:
        asyncio.set_event_loop(loop)
        return fn(*args, **kwargs)

    finally:
        try:
            loop.run_until_complete(loop.shutdown_asyncgens())

        except Exception as e:
            logger.warning(f"Failed to shutdown async generators cleanly: {e}")

        loop.close()
        asyncio.set_event_loop(None)
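A hedged usage sketch of the helper above (the module path `tasks.jobs.attack_paths.utils` is assumed from the test patch targets, and the wrapped function is hypothetical):

```python
import asyncio

from tasks.jobs.attack_paths.utils import call_within_event_loop  # assumed path


def ingest() -> str:
    # Hypothetical Cartography-style entry point: inside the wrapper a
    # current event loop is guaranteed to exist for this thread.
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(asyncio.sleep(0, result="done"))


# Safe to call from a Celery task or a DRF view, where no loop is running.
result = call_within_event_loop(ingest)
assert result == "done"
```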
@@ -13,7 +13,6 @@ from api.models import (
    ScanSummary,
    Tenant,
)
from tasks.jobs.attack_paths.db_utils import get_provider_graph_database_names

logger = get_task_logger(__name__)

@@ -33,13 +32,13 @@ def delete_provider(tenant_id: str, pk: str):
    Raises:
        Provider.DoesNotExist: If no instance with the provided primary key exists.
    """
    # Delete the Attack Paths' graph databases related to the provider
    graph_database_names = get_provider_graph_database_names(tenant_id, pk)
    # Delete the Attack Paths' graph data related to the provider
    tenant_database_name = graph_database.get_database_name(tenant_id)
    try:
        for graph_database_name in graph_database_names:
            graph_database.drop_database(graph_database_name)
        graph_database.drop_subgraph(tenant_database_name, str(pk))

    except graph_database.GraphDatabaseQueryException as gdb_error:
        logger.error(f"Error deleting Provider databases: {gdb_error}")
        logger.error(f"Error deleting Provider graph data: {gdb_error}")
        raise

    # Get all provider related data and delete them in batches
@@ -90,6 +89,13 @@ def delete_tenant(pk: str):
            summary = delete_provider(pk, provider.id)
            deletion_summary.update(summary)

    try:
        tenant_database_name = graph_database.get_database_name(pk)
        graph_database.drop_database(tenant_database_name)
    except graph_database.GraphDatabaseQueryException as gdb_error:
        logger.error(f"Error dropping Tenant graph database: {gdb_error}")
        raise

    Tenant.objects.using(MainRouter.admin_db).filter(id=pk).delete()

    return deletion_summary
@@ -3,7 +3,7 @@ from types import SimpleNamespace
from unittest.mock import MagicMock, call, patch

import pytest
from tasks.jobs.attack_paths import prowler as prowler_module
from tasks.jobs.attack_paths import findings as findings_module
from tasks.jobs.attack_paths.scan import run as attack_paths_run

from api.models import (
@@ -21,7 +21,65 @@ from prowler.lib.check.models import Severity

@pytest.mark.django_db
class TestAttackPathsRun:
    def test_run_success_flow(self, tenants_fixture, providers_fixture, scans_fixture):
    # Patching with decorators, as using context managers raised a
    # `SyntaxError: too many statically nested blocks` error
    @patch("tasks.jobs.attack_paths.scan.graph_database.drop_database")
    @patch(
        "tasks.jobs.attack_paths.scan.utils.call_within_event_loop",
        side_effect=lambda fn, *a, **kw: fn(*a, **kw),
    )
    @patch(
        "tasks.jobs.attack_paths.scan.db_utils.get_old_attack_paths_scans",
        return_value=[],
    )
    @patch("tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
    @patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
    @patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
    @patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
    @patch("tasks.jobs.attack_paths.scan.sync.create_sync_indexes")
    @patch("tasks.jobs.attack_paths.scan.findings.analysis")
    @patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes")
    @patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
    @patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run")
    @patch("tasks.jobs.attack_paths.scan.graph_database.clear_cache")
    @patch("tasks.jobs.attack_paths.scan.graph_database.create_database")
    @patch(
        "tasks.jobs.attack_paths.scan.graph_database.get_uri",
        return_value="bolt://neo4j",
    )
    @patch(
        "tasks.jobs.attack_paths.scan.initialize_prowler_provider",
        return_value=MagicMock(_enabled_regions=["us-east-1"]),
    )
    @patch(
        "tasks.jobs.attack_paths.scan.rls_transaction",
        new=lambda *args, **kwargs: nullcontext(),
    )
    def test_run_success_flow(
        self,
        mock_init_provider,
        mock_get_uri,
        mock_create_db,
        mock_clear_cache,
        mock_cartography_indexes,
        mock_cartography_analysis,
        mock_cartography_ontology,
        mock_findings_indexes,
        mock_findings_analysis,
        mock_sync_indexes,
        mock_drop_subgraph,
        mock_sync,
        mock_starting,
        mock_update_progress,
        mock_finish,
        mock_get_old_scans,
        mock_event_loop,
        mock_drop_db,
        tenants_fixture,
        providers_fixture,
        scans_fixture,
    ):
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
@@ -45,66 +103,22 @@ class TestAttackPathsRun:
        ingestion_fn = MagicMock(return_value=ingestion_result)

        with (
            patch(
                "tasks.jobs.attack_paths.scan.rls_transaction",
                new=lambda *args, **kwargs: nullcontext(),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.initialize_prowler_provider",
                return_value=MagicMock(_enabled_regions=["us-east-1"]),
            ),
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_uri",
                return_value="bolt://neo4j",
            ),
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_database_name",
                return_value="db-scan-id",
                side_effect=["db-scan-id", "tenant-db"],
            ) as mock_get_db_name,
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.create_database"
            ) as mock_create_db,
            patch(
                "tasks.jobs.attack_paths.scan.graph_database.get_session",
                return_value=session_ctx,
            ) as mock_get_session,
            patch("tasks.jobs.attack_paths.scan.graph_database.clear_cache"),
            patch(
                "tasks.jobs.attack_paths.scan.cartography_create_indexes.run"
            ) as mock_cartography_indexes,
            patch(
                "tasks.jobs.attack_paths.scan.cartography_analysis.run"
            ) as mock_cartography_analysis,
            patch(
                "tasks.jobs.attack_paths.scan.cartography_ontology.run"
            ) as mock_cartography_ontology,
            patch(
                "tasks.jobs.attack_paths.scan.prowler.create_indexes"
            ) as mock_prowler_indexes,
            patch(
                "tasks.jobs.attack_paths.scan.prowler.analysis"
            ) as mock_prowler_analysis,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.retrieve_attack_paths_scan",
                return_value=attack_paths_scan,
            ) as mock_retrieve_scan,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan"
            ) as mock_starting,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress"
            ) as mock_update_progress,
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan"
            ) as mock_finish,
            patch(
                "tasks.jobs.attack_paths.scan.get_cartography_ingestion_function",
                return_value=ingestion_fn,
            ) as mock_get_ingestion,
            patch(
                "tasks.jobs.attack_paths.scan._call_within_event_loop",
                side_effect=lambda fn, *a, **kw: fn(*a, **kw),
            ) as mock_event_loop,
        ):
            result = attack_paths_run(str(tenant.id), str(scan.id), "task-123")

@@ -112,29 +126,40 @@ class TestAttackPathsRun:
        mock_retrieve_scan.assert_called_once_with(str(tenant.id), str(scan.id))
        mock_starting.assert_called_once()
        config = mock_starting.call_args[0][2]
        assert config.neo4j_database == "db-scan-id"
        assert config.neo4j_database == "tenant-db"
        mock_get_db_name.assert_has_calls(
            [call(attack_paths_scan.id, temporary=True), call(provider.tenant_id)]
        )

        mock_create_db.assert_called_once_with("db-scan-id")
        mock_get_session.assert_called_once_with("db-scan-id")
        mock_cartography_indexes.assert_called_once_with(mock_session, config)
        mock_prowler_indexes.assert_called_once_with(mock_session)
        mock_cartography_analysis.assert_called_once_with(mock_session, config)
        mock_cartography_ontology.assert_called_once_with(mock_session, config)
        mock_prowler_analysis.assert_called_once_with(
            mock_session,
            provider,
            str(scan.id),
            config,
        mock_create_db.assert_has_calls([call("db-scan-id"), call("tenant-db")])
        mock_get_session.assert_has_calls([call("db-scan-id"), call("tenant-db")])
        assert mock_cartography_indexes.call_count == 2
        mock_findings_indexes.assert_has_calls([call(mock_session), call(mock_session)])
        mock_sync_indexes.assert_called_once_with(mock_session)
        # These use tmp_cartography_config (neo4j_database="db-scan-id")
        mock_cartography_analysis.assert_called_once()
        mock_cartography_ontology.assert_called_once()
        mock_findings_analysis.assert_called_once()
        mock_drop_subgraph.assert_called_once_with(
            database="tenant-db",
            provider_id=str(provider.id),
        )
        mock_sync.assert_called_once_with(
            source_database="db-scan-id",
            target_database="tenant-db",
            provider_id=str(provider.id),
        )
        mock_get_ingestion.assert_called_once_with(provider.provider)
        mock_event_loop.assert_called_once()
        mock_update_progress.assert_any_call(attack_paths_scan, 1)
        mock_update_progress.assert_any_call(attack_paths_scan, 2)
        mock_update_progress.assert_any_call(attack_paths_scan, 95)
        mock_update_progress.assert_any_call(attack_paths_scan, 97)
        mock_update_progress.assert_any_call(attack_paths_scan, 98)
        mock_update_progress.assert_any_call(attack_paths_scan, 99)
        mock_finish.assert_called_once_with(
            attack_paths_scan, StateChoices.COMPLETED, ingestion_result
        )
        mock_get_db_name.assert_called_once_with(attack_paths_scan.id)

    def test_run_failure_marks_scan_failed(
        self, tenants_fixture, providers_fixture, scans_fixture
@@ -181,8 +206,8 @@ class TestAttackPathsRun:
            ),
            patch("tasks.jobs.attack_paths.scan.cartography_create_indexes.run"),
            patch("tasks.jobs.attack_paths.scan.cartography_analysis.run"),
            patch("tasks.jobs.attack_paths.scan.prowler.create_indexes"),
            patch("tasks.jobs.attack_paths.scan.prowler.analysis"),
            patch("tasks.jobs.attack_paths.scan.findings.create_findings_indexes"),
            patch("tasks.jobs.attack_paths.scan.findings.analysis"),
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.retrieve_attack_paths_scan",
                return_value=attack_paths_scan,
@@ -194,12 +219,13 @@ class TestAttackPathsRun:
            patch(
                "tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan"
            ) as mock_finish,
            patch("tasks.jobs.attack_paths.scan.graph_database.drop_database"),
            patch(
                "tasks.jobs.attack_paths.scan.get_cartography_ingestion_function",
                return_value=ingestion_fn,
            ),
            patch(
                "tasks.jobs.attack_paths.scan._call_within_event_loop",
                "tasks.jobs.attack_paths.scan.utils.call_within_event_loop",
                side_effect=lambda fn, *a, **kw: fn(*a, **kw),
            ),
            patch(
@@ -261,15 +287,17 @@ class TestAttackPathsRun:


@pytest.mark.django_db
class TestAttackPathsProwlerHelpers:
    def test_create_indexes_executes_all_statements(self):
class TestAttackPathsFindingsHelpers:
    def test_create_findings_indexes_executes_all_statements(self):
        mock_session = MagicMock()
        with patch("tasks.jobs.attack_paths.prowler.run_write_query") as mock_run_write:
            prowler_module.create_indexes(mock_session)
        with patch("tasks.jobs.attack_paths.indexes.run_write_query") as mock_run_write:
            findings_module.create_findings_indexes(mock_session)

        assert mock_run_write.call_count == len(prowler_module.INDEX_STATEMENTS)
        from tasks.jobs.attack_paths.indexes import FINDINGS_INDEX_STATEMENTS

        assert mock_run_write.call_count == len(FINDINGS_INDEX_STATEMENTS)
        mock_run_write.assert_has_calls(
            [call(mock_session, stmt) for stmt in prowler_module.INDEX_STATEMENTS]
            [call(mock_session, stmt) for stmt in FINDINGS_INDEX_STATEMENTS]
        )

    def test_load_findings_batches_requests(self, providers_fixture):
@@ -277,25 +305,35 @@ class TestAttackPathsProwlerHelpers:
        provider.provider = Provider.ProviderChoices.AWS
        provider.save()

        # Create a generator that yields two batches
        # Create mock Finding objects with to_dict() method
        mock_finding_1 = MagicMock()
        mock_finding_1.to_dict.return_value = {"id": "1", "resource_uid": "r-1"}
        mock_finding_2 = MagicMock()
        mock_finding_2.to_dict.return_value = {"id": "2", "resource_uid": "r-2"}

        # Create a generator that yields two batches of Finding instances
        def findings_generator():
            yield [{"id": "1", "resource_uid": "r-1"}]
            yield [{"id": "2", "resource_uid": "r-2"}]
            yield [mock_finding_1]
            yield [mock_finding_2]

        config = SimpleNamespace(update_tag=12345)
        mock_session = MagicMock()

        with (
            patch(
                "tasks.jobs.attack_paths.prowler.get_root_node_label",
                "tasks.jobs.attack_paths.findings.get_root_node_label",
                return_value="AWSAccount",
            ),
            patch(
                "tasks.jobs.attack_paths.prowler.get_node_uid_field",
                "tasks.jobs.attack_paths.findings.get_node_uid_field",
                return_value="arn",
            ),
            patch(
                "tasks.jobs.attack_paths.findings.get_provider_resource_label",
                return_value="AWSResource",
            ),
        ):
            prowler_module.load_findings(
            findings_module.load_findings(
                mock_session, findings_generator(), provider, config
            )

@@ -317,14 +355,14 @@ class TestAttackPathsProwlerHelpers:
        second_batch.single.return_value = {"deleted_findings_count": 0}
        mock_session.run.side_effect = [first_batch, second_batch]

        prowler_module.cleanup_findings(mock_session, provider, config)
        findings_module.cleanup_findings(mock_session, provider, config)

        assert mock_session.run.call_count == 2
        params = mock_session.run.call_args.args[1]
        assert params["provider_uid"] == str(provider.uid)
        assert params["last_updated"] == config.update_tag

    def test_get_provider_last_scan_findings_returns_latest_scan_data(
    def test_stream_findings_with_resources_returns_latest_scan_data(
        self,
        tenants_fixture,
        providers_fixture,
@@ -402,15 +440,18 @@ class TestAttackPathsProwlerHelpers:

        latest_scan.refresh_from_db()

        with patch(
            "tasks.jobs.attack_paths.prowler.rls_transaction",
            new=lambda *args, **kwargs: nullcontext(),
        ), patch(
            "tasks.jobs.attack_paths.prowler.READ_REPLICA_ALIAS",
            "default",
        with (
            patch(
                "tasks.jobs.attack_paths.findings.rls_transaction",
                new=lambda *args, **kwargs: nullcontext(),
            ),
            patch(
                "tasks.jobs.attack_paths.findings.READ_REPLICA_ALIAS",
                "default",
            ),
        ):
            # Generator yields batches, collect all findings from all batches
            findings_batches = prowler_module.get_provider_last_scan_findings(
            findings_batches = findings_module.stream_findings_with_resources(
                provider,
                str(latest_scan.id),
            )
@@ -419,18 +460,18 @@ class TestAttackPathsProwlerHelpers:
            findings_data.extend(batch)

        assert len(findings_data) == 1
        finding_dict = findings_data[0]
        assert finding_dict["id"] == str(finding.id)
        assert finding_dict["resource_uid"] == resource.uid
        assert finding_dict["check_title"] == "Check title"
        assert finding_dict["scan_id"] == str(latest_scan.id)
        finding_result = findings_data[0]
        assert finding_result.id == str(finding.id)
        assert finding_result.resource_uid == resource.uid
        assert finding_result.check_title == "Check title"
        assert finding_result.scan_id == str(latest_scan.id)

    def test_enrich_and_flatten_batch_single_resource(
    def test_enrich_batch_with_resources_single_resource(
        self,
        tenants_fixture,
        providers_fixture,
    ):
        """One finding + one resource = one output dict"""
        """One finding + one resource = one output Finding instance"""
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
@@ -493,25 +534,27 @@ class TestAttackPathsProwlerHelpers:
            "muted_reason": finding.muted_reason,
        }

        # _enrich_and_flatten_batch queries ResourceFindingMapping directly
        # _enrich_batch_with_resources queries ResourceFindingMapping directly
        # No RLS mock needed - test DB doesn't enforce RLS policies
        with patch(
            "tasks.jobs.attack_paths.prowler.READ_REPLICA_ALIAS",
            "tasks.jobs.attack_paths.findings.READ_REPLICA_ALIAS",
            "default",
        ):
            result = prowler_module._enrich_and_flatten_batch([finding_dict])
            result = findings_module._enrich_batch_with_resources(
                [finding_dict], str(tenant.id)
            )

        assert len(result) == 1
        assert result[0]["resource_uid"] == resource.uid
        assert result[0]["id"] == str(finding.id)
        assert result[0]["status"] == "FAIL"
        assert result[0].resource_uid == resource.uid
        assert result[0].id == str(finding.id)
        assert result[0].status == "FAIL"

    def test_enrich_and_flatten_batch_multiple_resources(
    def test_enrich_batch_with_resources_multiple_resources(
        self,
        tenants_fixture,
        providers_fixture,
    ):
        """One finding + three resources = three output dicts"""
        """One finding + three resources = three output Finding instances"""
        tenant = tenants_fixture[0]
        provider = providers_fixture[0]
        provider.provider = Provider.ProviderChoices.AWS
@@ -579,24 +622,26 @@ class TestAttackPathsProwlerHelpers:
            "muted_reason": finding.muted_reason,
        }

        # _enrich_and_flatten_batch queries ResourceFindingMapping directly
        # _enrich_batch_with_resources queries ResourceFindingMapping directly
        # No RLS mock needed - test DB doesn't enforce RLS policies
        with patch(
            "tasks.jobs.attack_paths.prowler.READ_REPLICA_ALIAS",
            "tasks.jobs.attack_paths.findings.READ_REPLICA_ALIAS",
            "default",
        ):
            result = prowler_module._enrich_and_flatten_batch([finding_dict])
            result = findings_module._enrich_batch_with_resources(
                [finding_dict], str(tenant.id)
            )

        assert len(result) == 3
        result_resource_uids = {r["resource_uid"] for r in result}
        result_resource_uids = {r.resource_uid for r in result}
        assert result_resource_uids == {r.uid for r in resources}

        # All should have same finding data
        for r in result:
            assert r["id"] == str(finding.id)
            assert r["status"] == "FAIL"
            assert r.id == str(finding.id)
            assert r.status == "FAIL"

    def test_enrich_and_flatten_batch_no_resources_skips(
    def test_enrich_batch_with_resources_no_resources_skips(
        self,
        tenants_fixture,
        providers_fixture,
@@ -652,12 +697,14 @@ class TestAttackPathsProwlerHelpers:
        # Mock logger to verify no warning is emitted
        with (
            patch(
                "tasks.jobs.attack_paths.prowler.READ_REPLICA_ALIAS",
                "tasks.jobs.attack_paths.findings.READ_REPLICA_ALIAS",
                "default",
            ),
            patch("tasks.jobs.attack_paths.prowler.logger") as mock_logger,
            patch("tasks.jobs.attack_paths.findings.logger") as mock_logger,
        ):
            result = prowler_module._enrich_and_flatten_batch([finding_dict])
            result = findings_module._enrich_batch_with_resources(
                [finding_dict], str(tenant.id)
            )

        assert len(result) == 0
        mock_logger.warning.assert_not_called()
@@ -670,11 +717,11 @@ class TestAttackPathsProwlerHelpers:
        scan_id = "some-scan-id"

        with (
            patch("tasks.jobs.attack_paths.prowler.rls_transaction") as mock_rls,
            patch("tasks.jobs.attack_paths.prowler.Finding") as mock_finding,
            patch("tasks.jobs.attack_paths.findings.rls_transaction") as mock_rls,
            patch("tasks.jobs.attack_paths.findings.Finding") as mock_finding,
        ):
            # Create generator but don't iterate
            prowler_module.get_provider_last_scan_findings(provider, scan_id)
            findings_module.stream_findings_with_resources(provider, scan_id)

        # Nothing should be called yet
        mock_rls.assert_not_called()
@@ -695,14 +742,18 @@ class TestAttackPathsProwlerHelpers:

        with (
            patch(
                "tasks.jobs.attack_paths.prowler.get_root_node_label",
                "tasks.jobs.attack_paths.findings.get_root_node_label",
                return_value="AWSAccount",
            ),
            patch(
                "tasks.jobs.attack_paths.prowler.get_node_uid_field",
                "tasks.jobs.attack_paths.findings.get_node_uid_field",
                return_value="arn",
            ),
            patch(
                "tasks.jobs.attack_paths.findings.get_provider_resource_label",
                return_value="AWSResource",
            ),
        ):
            prowler_module.load_findings(mock_session, empty_gen(), provider, config)
            findings_module.load_findings(mock_session, empty_gen(), provider, config)

        mock_session.run.assert_not_called()
@@ -11,14 +11,15 @@ from tasks.jobs.deletion import delete_provider, delete_tenant
@pytest.mark.django_db
class TestDeleteProvider:
    def test_delete_provider_success(self, providers_fixture):
        with patch(
            "tasks.jobs.deletion.get_provider_graph_database_names"
        ) as mock_get_provider_graph_database_names, patch(
            "tasks.jobs.deletion.graph_database.drop_database"
        ) as mock_drop_database:
            graph_db_names = ["graph-db-1", "graph-db-2"]
            mock_get_provider_graph_database_names.return_value = graph_db_names

        with (
            patch(
                "tasks.jobs.deletion.graph_database.get_database_name",
                return_value="tenant-db",
            ) as mock_get_database_name,
            patch(
                "tasks.jobs.deletion.graph_database.drop_subgraph"
            ) as mock_drop_subgraph,
        ):
            instance = providers_fixture[0]
            tenant_id = str(instance.tenant_id)
            result = delete_provider(tenant_id, instance.id)
@@ -27,33 +28,32 @@ class TestDeleteProvider:
            with pytest.raises(ObjectDoesNotExist):
                Provider.objects.get(pk=instance.id)

            mock_get_provider_graph_database_names.assert_called_once_with(
                tenant_id, instance.id
            )
            mock_drop_database.assert_has_calls(
                [call(graph_db_name) for graph_db_name in graph_db_names]
            mock_get_database_name.assert_called_once_with(tenant_id)
            mock_drop_subgraph.assert_called_once_with(
                "tenant-db",
                str(instance.id),
            )

    def test_delete_provider_does_not_exist(self, tenants_fixture):
        with patch(
            "tasks.jobs.deletion.get_provider_graph_database_names"
        ) as mock_get_provider_graph_database_names, patch(
            "tasks.jobs.deletion.graph_database.drop_database"
        ) as mock_drop_database:
            graph_db_names = ["graph-db-1"]
            mock_get_provider_graph_database_names.return_value = graph_db_names

        with (
            patch(
                "tasks.jobs.deletion.graph_database.get_database_name",
                return_value="tenant-db",
            ) as mock_get_database_name,
            patch(
                "tasks.jobs.deletion.graph_database.drop_subgraph"
            ) as mock_drop_subgraph,
        ):
            tenant_id = str(tenants_fixture[0].id)
            non_existent_pk = "babf6796-cfcc-4fd3-9dcf-88d012247645"

            with pytest.raises(ObjectDoesNotExist):
                delete_provider(tenant_id, non_existent_pk)

            mock_get_provider_graph_database_names.assert_called_once_with(
                tenant_id, non_existent_pk
            )
            mock_drop_database.assert_has_calls(
                [call(graph_db_name) for graph_db_name in graph_db_names]
            mock_get_database_name.assert_called_once_with(tenant_id)
            mock_drop_subgraph.assert_called_once_with(
                "tenant-db",
                non_existent_pk,
            )


@@ -63,21 +63,21 @@ class TestDeleteTenant:
        """
        Test successful deletion of a tenant and its related data.
        """
        with patch(
            "tasks.jobs.deletion.get_provider_graph_database_names"
        ) as mock_get_provider_graph_database_names, patch(
            "tasks.jobs.deletion.graph_database.drop_database"
        ) as mock_drop_database:
        with (
            patch(
                "tasks.jobs.deletion.graph_database.get_database_name",
                return_value="tenant-db",
            ) as mock_get_database_name,
            patch(
                "tasks.jobs.deletion.graph_database.drop_subgraph"
            ) as mock_drop_subgraph,
            patch(
                "tasks.jobs.deletion.graph_database.drop_database"
            ) as mock_drop_database,
        ):
            tenant = tenants_fixture[0]
            providers = list(Provider.objects.filter(tenant_id=tenant.id))

            graph_db_names_per_provider = [
                [f"graph-db-{provider.id}"] for provider in providers
            ]
            mock_get_provider_graph_database_names.side_effect = (
                graph_db_names_per_provider
            )

            # Ensure the tenant and related providers exist before deletion
            assert Tenant.objects.filter(id=tenant.id).exists()
            assert providers
@@ -89,30 +89,42 @@ class TestDeleteTenant:
            assert not Tenant.objects.filter(id=tenant.id).exists()
            assert not Provider.objects.filter(tenant_id=tenant.id).exists()

            expected_calls = [
                call(provider.tenant_id, provider.id) for provider in providers
            # get_database_name is called once per provider + once for drop_database
            expected_get_db_calls = [call(tenant.id) for _ in providers] + [
                call(tenant.id)
            ]
            mock_get_provider_graph_database_names.assert_has_calls(
                expected_calls, any_order=True
            mock_get_database_name.assert_has_calls(
                expected_get_db_calls, any_order=True
            )
            assert mock_get_provider_graph_database_names.call_count == len(
                expected_calls
            )
            expected_drop_calls = [
                call(graph_db_name[0]) for graph_db_name in graph_db_names_per_provider
            assert mock_get_database_name.call_count == len(expected_get_db_calls)

            expected_drop_subgraph_calls = [
                call("tenant-db", str(provider.id)) for provider in providers
            ]
            mock_drop_database.assert_has_calls(expected_drop_calls, any_order=True)
            assert mock_drop_database.call_count == len(expected_drop_calls)
            mock_drop_subgraph.assert_has_calls(
                expected_drop_subgraph_calls,
                any_order=True,
            )
            assert mock_drop_subgraph.call_count == len(expected_drop_subgraph_calls)

            mock_drop_database.assert_called_once_with("tenant-db")

    def test_delete_tenant_with_no_providers(self, tenants_fixture):
        """
        Test deletion of a tenant with no related providers.
        """
        with patch(
            "tasks.jobs.deletion.get_provider_graph_database_names"
        ) as mock_get_provider_graph_database_names, patch(
            "tasks.jobs.deletion.graph_database.drop_database"
        ) as mock_drop_database:
        with (
            patch(
                "tasks.jobs.deletion.graph_database.get_database_name",
                return_value="tenant-db",
            ) as mock_get_database_name,
            patch(
                "tasks.jobs.deletion.graph_database.drop_subgraph"
            ) as mock_drop_subgraph,
            patch(
                "tasks.jobs.deletion.graph_database.drop_database"
            ) as mock_drop_database,
        ):
            tenant = tenants_fixture[1]  # Assume this tenant has no providers
            providers = Provider.objects.filter(tenant_id=tenant.id)

@@ -126,5 +138,7 @@ class TestDeleteTenant:
            assert deletion_summary == {}  # No providers, so empty summary
            assert not Tenant.objects.filter(id=tenant.id).exists()

            mock_get_provider_graph_database_names.assert_not_called()
            mock_drop_database.assert_not_called()
            # get_database_name is called once for drop_database
            mock_get_database_name.assert_called_once_with(tenant.id)
            mock_drop_subgraph.assert_not_called()
            mock_drop_database.assert_called_once_with("tenant-db")
25
dashboard/compliance/hipaa_azure.py
Normal file
@@ -0,0 +1,25 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):

    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
@@ -86,3 +86,81 @@ docker compose up -d
<Note>
We are evaluating adding these values to the default `docker-compose.yml` to avoid this issue in future releases.
</Note>

### API Container Fails to Start with JWT Key Permission Error

See [GitHub Issue #8897](https://github.com/prowler-cloud/prowler/issues/8897) for more details.

When deploying Prowler via Docker Compose on a fresh installation, the API container may fail to start with permission errors related to JWT RSA key file generation. This issue is commonly observed on Linux systems (Ubuntu, Debian, cloud VMs) and Windows with Docker Desktop, but not typically on macOS.

**Error Message:**

Checking the API container logs reveals:

```bash
PermissionError: [Errno 13] Permission denied: '/home/prowler/.config/prowler-api/jwt_private.pem'
```

Or:

```bash
Token generation failed due to invalid key configuration. Provide valid DJANGO_TOKEN_SIGNING_KEY and DJANGO_TOKEN_VERIFYING_KEY in the environment.
```

**Root Cause:**

This permission mismatch is caused by UID (User ID) mapping differences between the host system and Docker containers (a quick verification sketch follows this list):

* The API container runs as user `prowler` with UID/GID 1000
* In environments like WSL2, the host user may have a different UID than the container user
* Docker creates the mounted volume directory `./_data/api` on the host, often with the host user's UID or root ownership (UID 0)
* When the application attempts to write JWT key files (`jwt_private.pem` and `jwt_public.pem`), the operation fails because the container's UID 1000 does not have write permissions to the host-owned directory
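To confirm this failure mode on a given host before applying a fix, a minimal sketch (the `./_data/api` path assumes the default Docker Compose layout) is to compare the owner UID of the mounted directory against the container user's UID:

```python
import os

# The API container's 'prowler' user runs as UID 1000.
CONTAINER_UID = 1000

# Owner UID of the host directory mounted into the container
# (assumes the default ./_data/api volume location).
volume_uid = os.stat("./_data/api").st_uid

print(f"Host volume owner UID: {volume_uid}")
if volume_uid != CONTAINER_UID:
    print("UID mismatch: the container cannot write JWT key files here.")
```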
**Solutions:**

There are two approaches to resolve this issue:

**Option 1: Fix Volume Ownership (Resolve UID Mapping)**

Change the ownership of the volume directory to match the container user's UID (1000):

```bash
# The container user 'prowler' has UID 1000
# This command changes the directory ownership to UID 1000
sudo chown -R 1000:1000 ./_data/api
```

Then start Docker Compose:

```bash
docker compose up -d
```

This solution directly addresses the UID mapping mismatch by ensuring the volume directory is owned by the same UID that the container process uses.

**Option 2: Use Environment Variables (Skip File Storage)**

Generate JWT RSA keys manually and provide them via environment variables to bypass file-based key storage entirely:

```bash
# Generate RSA keys
openssl genrsa -out jwt_private.pem 4096
openssl rsa -in jwt_private.pem -pubout -out jwt_public.pem

# Extract key content (removes headers/footers and newlines)
PRIVATE_KEY=$(awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' jwt_private.pem)
PUBLIC_KEY=$(awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' jwt_public.pem)
```

Add the following to the `.env` file:

```env
DJANGO_TOKEN_SIGNING_KEY=<content of jwt_private.pem>
DJANGO_TOKEN_VERIFYING_KEY=<content of jwt_public.pem>
```

When these environment variables are set, the API will use them directly instead of attempting to write key files to the mounted volume.

<Note>
A fix addressing this permission issue is being evaluated in [PR #9953](https://github.com/prowler-cloud/prowler/pull/9953).
</Note>
@@ -2,6 +2,14 @@

All notable changes to the **Prowler SDK** are documented in this file.

## [5.19.0] (Prowler UNRELEASED)

### 🔄 Changed

- Update Azure Monitor service metadata to new format [(#9622)](https://github.com/prowler-cloud/prowler/pull/9622)

---

## [5.18.0] (Prowler v5.18.0)

### 🚀 Added
@@ -15,6 +23,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `rds_instance_extended_support` check for AWS provider [(#9865)](https://github.com/prowler-cloud/prowler/pull/9865)
- `OpenStack` provider support with Compute service including 1 security check [(#9811)](https://github.com/prowler-cloud/prowler/pull/9811)
- `OpenStack` documentation for the support in the CLI [(#9848)](https://github.com/prowler-cloud/prowler/pull/9848)
- Add HIPAA compliance framework for the Azure provider [(#9957)](https://github.com/prowler-cloud/prowler/pull/9957)
- Cloudflare provider credentials as constructor parameters (`api_token`, `api_key`, `api_email`) [(#9907)](https://github.com/prowler-cloud/prowler/pull/9907)

### 🔄 Changed
@@ -35,7 +44,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update Azure Network service metadata to new format [(#9624)](https://github.com/prowler-cloud/prowler/pull/9624)
- Update Azure Storage service metadata to new format [(#9628)](https://github.com/prowler-cloud/prowler/pull/9628)

### 🐛 Fixed
### 🐞 Fixed

- Duplicated findings in `entra_user_with_vm_access_has_mfa` check when user has multiple VM access roles [(#9914)](https://github.com/prowler-cloud/prowler/pull/9914)
- Jira integration failing with `INVALID_INPUT` error when sending findings with long resource UIDs exceeding 255-character summary limit [(#9926)](https://github.com/prowler-cloud/prowler/pull/9926)
820
prowler/compliance/azure/hipaa_azure.json
Normal file
@@ -0,0 +1,820 @@
|
||||
{
|
||||
"Framework": "HIPAA",
|
||||
"Name": "HIPAA compliance framework for Azure",
|
||||
"Version": "",
|
||||
"Provider": "Azure",
|
||||
"Description": "The Health Insurance Portability and Accountability Act of 1996 (HIPAA) is legislation that helps US workers to retain health insurance coverage when they change or lose jobs. The legislation also seeks to encourage electronic health records to improve the efficiency and quality of the US healthcare system through improved information sharing. This framework maps HIPAA requirements to Microsoft Azure security best practices.",
|
||||
"Requirements": [
|
||||
{
|
||||
"Id": "164_308_a_1_ii_a",
|
||||
"Name": "164.308(a)(1)(ii)(A) Risk analysis",
|
||||
"Description": "Conduct an accurate and thorough assessment of the potential risks and vulnerabilities to the confidentiality, integrity, and availability of electronic protected health information held by the covered entity or business associate.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_1_ii_a",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"defender_ensure_defender_for_app_services_is_on",
|
||||
"defender_ensure_defender_for_sql_servers_is_on",
|
||||
"defender_ensure_defender_for_storage_is_on",
|
||||
"defender_ensure_defender_for_keyvault_is_on",
|
||||
"defender_ensure_defender_for_arm_is_on",
|
||||
"defender_ensure_defender_for_dns_is_on",
|
||||
"defender_ensure_defender_for_containers_is_on",
|
||||
"defender_ensure_defender_for_cosmosdb_is_on",
|
||||
"defender_ensure_mcas_is_enabled",
|
||||
"policy_ensure_asc_enforcement_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_1_ii_b",
|
||||
"Name": "164.308(a)(1)(ii)(B) Risk Management",
|
||||
"Description": "Implement security measures sufficient to reduce risks and vulnerabilities to a reasonable and appropriate level to comply with 164.306(a): Ensure the confidentiality, integrity, and availability of all electronic protected health information the covered entity or business associate creates, receives, maintains, or transmits.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_1_ii_b",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_infrastructure_encryption_is_enabled",
|
||||
"storage_blob_public_access_level_is_disabled",
|
||||
"storage_default_network_access_rule_is_denied",
|
||||
"storage_ensure_private_endpoints_in_storage_accounts",
|
||||
"sqlserver_tde_encryption_enabled",
|
||||
"sqlserver_tde_encrypted_with_cmk",
|
||||
"sqlserver_unrestricted_inbound_access",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_rbac_enabled",
|
||||
"keyvault_private_endpoints",
|
||||
"vm_ensure_attached_disks_encrypted_with_cmk",
|
||||
"vm_ensure_unattached_disks_encrypted_with_cmk",
|
||||
"network_ssh_internet_access_restricted",
|
||||
"network_rdp_internet_access_restricted",
|
||||
"network_http_internet_access_restricted",
|
||||
"network_udp_internet_access_restricted",
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_custom_role_has_permissions_to_administer_resource_locks",
|
||||
"cosmosdb_account_firewall_use_selected_networks",
|
||||
"cosmosdb_account_use_private_endpoints",
|
||||
"aks_clusters_public_access_disabled",
|
||||
"aks_clusters_created_with_private_nodes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_1_ii_d",
|
||||
"Name": "164.308(a)(1)(ii)(D) Information system activity review",
|
||||
"Description": "Implement procedures to regularly review records of information system activity, such as audit logs, access reports, and security incident tracking reports.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_1_ii_d",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"monitor_diagnostic_setting_with_appropriate_categories",
|
||||
"monitor_diagnostic_settings_exists",
|
||||
"monitor_alert_create_policy_assignment",
|
||||
"monitor_alert_delete_policy_assignment",
|
||||
"monitor_alert_create_update_nsg",
|
||||
"monitor_alert_delete_nsg",
|
||||
"monitor_alert_create_update_security_solution",
|
||||
"monitor_alert_delete_security_solution",
|
||||
"sqlserver_auditing_enabled",
|
||||
"sqlserver_auditing_retention_90_days",
|
||||
"keyvault_logging_enabled",
|
||||
"network_watcher_enabled",
|
||||
"network_flow_log_captured_sent",
|
||||
"network_flow_log_more_than_90_days",
|
||||
"app_http_logs_enabled",
|
||||
"appinsights_ensure_is_configured"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_3_i",
|
||||
"Name": "164.308(a)(3)(i) Workforce security",
|
||||
"Description": "Implement policies and procedures to ensure that all members of its workforce have appropriate access to electronic protected health information, as provided under paragraph (a)(4) of this section, and to prevent those workforce members who do not have access under paragraph (a)(4) of this section from obtaining access to electronic protected health information.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_3_i",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_blob_public_access_level_is_disabled",
|
||||
"storage_default_network_access_rule_is_denied",
|
||||
"sqlserver_unrestricted_inbound_access",
|
||||
"network_ssh_internet_access_restricted",
|
||||
"network_rdp_internet_access_restricted",
|
||||
"network_http_internet_access_restricted",
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_role_user_access_admin_restricted",
|
||||
"containerregistry_not_publicly_accessible",
|
||||
"app_function_not_publicly_accessible",
|
||||
"aisearch_service_not_publicly_accessible",
|
||||
"cosmosdb_account_firewall_use_selected_networks"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_3_ii_a",
|
||||
"Name": "164.308(a)(3)(ii)(A) Authorization and/or supervision",
|
||||
"Description": "Implement procedures for the authorization and/or supervision of workforce members who work with electronic protected health information or in locations where it might be accessed.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_3_ii_a",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"monitor_diagnostic_setting_with_appropriate_categories",
|
||||
"monitor_diagnostic_settings_exists",
|
||||
"sqlserver_auditing_enabled",
|
||||
"keyvault_logging_enabled",
|
||||
"entra_privileged_user_has_mfa",
|
||||
"entra_non_privileged_user_has_mfa",
|
||||
"entra_security_defaults_enabled",
|
||||
"entra_conditional_access_policy_require_mfa_for_management_api",
|
||||
"entra_user_with_vm_access_has_mfa",
|
||||
"network_flow_log_captured_sent",
|
||||
"app_http_logs_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_3_ii_b",
|
||||
"Name": "164.308(a)(3)(ii)(B) Workforce clearance procedure",
|
||||
"Description": "Implement procedures to determine that the access of a workforce member to electronic protected health information is appropriate.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_3_ii_b",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "entra"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_role_user_access_admin_restricted",
|
||||
"iam_custom_role_has_permissions_to_administer_resource_locks",
|
||||
"entra_global_admin_in_less_than_five_users",
|
||||
"entra_policy_default_users_cannot_create_security_groups",
|
||||
"entra_policy_ensure_default_user_cannot_create_apps",
|
||||
"entra_policy_guest_invite_only_for_admin_roles",
|
||||
"entra_policy_guest_users_access_restrictions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_3_ii_c",
|
||||
"Name": "164.308(a)(3)(ii)(C) Termination procedures",
|
||||
"Description": "Implement procedures for terminating access to electronic protected health information when the employment of, or other arrangement with, a workforce member ends or as required by determinations made as specified in paragraph (a)(3)(ii)(b).",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_3_ii_c",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_key_rotation_90_days",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_rbac_key_expiration_set",
|
||||
"keyvault_rbac_secret_expiration_set",
|
||||
"keyvault_key_expiration_set_in_non_rbac",
|
||||
"keyvault_non_rbac_secret_expiration_set"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_4_i",
|
||||
"Name": "164.308(a)(4)(i) Information access management",
|
||||
"Description": "Implement policies and procedures for authorizing access to electronic protected health information that are consistent with the applicable requirements of subpart E of this part.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_4_i",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_role_user_access_admin_restricted",
|
||||
"iam_custom_role_has_permissions_to_administer_resource_locks",
|
||||
"keyvault_rbac_enabled",
|
||||
"entra_global_admin_in_less_than_five_users",
|
||||
"entra_policy_restricts_user_consent_for_apps",
|
||||
"entra_policy_user_consent_for_verified_apps"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_4_ii_a",
|
||||
"Name": "164.308(a)(4)(ii)(A) Isolating health care clearinghouse functions",
|
||||
"Description": "If a health care clearinghouse is part of a larger organization, the clearinghouse must implement policies and procedures that protect the electronic protected health information of the clearinghouse from unauthorized access by the larger organization.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_4_ii_a",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_infrastructure_encryption_is_enabled",
|
||||
"storage_ensure_private_endpoints_in_storage_accounts",
|
||||
"storage_default_network_access_rule_is_denied",
|
||||
"sqlserver_tde_encryption_enabled",
|
||||
"sqlserver_tde_encrypted_with_cmk",
|
||||
"sqlserver_auditing_enabled",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_logging_enabled",
|
||||
"keyvault_private_endpoints",
|
||||
"vm_ensure_attached_disks_encrypted_with_cmk",
|
||||
"vm_backup_enabled",
|
||||
"cosmosdb_account_use_private_endpoints",
|
||||
"databricks_workspace_cmk_encryption_enabled",
|
||||
"databricks_workspace_vnet_injection_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_4_ii_b",
|
||||
"Name": "164.308(a)(4)(ii)(B) Access authorization",
|
||||
"Description": "Implement policies and procedures for granting access to electronic protected health information, as one illustrative example, through access to a workstation, transaction, program, process, or other mechanism.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_4_ii_b",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_role_user_access_admin_restricted",
|
||||
"iam_custom_role_has_permissions_to_administer_resource_locks",
|
||||
"keyvault_rbac_enabled",
|
||||
"aks_cluster_rbac_enabled",
|
||||
"cosmosdb_account_use_aad_and_rbac",
|
||||
"sqlserver_azuread_administrator_enabled",
|
||||
"entra_global_admin_in_less_than_five_users"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_4_ii_c",
|
||||
"Name": "164.308(a)(4)(ii)(C) Access establishment and modification",
|
||||
"Description": "Implement policies and procedures that, based upon the covered entity's or the business associate's access authorization policies, establish, document, review, and modify a user's right of access to a workstation, transaction, program, or process.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_4_ii_c",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_role_user_access_admin_restricted",
|
||||
"storage_key_rotation_90_days",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_rbac_key_expiration_set",
|
||||
"keyvault_rbac_secret_expiration_set",
|
||||
"entra_global_admin_in_less_than_five_users",
|
||||
"entra_policy_default_users_cannot_create_security_groups",
|
||||
"entra_policy_ensure_default_user_cannot_create_apps"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_5_ii_b",
|
||||
"Name": "164.308(a)(5)(ii)(B) Protection from malicious software",
|
||||
"Description": "Procedures for guarding against, detecting, and reporting malicious software.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_5_ii_b",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"defender_ensure_wdatp_is_enabled",
|
||||
"defender_assessments_vm_endpoint_protection_installed",
|
||||
"defender_ensure_system_updates_are_applied",
|
||||
"defender_container_images_scan_enabled",
|
||||
"defender_container_images_resolved_vulnerabilities"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_5_ii_c",
|
||||
"Name": "164.308(a)(5)(ii)(C) Log-in monitoring",
|
||||
"Description": "Procedures for monitoring log-in attempts and reporting discrepancies.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_5_ii_c",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"defender_ensure_mcas_is_enabled",
|
||||
"monitor_diagnostic_setting_with_appropriate_categories",
|
||||
"entra_security_defaults_enabled",
|
||||
"sqlserver_auditing_enabled",
|
||||
"keyvault_logging_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_5_ii_d",
|
||||
"Name": "164.308(a)(5)(ii)(D) Password management",
|
||||
"Description": "Procedures for creating, changing, and safeguarding passwords.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_5_ii_d",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "entra"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"entra_security_defaults_enabled",
|
||||
"entra_privileged_user_has_mfa",
|
||||
"entra_non_privileged_user_has_mfa",
|
||||
"storage_key_rotation_90_days",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_rbac_key_expiration_set",
|
||||
"keyvault_rbac_secret_expiration_set"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_6_i",
|
||||
"Name": "164.308(a)(6)(i) Security incident procedures",
|
||||
"Description": "Implement policies and procedures to address security incidents.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_6_i",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"monitor_alert_create_update_nsg",
|
||||
"monitor_alert_delete_nsg",
|
||||
"monitor_alert_create_update_security_solution",
|
||||
"monitor_alert_delete_security_solution",
|
||||
"monitor_alert_service_health_exists",
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"defender_ensure_notify_alerts_severity_is_high",
|
||||
"defender_ensure_notify_emails_to_owners",
|
||||
"defender_additional_email_configured_with_a_security_contact",
|
||||
"defender_attack_path_notifications_properly_configured"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_6_ii",
|
||||
"Name": "164.308(a)(6)(ii) Response and reporting",
|
||||
"Description": "Identify and respond to suspected or known security incidents; mitigate, to the extent practicable, harmful effects of security incidents that are known to the covered entity or business associate; and document security incidents and their outcomes.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_6_ii",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"monitor_diagnostic_setting_with_appropriate_categories",
|
||||
"monitor_diagnostic_settings_exists",
|
||||
"monitor_alert_create_update_nsg",
|
||||
"monitor_alert_delete_nsg",
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"defender_ensure_notify_alerts_severity_is_high",
|
||||
"defender_ensure_notify_emails_to_owners",
|
||||
"defender_additional_email_configured_with_a_security_contact",
|
||||
"sqlserver_auditing_enabled",
|
||||
"keyvault_logging_enabled",
|
||||
"network_flow_log_captured_sent",
|
||||
"app_http_logs_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_7_i",
|
||||
"Name": "164.308(a)(7)(i) Contingency plan",
|
||||
"Description": "Establish (and implement as needed) policies and procedures for responding to an emergency or other occurrence (for example, fire, vandalism, system failure, and natural disaster) that damages systems that contain electronic protected health information.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_7_i",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"vm_backup_enabled",
|
||||
"vm_sufficient_daily_backup_retention_period",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_ensure_soft_delete_is_enabled",
|
||||
"storage_ensure_file_shares_soft_delete_is_enabled",
|
||||
"storage_geo_redundant_enabled",
|
||||
"keyvault_recoverable",
|
||||
"sqlserver_auditing_retention_90_days"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_7_ii_a",
|
||||
"Name": "164.308(a)(7)(ii)(A) Data backup plan",
|
||||
"Description": "Establish and implement procedures to create and maintain retrievable exact copies of electronic protected health information.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_7_ii_a",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"vm_backup_enabled",
|
||||
"vm_sufficient_daily_backup_retention_period",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_ensure_soft_delete_is_enabled",
|
||||
"storage_ensure_file_shares_soft_delete_is_enabled",
|
||||
"storage_geo_redundant_enabled",
|
||||
"keyvault_recoverable",
|
||||
"sqlserver_auditing_retention_90_days",
|
||||
"postgresql_flexible_server_log_retention_days_greater_3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_7_ii_b",
|
||||
"Name": "164.308(a)(7)(ii)(B) Disaster recovery plan",
|
||||
"Description": "Establish (and implement as needed) procedures to restore any loss of data.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_7_ii_b",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"vm_backup_enabled",
|
||||
"vm_sufficient_daily_backup_retention_period",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_ensure_soft_delete_is_enabled",
|
||||
"storage_geo_redundant_enabled",
|
||||
"keyvault_recoverable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_7_ii_c",
|
||||
"Name": "164.308(a)(7)(ii)(C) Emergency mode operation plan",
|
||||
"Description": "Establish (and implement as needed) procedures to enable continuation of critical business processes for protection of the security of electronic protected health information while operating in emergency mode.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_7_ii_c",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"vm_backup_enabled",
|
||||
"vm_sufficient_daily_backup_retention_period",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_ensure_soft_delete_is_enabled",
|
||||
"storage_geo_redundant_enabled",
|
||||
"keyvault_recoverable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_308_a_8",
|
||||
"Name": "164.308(a)(8) Evaluation",
|
||||
"Description": "Perform a periodic technical and nontechnical evaluation, based initially upon the standards implemented under this rule and subsequently, in response to environmental or operational changes affecting the security of electronic protected health information, that establishes the extent to which an entity's security policies and procedures meet the requirements of this subpart.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_308_a_8",
|
||||
"Section": "164.308 Administrative Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"defender_ensure_mcas_is_enabled",
|
||||
"sqlserver_vulnerability_assessment_enabled",
|
||||
"sqlserver_va_periodic_recurring_scans_enabled",
|
||||
"sqlserver_va_scan_reports_configured",
|
||||
"sqlserver_va_emails_notifications_admins_enabled",
|
||||
"policy_ensure_asc_enforcement_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_310_a_1",
|
||||
"Name": "164.310(a)(1) Facility access controls",
|
||||
"Description": "Implement policies and procedures to limit physical access to its electronic information systems and the facility or facilities in which they are housed, while ensuring that properly authorized access is allowed.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_310_a_1",
|
||||
"Section": "164.310 Physical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"network_ssh_internet_access_restricted",
|
||||
"network_rdp_internet_access_restricted",
|
||||
"network_http_internet_access_restricted",
|
||||
"network_bastion_host_exists",
|
||||
"vm_jit_access_enabled",
|
||||
"aks_clusters_public_access_disabled",
|
||||
"aks_clusters_created_with_private_nodes"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_310_d_1",
|
||||
"Name": "164.310(d)(1) Device and media controls",
|
||||
"Description": "Implement policies and procedures that govern the receipt and removal of hardware and electronic media that contain electronic protected health information into and out of a facility, and the movement of these items within the facility.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_310_d_1",
|
||||
"Section": "164.310 Physical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_infrastructure_encryption_is_enabled",
|
||||
"vm_ensure_attached_disks_encrypted_with_cmk",
|
||||
"vm_ensure_unattached_disks_encrypted_with_cmk",
|
||||
"vm_ensure_using_managed_disks",
|
||||
"sqlserver_tde_encryption_enabled",
|
||||
"databricks_workspace_cmk_encryption_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_a_1",
|
||||
"Name": "164.312(a)(1) Access control",
|
||||
"Description": "Implement technical policies and procedures for electronic information systems that maintain electronic protected health information to allow access only to those persons or software programs that have been granted access rights as specified in 164.308(a)(4).",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_a_1",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_blob_public_access_level_is_disabled",
|
||||
"storage_default_network_access_rule_is_denied",
|
||||
"storage_ensure_private_endpoints_in_storage_accounts",
|
||||
"sqlserver_unrestricted_inbound_access",
|
||||
"network_ssh_internet_access_restricted",
|
||||
"network_rdp_internet_access_restricted",
|
||||
"network_http_internet_access_restricted",
|
||||
"iam_subscription_roles_owner_custom_not_created",
|
||||
"iam_role_user_access_admin_restricted",
|
||||
"entra_privileged_user_has_mfa",
|
||||
"containerregistry_not_publicly_accessible",
|
||||
"app_function_not_publicly_accessible",
|
||||
"aisearch_service_not_publicly_accessible",
|
||||
"cosmosdb_account_firewall_use_selected_networks",
|
||||
"cosmosdb_account_use_private_endpoints",
|
||||
"aks_clusters_public_access_disabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_a_2_i",
|
||||
"Name": "164.312(a)(2)(i) Unique user identification",
|
||||
"Description": "Assign a unique name and/or number for identifying and tracking user identity.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_a_2_i",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"sqlserver_auditing_enabled",
|
||||
"sqlserver_azuread_administrator_enabled",
|
||||
"entra_security_defaults_enabled",
|
||||
"storage_default_to_entra_authorization_enabled",
|
||||
"cosmosdb_account_use_aad_and_rbac",
|
||||
"postgresql_flexible_server_entra_id_authentication_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_a_2_ii",
|
||||
"Name": "164.312(a)(2)(ii) Emergency access procedure",
|
||||
"Description": "Establish (and implement as needed) procedures for obtaining necessary electronic protected health information during an emergency.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_a_2_ii",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"vm_backup_enabled",
|
||||
"vm_sufficient_daily_backup_retention_period",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_ensure_soft_delete_is_enabled",
|
||||
"storage_geo_redundant_enabled",
|
||||
"keyvault_recoverable"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_a_2_iv",
|
||||
"Name": "164.312(a)(2)(iv) Encryption and decryption",
|
||||
"Description": "Implement a mechanism to encrypt and decrypt electronic protected health information.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_a_2_iv",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_infrastructure_encryption_is_enabled",
|
||||
"storage_secure_transfer_required_is_enabled",
|
||||
"sqlserver_tde_encryption_enabled",
|
||||
"sqlserver_tde_encrypted_with_cmk",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"vm_ensure_attached_disks_encrypted_with_cmk",
|
||||
"vm_ensure_unattached_disks_encrypted_with_cmk",
|
||||
"databricks_workspace_cmk_encryption_enabled",
|
||||
"monitor_storage_account_with_activity_logs_cmk_encrypted"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_b",
|
||||
"Name": "164.312(b) Audit controls",
|
||||
"Description": "Implement hardware, software, and/or procedural mechanisms that record and examine activity in information systems that contain or use electronic protected health information.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_b",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"monitor_diagnostic_setting_with_appropriate_categories",
|
||||
"monitor_diagnostic_settings_exists",
|
||||
"monitor_alert_create_policy_assignment",
|
||||
"monitor_alert_delete_policy_assignment",
|
||||
"monitor_alert_create_update_nsg",
|
||||
"monitor_alert_delete_nsg",
|
||||
"monitor_alert_create_update_sqlserver_fr",
|
||||
"monitor_alert_delete_sqlserver_fr",
|
||||
"sqlserver_auditing_enabled",
|
||||
"sqlserver_auditing_retention_90_days",
|
||||
"keyvault_logging_enabled",
|
||||
"network_watcher_enabled",
|
||||
"network_flow_log_captured_sent",
|
||||
"network_flow_log_more_than_90_days",
|
||||
"app_http_logs_enabled",
|
||||
"appinsights_ensure_is_configured",
|
||||
"postgresql_flexible_server_log_checkpoints_on",
|
||||
"postgresql_flexible_server_log_connections_on",
|
||||
"postgresql_flexible_server_log_disconnections_on",
|
||||
"mysql_flexible_server_audit_log_enabled",
|
||||
"mysql_flexible_server_audit_log_connection_activated"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_c_1",
|
||||
"Name": "164.312(c)(1) Integrity",
|
||||
"Description": "Implement policies and procedures to protect electronic protected health information from improper alteration or destruction.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_c_1",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_secure_transfer_required_is_enabled",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_recoverable",
|
||||
"sqlserver_tde_encryption_enabled",
|
||||
"vm_ensure_attached_disks_encrypted_with_cmk"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_c_2",
|
||||
"Name": "164.312(c)(2) Mechanism to authenticate electronic protected health information",
|
||||
"Description": "Implement electronic mechanisms to corroborate that electronic protected health information has not been altered or destroyed in an unauthorized manner.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_c_2",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"storage_secure_transfer_required_is_enabled",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"keyvault_logging_enabled",
|
||||
"sqlserver_auditing_enabled",
|
||||
"network_flow_log_captured_sent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_d",
|
||||
"Name": "164.312(d) Person or entity authentication",
|
||||
"Description": "Implement procedures to verify that a person or entity seeking access to electronic protected health information is the one claimed.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_d",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "entra"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"entra_security_defaults_enabled",
|
||||
"entra_privileged_user_has_mfa",
|
||||
"entra_non_privileged_user_has_mfa",
|
||||
"entra_conditional_access_policy_require_mfa_for_management_api",
|
||||
"entra_user_with_vm_access_has_mfa",
|
||||
"entra_trusted_named_locations_exists",
|
||||
"sqlserver_azuread_administrator_enabled",
|
||||
"postgresql_flexible_server_entra_id_authentication_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_e_1",
|
||||
"Name": "164.312(e)(1) Transmission security",
|
||||
"Description": "Implement technical security measures to guard against unauthorized access to electronic protected health information that is being transmitted over an electronic communications network.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_e_1",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_secure_transfer_required_is_enabled",
|
||||
"storage_ensure_minimum_tls_version_12",
|
||||
"sqlserver_recommended_minimal_tls_version",
|
||||
"app_minimum_tls_version_12",
|
||||
"app_ensure_http_is_redirected_to_https",
|
||||
"app_ensure_using_http20",
|
||||
"app_function_ftps_deployment_disabled",
|
||||
"app_ftp_deployment_disabled",
|
||||
"network_ssh_internet_access_restricted",
|
||||
"network_rdp_internet_access_restricted",
|
||||
"mysql_flexible_server_minimum_tls_version_12",
|
||||
"mysql_flexible_server_ssl_connection_enabled",
|
||||
"postgresql_flexible_server_enforce_ssl_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_e_2_i",
|
||||
"Name": "164.312(e)(2)(i) Integrity controls",
|
||||
"Description": "Implement security measures to ensure that electronically transmitted electronic protected health information is not improperly modified without detection until disposed of.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_e_2_i",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"monitor_diagnostic_setting_with_appropriate_categories",
|
||||
"storage_secure_transfer_required_is_enabled",
|
||||
"storage_ensure_minimum_tls_version_12",
|
||||
"storage_blob_versioning_is_enabled",
|
||||
"defender_ensure_defender_for_server_is_on",
|
||||
"sqlserver_auditing_enabled",
|
||||
"keyvault_logging_enabled",
|
||||
"network_flow_log_captured_sent"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "164_312_e_2_ii",
|
||||
"Name": "164.312(e)(2)(ii) Encryption",
|
||||
"Description": "Implement a mechanism to encrypt electronic protected health information whenever deemed appropriate.",
|
||||
"Attributes": [
|
||||
{
|
||||
"ItemId": "164_312_e_2_ii",
|
||||
"Section": "164.312 Technical Safeguards",
|
||||
"Service": "azure"
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"storage_ensure_encryption_with_customer_managed_keys",
|
||||
"storage_infrastructure_encryption_is_enabled",
|
||||
"storage_secure_transfer_required_is_enabled",
|
||||
"storage_ensure_minimum_tls_version_12",
|
||||
"sqlserver_tde_encryption_enabled",
|
||||
"sqlserver_tde_encrypted_with_cmk",
|
||||
"sqlserver_recommended_minimal_tls_version",
|
||||
"keyvault_key_rotation_enabled",
|
||||
"vm_ensure_attached_disks_encrypted_with_cmk",
|
||||
"vm_ensure_unattached_disks_encrypted_with_cmk",
|
||||
"app_minimum_tls_version_12",
|
||||
"app_ensure_http_is_redirected_to_https",
|
||||
"mysql_flexible_server_minimum_tls_version_12",
|
||||
"mysql_flexible_server_ssl_connection_enabled",
|
||||
"postgresql_flexible_server_enforce_ssl_enabled",
|
||||
"databricks_workspace_cmk_encryption_enabled"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
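Prowler can score a scan directly against the mapping above via its compliance mode. A minimal sketch, assuming the framework ships under an identifier like `hipaa_azure` (an assumption; `--list-compliance` prints the names actually bundled) and that Azure CLI credentials are available:

```bash
# Print the compliance frameworks bundled with this Prowler build,
# then scan the subscription against the HIPAA mapping above.
prowler azure --list-compliance
prowler azure --az-cli-auth --compliance hipaa_azure
```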

@@ -1,30 +1,37 @@
{
  "Provider": "azure",
  "CheckID": "monitor_alert_create_policy_assignment",
  "CheckTitle": "Ensure that Activity Log Alert exists for Create Policy Assignment",
  "CheckTitle": "Subscription has an Azure Monitor activity log alert for policy assignment creation",
  "CheckType": [],
  "ServiceName": "monitor",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "Monitor",
  "Severity": "medium",
  "ResourceType": "microsoft.insights/activitylogalerts",
  "ResourceGroup": "monitoring",
  "Description": "Create an activity log alert for the Create Policy Assignment event.",
  "Risk": "Monitoring for create policy assignment events gives insight into changes done in 'Azure policy - assignments' and can reduce the time it takes to detect unsolicited changes.",
  "RelatedUrl": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
"Description": "**Azure Monitor Activity Log alert** configurations are assessed for an **activity log alert** on `Microsoft.Authorization/policyAssignments/write`, indicating monitoring of newly created **Azure Policy assignments**",
|
||||
"Risk": "Absent alerts on new policy assignments, unauthorized or accidental changes can silently weaken governance. Adversaries could assign permissive policies or replace deny rules, enabling misconfigurations, privilege expansion, and data exposure-degrading **integrity** and threatening **confidentiality** and **availability**.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://learn.microsoft.com/en-us/dotnet/api/azure.resourcemanager.monitor.activitylogalertresource?view=azure-dotnet",
|
||||
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-alert-for-create-policy-assignment-events.html"
|
||||
],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Authorization/policyAssignments/write and level=<verbose | information | warning | error | critical> --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription ID> --action-group <action group ID> --location global",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-alert-for-create-policy-assignment-events.html#trendmicro",
|
||||
"Terraform": ""
|
||||
"CLI": "az monitor activity-log alert create --name '<activity log rule name>' --resource-group '<resource group name>' --location global --scopes '/subscriptions/<subscription ID>' --condition \"category=Administrative and operationName=Microsoft.Authorization/policyAssignments/write\" --enabled true",
|
||||
"NativeIaC": "```bicep\n// Azure Monitor Activity Log Alert for Policy Assignment creation\nresource activityLogAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'global'\n properties: {\n enabled: true\n scopes: [ '/subscriptions/<subscription ID>' ]\n condition: {\n allOf: [\n {\n field: 'category'\n equals: 'Administrative' // Critical: filter Activity Log category to Administrative\n }\n {\n field: 'operationName'\n equals: 'Microsoft.Authorization/policyAssignments/write' // Critical: alert on Policy Assignment creation\n }\n ]\n }\n }\n}\n```",
|
||||
"Other": "1. In the Azure Portal, go to Monitor > Alerts > Alert rules\n2. Click + Create > Alert rule\n3. Scope: Select the target Subscription and click Apply\n4. Condition: Choose Activity log, then set Category = Administrative and Operation name = Microsoft.Authorization/policyAssignments/write; click Apply\n5. Actions: Skip or select an existing Action group (optional)\n6. Details: Enter a Name and ensure Enable alert rule upon creation is checked\n7. Click Review + create, then Create",
|
||||
"Terraform": "```hcl\n# Azure Monitor Activity Log Alert for Policy Assignment creation\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n location = \"global\"\n scopes = [\"/subscriptions/<subscription ID>\"]\n\n criteria {\n category = \"Administrative\" # Critical: Activity Log category\n operation_name = \"Microsoft.Authorization/policyAssignments/write\" # Critical: Policy Assignment creation\n }\n}\n```"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Policy assignment (policyAssignments). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create policy assignment (Microsoft.Authorization/policyAssignments). 12. Select the Actions tab. 13. To use an existing action group, click elect action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
|
||||
"Url": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log"
|
||||
"Text": "Implement an **activity log alert** for `Microsoft.Authorization/policyAssignments/write` and route to an action group for timely response.\n\nApply across all subscriptions, restrict assignment rights (**least privilege**), require change approval, and integrate notifications with your SIEM for **defense in depth**.",
|
||||
"Url": "https://hub.prowler.com/check/monitor_alert_create_policy_assignment"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
"Categories": [
|
||||
"logging"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "By default, no monitoring alerts are created."
|
||||
|
||||
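To confirm by hand that a subscription would pass this check, existing alert rules can be inspected with the Azure CLI. A minimal sketch; the JMESPath filter is illustrative and not part of the check itself:

```bash
# List activity log alert rules whose condition covers policy
# assignment creation; an empty result means the check would fail.
az monitor activity-log alert list --output json \
  --query "[?condition.allOf[?equals=='Microsoft.Authorization/policyAssignments/write']].{name:name, enabled:enabled}"
```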

@@ -1,30 +1,37 @@
{
  "Provider": "azure",
  "CheckID": "monitor_alert_create_update_nsg",
  "CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Network Security Group",
  "CheckTitle": "Subscription has an Activity Log alert for Network Security Group create or update operations",
  "CheckType": [],
  "ServiceName": "monitor",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "Monitor",
  "ResourceType": "microsoft.insights/activitylogalerts",
  "ResourceGroup": "monitoring",
  "Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
  "Risk": "Monitoring for Create or Update Network Security Group events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
"Description": "**Azure Monitor Activity Log alert** monitors **Network Security Group** changes via the `Microsoft.Network/networkSecurityGroups/write` operation to capture create/update events across the subscription",
|
||||
"Risk": "Lack of alerting on NSG changes allows **unauthorized network policy modifications** to go unnoticed. Adversaries or mistakes could open ports, reduce segmentation, and enable **lateral movement**, impacting data **confidentiality** and service **availability** through exposure or disruption of critical traffic",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
|
||||
"https://learn.microsoft.com/en-us/azure/azure-monitor/platform/activity-log-schema",
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-update-network-security-group-rule-alert-in-use.html"
|
||||
],
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/write and level=verbose --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' --subscription <subscription id> --action-group <action group ID> --location global",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-update-network-security-group-rule-alert-in-use.html#trendmicro",
|
||||
"Terraform": ""
|
||||
"CLI": "az monitor activity-log alert create --resource-group '<example_resource_name>' --name '<example_resource_name>' --scopes '/subscriptions/<subscription ID>' --condition \"category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/write\" --location global",
|
||||
"NativeIaC": "```bicep\n// Activity Log alert for NSG create/update\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: [ subscription().id ]\n condition: {\n allOf: [\n { field: 'category', equals: 'Administrative' }\n { field: 'operationName', equals: 'Microsoft.Network/networkSecurityGroups/write' } // Critical: triggers on NSG create/update\n ]\n }\n enabled: true // Ensures the alert is active\n }\n}\n```",
|
||||
"Other": "1. In the Azure portal, go to Monitor > Alerts > Alert rules > Create\n2. Scope: Select your subscription and click Apply\n3. Condition: Choose Activity log, set Category to Administrative, set Operation name to Microsoft.Network/networkSecurityGroups/write, then Done\n4. Actions: Skip (optional)\n5. Details: Name the rule and set Region to Global, ensure Enable upon creation is checked\n6. Review + create > Create",
|
||||
"Terraform": "```hcl\n# Activity Log alert for NSG create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<subscription_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Network/networkSecurityGroups/write\" # Critical: triggers on NSG create/update\n }\n}\n```"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Network security groups. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Network Security Group (Microsoft.Network/networkSecurityGroups). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
|
||||
"Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
|
||||
"Text": "Implement a subscription-wide **Activity Log alert** for NSG change operations and route notifications to an **action group** for rapid triage.\n\nApply **least privilege** for change tooling, enforce **change management**, and add complementary alerts for `Microsoft.Network/networkSecurityGroups/securityRules/write` and `.../delete`. *Integrate with SIEM for correlation*",
|
||||
"Url": "https://hub.prowler.com/check/monitor_alert_create_update_nsg"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
"Categories": [
|
||||
"logging"
|
||||
],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": "By default, no monitoring alerts are created."
|
||||
|
||||
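The recommendation above also suggests complementary alerts on security-rule writes and deletes. A sketch of creating both with the same CLI shape the remediation uses; the rule names and placeholders are illustrative:

```bash
# Complementary alert rules for NSG security-rule edits and deletions,
# mirroring the remediation command for NSG create/update.
for op in "Microsoft.Network/networkSecurityGroups/securityRules/write" \
          "Microsoft.Network/networkSecurityGroups/securityRules/delete"; do
  az monitor activity-log alert create \
    --name "nsg-securityrules-${op##*/}" \
    --resource-group '<resource group name>' \
    --scopes "/subscriptions/<subscription ID>" \
    --condition "category=Administrative and operationName=${op}" \
    --location global
done
```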

@@ -1,30 +1,39 @@
{
  "Provider": "azure",
  "CheckID": "monitor_alert_create_update_public_ip_address_rule",
  "CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Public IP Address rule",
  "CheckTitle": "Subscription has an Activity Log Alert for Public IP address create or update operations",
  "CheckType": [],
  "ServiceName": "monitor",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "Monitor",
  "Severity": "medium",
  "ResourceType": "microsoft.insights/activitylogalerts",
  "ResourceGroup": "monitoring",
  "Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
  "Risk": "Monitoring for Create or Update Public IP Address events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
  "Description": "**Azure Monitor activity log alert** for **Public IP addresses** tracks `Microsoft.Network/publicIPAddresses/write` events at the subscription level, covering any creation or update of public IP resources.",
  "Risk": "Without this alert, unauthorized or mistaken public IP changes can go unnoticed, exposing workloads to the Internet.\n- Confidentiality: unexpected ingress paths\n- Integrity: shadow endpoints for control\n- Availability: larger DDoS surface and outages",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
    "https://trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-public-ip-alert.html",
    "https://support.icompaas.com/support/solutions/articles/62000229918-ensure-that-activity-log-alert-exists-for-create-or-update-public-ip-address-rule",
    "https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/monitor-public-ip"
  ],
  "Remediation": {
    "Code": {
      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
      "NativeIaC": "",
      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-public-ip-alert.html#trendmicro",
      "Terraform": ""
      "CLI": "az monitor activity-log alert create --resource-group <example_resource_name> --name <example_resource_name> --scopes /subscriptions/<example_resource_id> --condition \"category=Administrative and operationName=Microsoft.Network/publicIPAddresses/write\" --location global",
      "NativeIaC": "```bicep\n// Activity Log Alert for Public IP create/update\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'global'\n properties: {\n enabled: true\n scopes: ['/subscriptions/<example_resource_id>']\n condition: {\n allOf: [\n { field: 'category', equals: 'Administrative' }\n { field: 'operationName', equals: 'Microsoft.Network/publicIPAddresses/write' } // Critical: alerts on Public IP create/update\n ]\n }\n }\n}\n```",
      "Other": "1. In Azure Portal, go to Monitor > Alerts > Alert rules > Create\n2. Scope: Select your subscription and click Done\n3. Condition: Choose Activity log, then select the signal \"Create or Update Public Ip Address (publicIPAddresses)\"\n4. Details: Enter an alert rule name; Region: Global; Ensure Enable alert rule upon creation is checked\n5. Click Review + create, then Create",
      "Terraform": "```hcl\n# Activity Log Alert for Public IP create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Network/publicIPAddresses/write\" # Critical: alerts on Public IP create/update\n }\n}\n```"
    },
    "Recommendation": {
      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Public IP addresses. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Public Ip Address (Microsoft.Network/publicIPAddresses). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
      "Text": "Create a subscription-wide **activity log alert** on `Microsoft.Network/publicIPAddresses/write` and route it to an **action group**.\n\nEnforce **least privilege** for IP management, apply **change control**, and use **defense in depth** (private endpoints, bastions, VPN) to minimize public exposure and speed response.",
      "Url": "https://hub.prowler.com/check/monitor_alert_create_update_public_ip_address_rule"
    }
  },
  "Categories": [],
  "Categories": [
    "logging",
    "forensics-ready"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "By default, no monitoring alerts are created."
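Routing the alert to an action group, as the recommendation advises, can also be done after creation. A sketch assuming an action group already exists; all names here are placeholders:

```bash
# Attach an existing action group to the alert rule so Public IP
# changes produce a notification, not just a logged alert.
az monitor activity-log alert action-group add \
  --name '<activity log rule name>' \
  --resource-group '<resource group name>' \
  --action-group "/subscriptions/<subscription ID>/resourceGroups/<resource group name>/providers/microsoft.insights/actionGroups/<action group name>"
```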

@@ -1,30 +1,37 @@
{
  "Provider": "azure",
  "CheckID": "monitor_alert_create_update_security_solution",
  "CheckTitle": "Ensure that Activity Log Alert exists for Create or Update Security Solution",
  "CheckTitle": "Subscription has Activity Log alert for Security Solution create or update",
  "CheckType": [],
  "ServiceName": "monitor",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "Monitor",
  "Severity": "medium",
  "ResourceType": "microsoft.insights/activitylogalerts",
  "ResourceGroup": "monitoring",
  "Description": "Create an activity log alert for the Create or Update Security Solution event.",
  "Risk": "Monitoring for Create or Update Security Solution events gives insight into changes to the active security solutions and may reduce the time it takes to detect suspicious activity.",
  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
  "Description": "**Azure Monitor activity log alert** is configured to capture **Security Solutions** create/update operations (`Microsoft.Security/securitySolutions/write`) at subscription scope.",
  "Risk": "Without this alert, **unauthorized or mistaken changes** to security tooling can go undetected. Attackers could disable defenses, alter integrations, or weaken policies, eroding the **integrity** of controls, creating blind spots that threaten **confidentiality**, and delaying incident response.",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-security-solution-alert.html#trendmicro"
  ],
  "Remediation": {
    "Code": {
      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Security/securitySolutions/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
      "NativeIaC": "",
      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-security-solution-alert.html#trendmicro",
      "Terraform": ""
      "CLI": "az monitor activity-log alert create --name \"<activity log rule name>\" --resource-group \"<example_resource_name>\" --scopes \"/subscriptions/<example_resource_id>\" --condition \"category=Administrative and operationName=Microsoft.Security/securitySolutions/write\" --location Global",
      "NativeIaC": "```bicep\n// Activity Log Alert for Security Solution create/update\nresource activityLogAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n scopes: [ '/subscriptions/<example_resource_id>' ]\n condition: {\n allOf: [\n {\n field: 'category'\n equals: 'Administrative'\n }\n {\n field: 'operationName' // Critical: match Security Solution create/update\n equals: 'Microsoft.Security/securitySolutions/write' // Triggers on this operation\n }\n ]\n }\n enabled: true\n }\n}\n```",
      "Other": "1. In the Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select your Subscription and click Apply\n3. Condition: Choose Activity log, set Signal name to Administrative, then add a filter Operation name = Microsoft.Security/securitySolutions/write\n4. Actions: Skip (no action group required)\n5. Details: Enter a Name, set Region to Global, ensure Enable alert rule upon creation is checked\n6. Review + create > Create",
      "Terraform": "```hcl\n# Activity Log Alert for Security Solution create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Security/securitySolutions/write\" # Critical: fires on Security Solution create/update\n }\n}\n```"
    },
    "Recommendation": {
      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Security Solutions (securitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create or Update Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
      "Text": "Configure an **activity log alert** for `Microsoft.Security/securitySolutions/write` and route it to action groups for prompt notification/automation.\n\nApply **least privilege**, require **change control**, and forward alerts to a central SIEM to strengthen **defense in depth**.",
      "Url": "https://hub.prowler.com/check/monitor_alert_create_update_security_solution"
    }
  },
  "Categories": [],
  "Categories": [
    "logging"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "By default, no monitoring alerts are created."
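After remediating, this control can be re-tested in isolation rather than re-running a full scan. A minimal sketch, assuming Azure CLI authentication and Prowler's `--check` flag:

```bash
# Re-test only this control; the ID matches the CheckID field of
# this metadata file.
prowler azure --az-cli-auth --check monitor_alert_create_update_security_solution
```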

@@ -1,30 +1,37 @@
{
  "Provider": "azure",
  "CheckID": "monitor_alert_create_update_sqlserver_fr",
  "CheckTitle": "Ensure that Activity Log Alert exists for Create or Update SQL Server Firewall Rule",
  "CheckTitle": "Subscription has an Activity Log alert for SQL Server firewall rule create or update events",
  "CheckType": [],
  "ServiceName": "monitor",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "Monitor",
  "Severity": "medium",
  "ResourceType": "microsoft.insights/activitylogalerts",
  "ResourceGroup": "monitoring",
  "Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
  "Risk": "Monitoring for Create or Update SQL Server Firewall Rule events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
  "Description": "**Azure Monitor activity log alerts** are configured for **Azure SQL Server firewall rule changes**, targeting the `Microsoft.Sql/servers/firewallRules/write` operation.\n\nThis evaluates whether notifications or automated actions are set when firewall rules are created or updated.",
  "Risk": "Without alerting on firewall rule changes, unauthorized or accidental openings can remain unnoticed, exposing databases to untrusted networks.\n\nThis harms **confidentiality** (data exfiltration via widened IP ranges) and **integrity** (unauthorized queries), while increasing attacker dwell time.",
  "RelatedUrl": "",
  "AdditionalURLs": [
    "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro"
  ],
  "Remediation": {
    "Code": {
      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/write and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
      "NativeIaC": "",
      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro",
      "Terraform": ""
      "CLI": "az monitor activity-log alert create --name <activity_log_rule_name> --resource-group <resource_group_name> --scopes /subscriptions/<subscription_id> --condition \"category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/write\" --location global",
      "NativeIaC": "```bicep\n// Activity Log alert for SQL Server firewall rule create/update\nresource example_activity_log_alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n name: '<example_resource_name>'\n location: 'Global'\n properties: {\n enabled: true\n scopes: [ '/subscriptions/<example_resource_id>' ]\n condition: {\n allOf: [\n {\n field: 'category'\n equals: 'Administrative'\n }\n {\n field: 'operationName'\n equals: 'Microsoft.Sql/servers/firewallRules/write' // Critical: alert on SQL Server firewall rule create/update\n }\n ]\n }\n }\n}\n```",
      "Other": "1. In the Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select the subscription and click Done\n3. Condition: Choose Signal type \"Activity log\", then set\n - Category: Administrative\n - Operation name: Microsoft.Sql/servers/firewallRules/write\n Click Done\n4. Actions: Skip (no action group required)\n5. Details: Enter an Alert rule name and ensure Enable alert rule upon creation is checked\n6. Review + create > Create",
      "Terraform": "```hcl\n# Activity Log alert for SQL Server firewall rule create/update\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n resource_group_name = \"<example_resource_name>\"\n scopes = [\"/subscriptions/<example_resource_id>\"]\n\n criteria {\n category = \"Administrative\"\n operation_name = \"Microsoft.Sql/servers/firewallRules/write\" # Critical: alert on SQL Server firewall rule create/update\n }\n}\n```"
    },
    "Recommendation": {
      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Server Firewall Rule (servers/firewallRules). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Create/Update server firewall rule (Microsoft.Sql/servers/firewallRules). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
      "Text": "Enable an activity log alert for `Microsoft.Sql/servers/firewallRules/write` and route it to responsive action groups.\n\nApply **least privilege** for firewall management, enforce change approvals, and use **defense in depth**: prefer **private endpoints** and avoid broad public network access.",
      "Url": "https://hub.prowler.com/check/monitor_alert_create_update_sqlserver_fr"
    }
  },
  "Categories": [],
  "Categories": [
    "logging"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "By default, no monitoring alerts are created."
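A variant of the remediation CLI that wires an action group at creation time, in line with the recommendation's "responsive action groups"; the action group ID and other names are placeholders:

```bash
# Create the firewall-rule alert and attach an action group in one
# step so rule changes page a responder immediately.
az monitor activity-log alert create \
  --name '<activity log rule name>' \
  --resource-group '<resource group name>' \
  --scopes "/subscriptions/<subscription ID>" \
  --condition "category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/write" \
  --action-group '<action group resource ID>' \
  --location global
```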
@@ -1,30 +1,38 @@
|
||||
{
|
||||
"Provider": "azure",
|
||||
"CheckID": "monitor_alert_delete_nsg",
|
||||
"CheckTitle": "Ensure that Activity Log Alert exists for Delete Network Security Group",
|
||||
"CheckTitle": "Subscription has an Activity Log alert for Network Security Group delete operations",
|
||||
"CheckType": [],
|
||||
"ServiceName": "monitor",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "high",
|
||||
"ResourceType": "Monitor",
|
||||
"ResourceType": "microsoft.insights/activitylogalerts",
|
||||
"ResourceGroup": "monitoring",
|
||||
"Description": "Create an activity log alert for the Delete Network Security Group event.",
|
||||
"Risk": "Monitoring for 'Delete Network Security Group' events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
|
||||
"RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
|
+  "Description": "**Azure Monitor activity log alerts** include the NSG deletion signal (`Microsoft.Network/networkSecurityGroups/delete` or `Microsoft.ClassicNetwork/networkSecurityGroups/delete`). The finding indicates whether a subscription has an alert rule configured to trigger when a Network Security Group is deleted.",
+  "Risk": "Without alerting on **NSG deletions**, network segmentation can be removed unnoticed, exposing services to broad ingress/egress. Malicious actors or automation may delete NSGs to enable **lateral movement** and **data exfiltration**. Missing alerts delay response, impacting confidentiality and availability.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
+    "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-network-security-group-rule-alert-in-use.html#trendmicro"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-network-security-group-rule-alert-in-use.html#trendmicro",
-      "Terraform": ""
+      "CLI": "az monitor activity-log alert create --name \"<activity log rule name>\" --resource-group \"<resource group name>\" --scopes \"/subscriptions/<subscription ID>\" --condition \"category=Administrative and operationName=Microsoft.Network/networkSecurityGroups/delete\" --location global",
+      "NativeIaC": "```bicep\n// Activity Log alert for NSG delete\nresource activityAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n  name: '<example_resource_name>'\n  location: 'Global'\n  properties: {\n    scopes: ['/subscriptions/<example_resource_id>']\n    enabled: true\n    condition: {\n      allOf: [\n        { field: 'category', equals: 'Administrative' } // Critical: filter Activity Log to Administrative category\n        { field: 'operationName', equals: 'Microsoft.Network/networkSecurityGroups/delete' } // Critical: triggers on NSG delete\n      ]\n    }\n  }\n}\n```",
+      "Other": "1. In Azure Portal, go to Monitor > Alerts > Alert rules\n2. Click + Create > Alert rule\n3. Scope: Select the target subscription and click Apply\n4. Condition: Choose Activity log, select the signal \"Delete Network Security Group\" (operation Microsoft.Network/networkSecurityGroups/delete); ensure Category is Administrative\n5. Details: Enter a name; leave other settings as default\n6. Click Review + create, then Create",
+      "Terraform": "```hcl\n# Activity Log alert for NSG delete\nresource \"azurerm_monitor_activity_log_alert\" \"example\" {\n  name                = \"<example_resource_name>\"\n  resource_group_name = \"<example_resource_name>\"\n  scopes              = [\"/subscriptions/<example_resource_id>\"]\n\n  criteria {\n    category       = \"Administrative\" # Critical: Activity Log category filter\n    operation_name = \"Microsoft.Network/networkSecurityGroups/delete\" # Critical: alert on NSG delete\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Network security groups. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Network Security Group (Microsoft.Network/networkSecurityGroups). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
-      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
+      "Text": "Configure a subscription-wide **activity log alert** for the NSG delete operation (`Microsoft.Network/networkSecurityGroups/delete`; include Classic if applicable) and route notifications via **action groups**. Enforce **least privilege** for NSG changes, require **change control**, and integrate with your **SIEM** for correlation.",
+      "Url": "https://hub.prowler.com/check/monitor_alert_delete_nsg"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "logging",
+    "forensics-ready"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, no monitoring alerts are created."

@@ -1,30 +1,37 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_alert_delete_policy_assignment",
-  "CheckTitle": "Ensure that Activity Log Alert exists for Delete Policy Assignment",
+  "CheckTitle": "Subscription has an Activity Log alert for policy assignment deletion",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
   "Severity": "high",
-  "ResourceType": "Monitor",
+  "ResourceType": "microsoft.insights/activitylogalerts",
   "ResourceGroup": "monitoring",
-  "Description": "Create an activity log alert for the Delete Policy Assignment event.",
-  "Risk": "Monitoring for delete policy assignment events gives insight into changes done in 'azure policy - assignments' and can reduce the time it takes to detect unsolicited changes.",
-  "RelatedUrl": "https://docs.microsoft.com/en-in/rest/api/monitor/activitylogalerts/createorupdate",
+  "Description": "**Azure Monitor Activity log alerts** for policy assignment deletions using the `Microsoft.Authorization/policyAssignments/delete` operation at subscription scope",
+  "Risk": "Without this alert, **policy assignment deletions** can go unnoticed, eroding configuration **integrity** and enabling governance drift. Malicious or accidental changes may remove guardrails, increasing exposure and threatening **confidentiality** of protected resources.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
+    "https://learn.microsoft.com/en-in/rest/api/monitor/activity-log-alerts/create-or-update?view=rest-monitor-2020-10-01&tabs=HTTP",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-policy-assignment-alert-in-use.html#trendmicro"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Authorization/policyAssignments/delete and level=<verbose | information | warning | error | critical> --scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-policy-assignment-alert-in-use.html#trendmicro",
-      "Terraform": ""
+      "CLI": "az monitor activity-log alert create --resource-group <example_resource_name> --name <example_resource_name> --scopes \"/subscriptions/<example_resource_id>\" --condition \"operationName=Microsoft.Authorization/policyAssignments/delete\" --location global",
+      "NativeIaC": "```bicep\n// Activity Log alert for Policy Assignment deletion\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n  name: '<example_resource_name>'\n  location: 'Global'\n  properties: {\n    scopes: [\n      '/subscriptions/<example_resource_id>'\n    ]\n    condition: {\n      allOf: [\n        {\n          field: 'operationName'\n          equals: 'Microsoft.Authorization/policyAssignments/delete' // CRITICAL: alerts on policy assignment deletion\n        }\n      ]\n    }\n    actions: {\n      actionGroups: [] // Required property; empty keeps rule minimal\n    }\n  }\n}\n```",
+      "Other": "1. In Azure Portal, go to Monitor > Alerts > Alert rules\n2. Click + Create > Alert rule\n3. Scope: Select your subscription and click Apply\n4. Condition: Choose Activity log, then set Operation name equals \"Microsoft.Authorization/policyAssignments/delete\"\n5. Actions: Skip (optional)\n6. Details: Enter a name and set Enable alert rule upon creation\n7. Click Create",
+      "Terraform": "```hcl\n# Activity Log alert for Policy Assignment deletion\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n  name                = \"<example_resource_name>\"\n  resource_group_name = \"<example_resource_name>\"\n  scopes              = [\"/subscriptions/<example_resource_id>\"]\n\n  criteria {\n    operation_name = \"Microsoft.Authorization/policyAssignments/delete\" # CRITICAL: alerts on policy assignment deletion\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Policy assignment (policyAssignments). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete policy assignment (Microsoft.Authorization/policyAssignments). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
-      "Url": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log"
+      "Text": "- Configure an activity log alert for `Microsoft.Authorization/policyAssignments/delete` and route to an action group.\n- Enforce **least privilege** and **separation of duties** for policy changes and require approvals.\n- Integrate alerts with your SIEM and define playbooks for rapid response.",
+      "Url": "https://hub.prowler.com/check/monitor_alert_delete_policy_assignment"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, no monitoring alerts are created."

@@ -1,30 +1,37 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_alert_delete_public_ip_address_rule",
-  "CheckTitle": "Ensure that Activity Log Alert exists for Delete Public IP Address rule",
+  "CheckTitle": "Azure subscription has an Activity Log alert for public IP address deletion",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
-  "Severity": "high",
-  "ResourceType": "Monitor",
+  "Severity": "medium",
+  "ResourceType": "microsoft.insights/activitylogalerts",
   "ResourceGroup": "monitoring",
-  "Description": "Create an activity log alert for the Delete Public IP Address rule.",
-  "Risk": "Monitoring for Delete Public IP Address events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.",
-  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
+  "Description": "**Azure Monitor activity log alert** exists for the **Delete Public IP Address** operation (`Microsoft.Network/publicIPAddresses/delete`), capturing subscription-wide events when Public IP resources are removed.",
+  "Risk": "Unmonitored deletion of Public IPs can abruptly sever ingress/egress, break DNS and allowlists, and take services offline (**availability**). Attackers or misconfigurations can delete IPs to cause **DoS** or evade controls, and delayed visibility hinders **incident response** and **forensics**.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-public-ip-alert.html#trendmicro",
+    "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
+    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-public-ip-alert.html#trendmicro",
-      "Terraform": ""
+      "CLI": "az monitor activity-log alert create --name <activity_log_rule_name> --resource-group <resource_group_name> --location global --scopes /subscriptions/<subscription_id> --condition category=Administrative and operationName=Microsoft.Network/publicIPAddresses/delete",
+      "NativeIaC": "```bicep\n// Activity Log alert for Public IP deletion\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n  name: '<example_resource_name>'\n  location: 'Global'\n  properties: {\n    enabled: true\n    scopes: [\n      '/subscriptions/<example_resource_id>' // Scope the alert to the subscription\n    ]\n    condition: {\n      allOf: [\n        { field: 'category', equals: 'Administrative' }\n        { field: 'operationName', equals: 'Microsoft.Network/publicIPAddresses/delete' } // Critical: triggers when a Public IP is deleted\n      ]\n    }\n  }\n}\n```",
+      "Other": "1. In the Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select your subscription and click Apply\n3. Condition: Choose Activity log, then set Category = Administrative and Operation name = Microsoft.Network/publicIPAddresses/delete; click Apply\n4. Actions: Skip (no action group required to pass)\n5. Details: Enter an alert name, set Region to Global, ensure Enable alert rule upon creation is checked\n6. Review + create > Create",
+      "Terraform": "```hcl\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n  name                = \"<example_resource_name>\"\n  resource_group_name = \"<example_resource_name>\"\n  scopes              = [\"/subscriptions/<example_resource_id>\"]\n\n  criteria {\n    category       = \"Administrative\"\n    operation_name = \"Microsoft.Network/publicIPAddresses/delete\" # Critical: alert when a Public IP is deleted\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Public IP addresses. 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Public Ip Address (Microsoft.Network/publicIPAddresses). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
-      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
+      "Text": "Implement an activity log alert for `Microsoft.Network/publicIPAddresses/delete` and route it to an action group for rapid response.\n- Apply **least privilege** and change approval for IP deletions\n- Use **resource locks** on critical IPs\n- Centralize alerts in your SIEM and define runbooks for containment",
+      "Url": "https://hub.prowler.com/check/monitor_alert_delete_public_ip_address_rule"
    }
   },
-  "Categories": [],
+  "Categories": [
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, no monitoring alerts are created."

@@ -1,30 +1,37 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_alert_delete_security_solution",
-  "CheckTitle": "Ensure that Activity Log Alert exists for Delete Security Solution",
+  "CheckTitle": "Subscription has an Azure Monitor Activity Log alert for Microsoft.Security/securitySolutions delete operations",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
-  "Severity": "high",
-  "ResourceType": "Monitor",
+  "Severity": "medium",
+  "ResourceType": "microsoft.insights/activitylogalerts",
   "ResourceGroup": "monitoring",
-  "Description": "Create an activity log alert for the Delete Security Solution event.",
-  "Risk": "Monitoring for Delete Security Solution events gives insight into changes to the active security solutions and may reduce the time it takes to detect suspicious activity.",
-  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
+  "Description": "**Azure activity log alerts** monitor deletions of **Security Solutions** by targeting the operation `Microsoft.Security/securitySolutions/delete` at subscription scope.\n\nIdentifies whether notifications are configured for security solution removal events.",
+  "Risk": "Without this alert, **unauthorized or accidental deletions** of security tooling may go **unnoticed**, reducing the **availability** of protections and the **integrity** of monitoring. Adversaries can evade defenses, prolong dwell time, and enable **data exfiltration** under reduced visibility.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/delete-security-solution-alert.html",
+    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
+    "https://learn.microsoft.com/en-us/cli/azure/monitor/activity-log/alert?view=azure-cli-latest",
+    "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Security/securitySolutions/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/delete-security-solution-alert.html#trendmicro",
-      "Terraform": ""
+      "CLI": "az monitor activity-log alert create -g <example_resource_name> -n <example_resource_name> --condition operationName=Microsoft.Security/securitySolutions/delete --scope /subscriptions/<example_resource_id>",
+      "NativeIaC": "```bicep\n// Activity Log Alert for Security Solution delete\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n  name: '<example_resource_name>'\n  location: 'global'\n  properties: {\n    enabled: true\n    scopes: [ subscription().id ]\n    condition: {\n      allOf: [\n        {\n          field: 'operationName'\n          equals: 'Microsoft.Security/securitySolutions/delete' // Critical: alerts on Security Solution delete\n        }\n      ]\n    }\n  }\n}\n```",
+      "Other": "1. In Azure portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select your subscription and click Apply\n3. Condition: Click Add condition, search and select \"Delete Security Solutions (Microsoft.Security/securitySolutions)\", then Add\n4. Ensure no filters for Level or Status are set\n5. Details: Enter an Alert rule name and choose a resource group\n6. Create: Review + create, then Create",
+      "Terraform": "```hcl\n# Activity Log Alert for Security Solution delete\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n  name                = \"<example_resource_name>\"\n  resource_group_name = \"<example_resource_name>\"\n  scopes              = [\"/subscriptions/<example_resource_id>\"]\n\n  criteria {\n    operation_name = \"Microsoft.Security/securitySolutions/delete\" # Critical: alerts on delete operation\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Security Solutions (securitySolutions). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete Security Solutions (Microsoft.Security/securitySolutions). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
-      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
+      "Text": "Configure a **dedicated activity log alert** for `Microsoft.Security/securitySolutions/delete` and route it to resilient **action groups** (email, chat, ticketing, SIEM). Apply **least privilege** and **resource locks** to deter tampering. Test alerting routinely and integrate it into **defense-in-depth** monitoring.",
+      "Url": "https://hub.prowler.com/check/monitor_alert_delete_security_solution"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, no monitoring alerts are created."

@@ -1,30 +1,37 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_alert_delete_sqlserver_fr",
-  "CheckTitle": "Ensure that Activity Log Alert exists for Delete SQL Server Firewall Rule",
+  "CheckTitle": "Subscription has an Activity Log Alert for SQL Server firewall rule deletions",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
-  "Severity": "high",
-  "ResourceType": "Monitor",
+  "Severity": "medium",
+  "ResourceType": "microsoft.insights/activitylogalerts",
   "ResourceGroup": "monitoring",
-  "Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
-  "Risk": "Monitoring for Delete SQL Server Firewall Rule events gives insight into SQL network access changes and may reduce the time it takes to detect suspicious activity.",
-  "RelatedUrl": "https://docs.microsoft.com/en-in/azure/azure-monitor/platform/alerts-activity-log",
+  "Description": "**Azure Monitor Activity log alerts** watch the admin operation `Microsoft.Sql/servers/firewallRules/delete`, indicating when an **Azure SQL firewall rule** is removed across a subscription.",
+  "Risk": "Without alerting on firewall rule deletions, unexpected changes to SQL network allowlists can go unnoticed, causing **availability** loss for apps and masking **unauthorized tampering**. A compromised admin could remove rules to disrupt service, erode control **integrity**, and delay response.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement",
+    "https://learn.microsoft.com/en-in/azure/azure-monitor/alerts/alerts-create-activity-log-alert-rule?tabs=activity-log",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor activity-log alert create --resource-group '<resource group name>' --condition category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/delete and level=<verbose | information | warning | error | critical>--scope '/subscriptions/<subscription ID>' --name '<activity log rule name>' -- subscription <subscription id> --action-group <action group ID> --location global",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/ActivityLog/create-or-update-or-delete-sql-server-firewall-rule-alert.html#trendmicro",
-      "Terraform": ""
+      "CLI": "az monitor activity-log alert create --resource-group <resource_group_name> --name <alert_name> --scopes /subscriptions/<subscription_id> --condition \"category=Administrative and operationName=Microsoft.Sql/servers/firewallRules/delete\" --location global",
+      "NativeIaC": "```bicep\n// Activity Log Alert for SQL Server firewall rule deletions\nresource activityLogAlert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n  name: '<example_resource_name>'\n  location: 'Global'\n  properties: {\n    scopes: [\n      '/subscriptions/<example_subscription_id>'\n    ]\n    enabled: true\n    condition: {\n      allOf: [\n        {\n          field: 'category' // Critical: filter Activity Log category\n          equals: 'Administrative' // Ensures Administrative events are matched\n        }\n        {\n          field: 'operationName' // Critical: target deletion of SQL Server firewall rules\n          equals: 'Microsoft.Sql/servers/firewallRules/delete' // This makes the check PASS\n        }\n      ]\n    }\n  }\n}\n```",
+      "Other": "1. In the Azure Portal, go to Monitor > Alerts > + Create > Alert rule\n2. Scope: Select the target Subscription and click Done\n3. Condition: Click Add condition, choose Signal type = Activity log, search for and select the operation with type \"Microsoft.Sql/servers/firewallRules/delete\" (display name like \"Delete Server Firewall Rule\"), then Click Apply\n4. Actions: Skip (optional)\n5. Details: Enter an Alert rule name and ensure Enable upon creation is selected\n6. Click Create",
+      "Terraform": "```hcl\n# Activity Log Alert for SQL Server firewall rule deletions\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n  name                = \"<example_resource_name>\"\n  resource_group_name = \"<example_resource_name>\"\n  scopes              = [\"/subscriptions/<example_subscription_id>\"]\n\n  criteria {\n    category       = \"Administrative\" # Critical: filter Activity Log category\n    operation_name = \"Microsoft.Sql/servers/firewallRules/delete\" # Critical: match deletion of SQL Server firewall rules\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Navigate to the Monitor blade. 2. Select Alerts. 3. Select Create. 4. Select Alert rule. 5. Under Filter by subscription, choose a subscription. 6. Under Filter by resource type, select Server Firewall Rule (servers/firewallRules). 7. Under Filter by location, select All. 8. From the results, select the subscription. 9. Select Done. 10. Select the Condition tab. 11. Under Signal name, click Delete server firewall rule (Microsoft.Sql/servers/firewallRules). 12. Select the Actions tab. 13. To use an existing action group, click Select action groups. To create a new action group, click Create action group. Fill out the appropriate details for the selection. 14. Select the Details tab. 15. Select a Resource group, provide an Alert rule name and an optional Alert rule description. 16. Click Review + create. 17. Click Create.",
-      "Url": "https://azure.microsoft.com/en-us/updates/classic-alerting-monitoring-retirement"
+      "Text": "Create an **activity log alert** for `Microsoft.Sql/servers/firewallRules/delete` and route it via an **action group** for rapid triage.\n\nEnforce **least privilege** and **separation of duties** on SQL admins, add alerts for related create/update operations, integrate with **SIEM**, and require *change approval* to strengthen defense in depth.",
+      "Url": "https://hub.prowler.com/check/monitor_alert_delete_sqlserver_fr"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, no monitoring alerts are created."

@@ -1,30 +1,39 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_alert_service_health_exists",
-  "CheckTitle": "Ensure that an Activity Log Alert exists for Service Health",
+  "CheckTitle": "Azure subscription has an enabled Activity Log alert for Service Health incidents",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
-  "Severity": "high",
-  "ResourceType": "Monitor",
+  "Severity": "medium",
+  "ResourceType": "microsoft.insights/activitylogalerts",
   "ResourceGroup": "monitoring",
-  "Description": "Ensure that an Azure activity log alert is configured to trigger when Service Health events occur within your Microsoft Azure cloud account. The alert should activate when new events match the specified conditions in the alert rule configuration.",
-  "Risk": "Lack of monitoring for Service Health events may result in missing critical service issues, planned maintenance, security advisories, or other changes that could impact Azure services and regions in use.",
-  "RelatedUrl": "https://learn.microsoft.com/en-us/azure/service-health/overview",
+  "Description": "**Azure Monitor Activity Log alert** is configured for **Service Health** notifications where `category` is `ServiceHealth` and `properties.incidentType` is `Incident`, with the rule enabled.",
+  "Risk": "Without alerts for **Service Health incidents**, teams may miss Azure outages or degradations, harming **availability** and delaying failover. Unseen incidents can cause cascading errors, timeouts, deployment failures, and SLA breaches across dependent workloads.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-us/azure/service-health/service-health-notifications-properties",
+    "https://learn.microsoft.com/en-us/azure/service-health/alerts-activity-log-service-notifications-portal",
+    "https://learn.microsoft.com/en-us/azure/service-health/overview",
+    "https://learn.microsoft.com/en-us/azure/azure-monitor/platform/activity-log-schema",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/service-health-alert.html"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor activity-log alert create --subscription <subscription-id> --resource-group <resource-group> --name <alert-rule> --condition category=ServiceHealth and properties.incidentType=Incident --scope /subscriptions/<subscription-id> --action-group <action-group>",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/ActivityLog/service-health-alert.html",
-      "Terraform": ""
+      "CLI": "az monitor activity-log alert create --resource-group <resource-group> --name <alert-rule> --scopes /subscriptions/<subscription-id> --condition \"category=ServiceHealth and properties.incidentType=Incident\"",
+      "NativeIaC": "```bicep\n// Activity Log Alert for Service Health Incidents\nresource alert 'Microsoft.Insights/activityLogAlerts@2020-10-01' = {\n  name: '<example_resource_name>'\n  location: 'Global'\n  properties: {\n    enabled: true\n    scopes: [ subscription().id ]\n    condition: {\n      allOf: [\n        { field: 'category', equals: 'ServiceHealth' } // Critical: match Service Health category\n        { field: 'properties.incidentType', equals: 'Incident' } // Critical: alert only on Incident events\n      ]\n    }\n  }\n}\n```",
+      "Other": "1. In the Azure portal, go to Service Health > Health alerts > Create service health alert\n2. Scope: select your Subscription and choose the Resource group to save the alert\n3. Event types: select only Service issues (Incidents)\n4. Leave other filters as default, ensure Enable rule is On, then click Create",
+      "Terraform": "```hcl\n# Activity Log Alert for Service Health Incidents\nresource \"azurerm_monitor_activity_log_alert\" \"<example_resource_name>\" {\n  name                = \"<example_resource_name>\"\n  resource_group_name = \"<example_resource_name>\"\n  scopes              = [\"/subscriptions/<example_subscription_id>\"]\n\n  criteria {\n    category = \"ServiceHealth\" # Critical: Service Health category\n    service_health {\n      events = [\"Incident\"] # Critical: alert only on Incident type\n    }\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "Create an activity log alert for Service Health events and configure an action group to notify appropriate personnel.",
-      "Url": "https://learn.microsoft.com/en-us/azure/service-health/alerts-activity-log-service-notifications-portal"
+      "Text": "Create and maintain an enabled **Activity Log alert** for **Service Health Incident** events.\n- Route via **Action Groups** to on-call channels\n- Filter to critical services/regions\n- Test routing and refine recipients regularly\n- Integrate with **incident response** and **defense-in-depth** monitoring",
+      "Url": "https://hub.prowler.com/check/monitor_alert_service_health_exists"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "resilience"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, in your Azure subscription there will not be any activity log alerts configured for Service Health events."

@@ -1,30 +1,37 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_diagnostic_setting_with_appropriate_categories",
-  "CheckTitle": "Ensure Diagnostic Setting captures appropriate categories",
+  "CheckTitle": "Subscription has a diagnostic setting capturing Administrative, Security, Alert, and Policy categories",
   "CheckType": [],
   "ServiceName": "monitor",
-  "SubServiceName": "Configuring Diagnostic Settings",
+  "SubServiceName": "",
   "ResourceIdTemplate": "",
-  "Severity": "medium",
-  "ResourceType": "Monitor",
+  "Severity": "high",
+  "ResourceType": "microsoft.resources/subscriptions",
   "ResourceGroup": "monitoring",
-  "Description": "Prerequisite: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: 'Ensure that a 'Diagnostic Setting' exists.' The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
-  "Risk": "A diagnostic setting controls how the diagnostic log is exported. Capturing the diagnostic setting categories for appropriate control/management plane activities allows proper alerting.",
-  "RelatedUrl": "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
+  "Description": "**Azure Monitor Diagnostic Settings** capture **control-plane events** at the subscription level. This evaluates whether at least one setting collects the categories: `Administrative`, `Security`, `Policy`, and `Alert`.",
+  "Risk": "Without these categories, critical control-plane actions may go unrecorded. Attackers could change policies, roles, or alerts unnoticed, enabling privilege escalation and resource tampering. This erodes **integrity**, threatens **confidentiality**, and weakens **availability** and **incident response**.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/diagnostic-setting-categories.html",
+    "https://learn.microsoft.com/en-us/azure/storage/common/manage-storage-analytics-logs?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json&tabs=azure-portal"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <diagnostic settings name> --location <location> <[- -event-hub <event hub ID> --event-hub-auth-rule <event hub auth rule ID>] [-- storage-account <storage account ID>] [--workspace <log analytics workspace ID>] --logs '[{category:Security,enabled:true},{category:Administrative,enabled:true},{ca tegory:Alert,enabled:true},{category:Policy,enabled:true}]'>",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/diagnostic-setting-categories.html",
-      "Terraform": ""
+      "CLI": "az monitor diagnostic-settings subscription create --name <example_resource_name> --workspace <example_resource_id> --logs '[{\"category\":\"Administrative\",\"enabled\":true},{\"category\":\"Security\",\"enabled\":true},{\"category\":\"Alert\",\"enabled\":true},{\"category\":\"Policy\",\"enabled\":true}]'",
+      "NativeIaC": "```bicep\n// Create a subscription-level diagnostic setting capturing required categories\ntargetScope = 'subscription'\n\nresource diag 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = {\n  name: '<example_resource_name>'\n  properties: {\n    workspaceId: '<example_resource_id>' // Critical: send Activity Log to this Log Analytics workspace\n    logs: [\n      { category: 'Administrative', enabled: true } // Critical: required category\n      { category: 'Security', enabled: true } // Critical: required category\n      { category: 'Alert', enabled: true } // Critical: required category\n      { category: 'Policy', enabled: true } // Critical: required category\n    ]\n  }\n}\n```",
+      "Other": "1. In Azure portal, go to Monitor > Activity log\n2. Click Diagnostic settings > Add diagnostic setting\n3. Name the setting\n4. Under Categories, check: Administrative, Security, Alert, Policy\n5. Under Destination, select Send to Log Analytics workspace and choose your workspace\n6. Click Save",
+      "Terraform": "```hcl\n# Subscription Activity Log diagnostic setting capturing required categories\nresource \"azurerm_monitor_diagnostic_setting\" \"example\" {\n  name                       = \"<example_resource_name>\"\n  target_resource_id         = \"/subscriptions/<example_resource_id>\" # Critical: scope set to the subscription\n  log_analytics_workspace_id = \"<example_resource_id>\" # Critical: destination workspace\n\n  enabled_log { category = \"Administrative\" } # Critical: required category\n  enabled_log { category = \"Security\" } # Critical: required category\n  enabled_log { category = \"Alert\" } # Critical: required category\n  enabled_log { category = \"Policy\" } # Critical: required category\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Go to Azure Monitor 2. Click Activity log 3. Click on Export Activity Logs 4. Select the Subscription from the drop down menu 5. Click on Add diagnostic setting 6. Enter a name for your new Diagnostic Setting 7. Check the following categories: Administrative, Alert, Policy, and Security 8. Choose the destination details according to your organization's needs.",
-      "Url": "https://learn.microsoft.com/en-us/azure/storage/common/manage-storage-analytics-logs?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json&tabs=azure-portal"
+      "Text": "Collect `Administrative`, `Security`, `Policy`, and `Alert` via a subscription diagnostic setting and route them to a centralized, tamper-resistant destination. Enforce **least privilege** on log access, set retention, and create **alerts** for high-risk changes as part of **defense in depth**.",
+      "Url": "https://hub.prowler.com/check/monitor_diagnostic_setting_with_appropriate_categories"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "When the diagnostic setting is created using Azure Portal, by default no categories are selected."

@@ -1,30 +1,38 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_diagnostic_settings_exists",
-  "CheckTitle": "Ensure that a 'Diagnostic Setting' exists for Subscription Activity Logs ",
+  "CheckTitle": "Subscription has an Activity Log diagnostic setting",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
-  "Severity": "medium",
-  "ResourceType": "Monitor",
+  "Severity": "high",
+  "ResourceType": "microsoft.resources/subscriptions",
   "ResourceGroup": "monitoring",
-  "Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
-  "Risk": "A diagnostic setting controls how a diagnostic log is exported. By default, logs are retained only for 90 days. Diagnostic settings should be defined so that logs can be exported and stored for a longer duration in order to analyze security activities within an Azure subscription.",
-  "RelatedUrl": "https://learn.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings?view=azure-cli-latest",
+  "Description": "**Azure Monitor Diagnostic Settings** are configured to export the **Activity Log** to an external destination (Log Analytics, Storage, Event Hub, or partner).",
+  "Risk": "Without exporting the **Activity Log**, control-plane events lack **centralization and retention**.\n\nUndetected RBAC changes, policy updates, and resource deletions reduce **detectability**, hinder **forensics**, and weaken incident response and audit evidence.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings?WT.mc_id=AZ-MVP-5003450&tabs=portal",
+    "https://learn.microsoft.com/en-us/azure/azure-monitor/fundamentals/data-sources#export-the-activity-log-with-a-log-profile",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/subscription-activity-log-diagnostic-settings.html",
+    "https://learn.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings?view=azure-cli-latest"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <diagnostic settings name> --location <location> <[- -event-hub <event hub ID> --event-hub-auth-rule <event hub auth rule ID>] [-- storage-account <storage account ID>] [--workspace <log analytics workspace ID>] --logs '<JSON encoded categories>' (e.g. [{category:Security,enabled:true},{category:Administrative,enabled:true},{cat egory:Alert,enabled:true},{category:Policy,enabled:true}])",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/subscription-activity-log-diagnostic-settings.html#trendmicro",
-      "Terraform": ""
+      "CLI": "az monitor diagnostic-settings subscription create --subscription <subscription id> --name <example_resource_name> --workspace <log analytics workspace ID> --logs '[{\"category\":\"Administrative\",\"enabled\":true}]'",
+      "NativeIaC": "```bicep\n// Subscription-level Activity Log diagnostic setting\nresource diag 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = {\n  name: '<example_resource_name>'\n  scope: subscription() // CRITICAL: targets the subscription Activity Log\n  properties: {\n    workspaceId: '<example_resource_id>' // CRITICAL: sends logs to this Log Analytics workspace\n    logs: [\n      { category: 'Administrative', enabled: true } // CRITICAL: enables at least one Activity Log category\n    ]\n  }\n}\n```",
+      "Other": "1. In the Azure portal, go to Subscriptions and select your subscription\n2. Open Monitoring > Activity log, then click Diagnostic settings\n3. Click + Add diagnostic setting and enter a name\n4. Under Destination details, select Send to Log Analytics workspace and choose your workspace\n5. Under Categories, select Administrative\n6. Click Save",
+      "Terraform": "```hcl\n# Subscription-level Activity Log diagnostic setting\nresource \"azurerm_monitor_diagnostic_setting\" \"<example_resource_name>\" {\n  name                       = \"<example_resource_name>\"\n  target_resource_id         = \"/subscriptions/<subscription id>\" # CRITICAL: subscription scope\n  log_analytics_workspace_id = \"<example_resource_id>\" # CRITICAL: destination workspace\n\n  log {\n    category = \"Administrative\" # CRITICAL: enable at least one Activity Log category\n    enabled  = true\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "To enable Diagnostic Settings on a Subscription: 1. Go to Monitor 2. Click on Activity Log 3. Click on Export Activity Logs 4. Click + Add diagnostic setting 5. Enter a Diagnostic setting name 6. Select Categories for the diagnostic settings 7. Select the appropriate Destination details (this may be Log Analytics, Storage Account, Event Hub, or Partner solution) 8. Click Save To enable Diagnostic Settings on a specific resource: 1. Go to Monitor 2. Click Diagnostic settings 3. Click on the resource that has a diagnostics status of disabled 4. Select Add Diagnostic Setting 5. Enter a Diagnostic setting name 6. Select the appropriate log, metric, and destination. (this may be Log Analytics, Storage Account, Event Hub, or Partner solution) 7. Click save Repeat these step for all resources as needed.",
-      "Url": "https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-overview-activity-logs#export-the-activity-log-with-a-log-profile"
+      "Text": "Enable **subscription Diagnostic Settings** to send the **Activity Log** to a trusted destination.\n\nUse **immutable storage** or a **SIEM**, enforce coverage with **Azure Policy**, apply **least privilege** to log access, include essential categories, and set retention aligned to regulatory needs.",
+      "Url": "https://hub.prowler.com/check/monitor_diagnostic_settings_exists"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "By default, diagnostic setting is not set."

@@ -1,30 +1,37 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_storage_account_with_activity_logs_cmk_encrypted",
-  "CheckTitle": "Ensure the storage account containing the container with activity logs is encrypted with Customer Managed Key",
+  "CheckTitle": "Storage account storing Activity Log data is encrypted with a customer-managed key",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
   "Severity": "medium",
-  "ResourceType": "Monitor",
+  "ResourceType": "microsoft.storage/storageaccounts",
   "ResourceGroup": "monitoring",
-  "Description": "Storage accounts with the activity log exports can be configured to use CustomerManaged Keys (CMK).",
-  "Risk": "Configuring the storage account with the activity log export container to use CMKs provides additional confidentiality controls on log data, as a given user must have read permission on the corresponding storage account and must be granted decrypt permission by the CMK.",
-  "RelatedUrl": "https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-data-protection#dp-5-encrypt-sensitive-data-at-rest",
+  "Description": "**Azure Monitor Activity Logs** sent to a **Storage account** are evaluated to confirm encryption with **Customer-Managed Keys** (`CMK`) instead of **Microsoft-managed keys**.",
+  "Risk": "Storing activity logs without **CMK** weakens confidentiality and control of audit data. You lose independent key ownership, limiting rapid rotation/revocation and separation of duties. If storage credentials are compromised, attackers can exfiltrate logs that map resources and changes, aiding targeted attacks and hindering effective incident response.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log?tabs=cli#managing-legacy-log-profiles",
+    "https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-data-protection#dp-5-encrypt-sensitive-data-at-rest",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/use-cmk-for-activity-log-storage-container-encryption.html"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az storage account update --name <name of the storage account> --resource-group <resource group for a storage account> --encryption-key-source=Microsoft.Keyvault --encryption-key-vault <Key Vault URI> --encryption-key-name <KeyName> --encryption-key-version <Key Version>",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/use-cmk-for-activity-log-storage-container-encryption.html",
-      "Terraform": "https://docs.prowler.com/checks/azure/azure-general-policies/ensure-that-storage-accounts-use-customer-managed-key-for-encryption#terraform"
+      "CLI": "az storage account update --name <example_resource_name> --resource-group <example_resource_name> --assign-identity --encryption-key-source Microsoft.Keyvault --encryption-key-vault <KeyVaultURI> --encryption-key-name <KeyName>",
+      "NativeIaC": "```bicep\n// Storage account encrypted with a customer-managed key (CMK)\nresource stg 'Microsoft.Storage/storageAccounts@2023-01-01' = {\n  name: '<example_resource_name>'\n  location: resourceGroup().location\n  kind: 'StorageV2'\n  sku: { name: 'Standard_LRS' }\n  identity: { type: 'SystemAssigned' } // Required for Storage to access the Key Vault key\n  properties: {\n    encryption: {\n      keySource: 'Microsoft.Keyvault' // CRITICAL: switches encryption from Microsoft.Storage to CMK\n      keyVaultProperties: {\n        keyName: '<KeyName>'\n        keyVaultUri: '<KeyVaultURI>' // Uses latest key version if not specified\n      }\n    }\n  }\n}\n```",
+      "Other": "1. In the Azure portal, go to Storage accounts and open the account used by your Activity Log diagnostic setting\n2. Select Identity > System assigned > set Status to On > Save\n3. Go to Settings > Encryption\n4. Select Customer-managed keys, choose your Key vault and Key, then click Save\n5. Ensure the storage account's identity has Get, Wrap Key, and Unwrap Key permissions on the key in Key Vault",
+      "Terraform": "```hcl\n# Storage account encrypted with a customer-managed key (CMK)\nresource \"azurerm_storage_account\" \"<example_resource_name>\" {\n  name                     = \"<example_resource_name>\"\n  resource_group_name      = \"<example_resource_name>\"\n  location                 = \"<example_location>\"\n  account_tier             = \"Standard\"\n  account_replication_type = \"LRS\"\n\n  identity {\n    type = \"SystemAssigned\" # Required for Storage to access the Key Vault key\n  }\n\n  customer_managed_key {\n    key_vault_key_id = \"<key_vault_key_id>\" # CRITICAL: enables CMK by pointing to the Key Vault key\n  }\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. Go to Activity log 2. Select Export 3. Select Subscription 4. In section Storage Account, note the name of the Storage account 5. Close the Export Audit Logs blade. Close the Monitor - Activity Log blade. 6. In right column, Click service Storage Accounts to access Storage account blade 7. Click on the storage account name noted in step 4. This will open blade specific to that storage account 8. Under Security + networking, click Encryption. 9. Ensure Customer-managed keys is selected and Key URI is set.",
-      "Url": "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log?tabs=cli#managing-legacy-log-profiles"
+      "Text": "Encrypt the storage account that holds exported **Activity Logs** with **Customer-Managed Keys** via Azure Key Vault or Managed HSM. Apply **least privilege** to key usage, enforce regular rotation and revocation, and enable soft delete and purge protection. Complement with network isolation and immutable retention for **defense in depth**.",
+      "Url": "https://hub.prowler.com/check/monitor_storage_account_with_activity_logs_cmk_encrypted"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "encryption"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "NOTE: You must have your key vault setup to utilize this. All Audit Logs will be encrypted with a key you provide. You will need to set up customer managed keys separately, and you will select which key to use via the instructions here. You will be responsible for the lifecycle of the keys, and will need to manually replace them at your own determined intervals to keep the data secure."

@@ -1,30 +1,38 @@
 {
   "Provider": "azure",
   "CheckID": "monitor_storage_account_with_activity_logs_is_private",
-  "CheckTitle": "Ensure the Storage Container Storing the Activity Logs is not Publicly Accessible",
+  "CheckTitle": "Storage account storing activity logs does not allow public blob access",
   "CheckType": [],
   "ServiceName": "monitor",
   "SubServiceName": "",
   "ResourceIdTemplate": "",
   "Severity": "high",
-  "ResourceType": "Monitor",
+  "ResourceType": "microsoft.storage/storageaccounts",
   "ResourceGroup": "monitoring",
-  "Description": "The storage account container containing the activity log export should not be publicly accessible.",
-  "Risk": "Allowing public access to activity log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.",
-  "RelatedUrl": "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
+  "Description": "**Azure Monitor Activity Logs** sent to a **Storage account** are evaluated for **Blob public access**. The finding identifies whether the account that stores the logs has `AllowBlobPublicAccess` turned on.",
+  "Risk": "Exposed log data undermines **confidentiality** by revealing operations, resource IDs, IPs, and identities.\n\nAdversaries gain **reconnaissance** to map controls, craft targeted attacks, and time actions to avoid detection, enabling **lateral movement** and broader compromise.",
+  "RelatedUrl": "",
+  "AdditionalURLs": [
+    "https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings",
+    "https://learn.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-network-security#ns-2-secure-cloud-services-with-network-controls",
+    "https://www.trendmicro.com/cloudoneconformity/knowledge-base/azure/Monitor/check-for-publicly-accessible-activity-log-storage-container.html"
+  ],
   "Remediation": {
     "Code": {
-      "CLI": "az storage container set-permission --name insights-activity-logs --account-name <Storage Account Name> --public-access off",
-      "NativeIaC": "",
-      "Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/azure/Monitor/check-for-publicly-accessible-activity-log-storage-container.html",
-      "Terraform": "https://docs.prowler.com/checks/azure/azure-logging-policies/ensure-the-storage-container-storing-the-activity-logs-is-not-publicly-accessible#terraform"
+      "CLI": "az storage account update --name <STORAGE_ACCOUNT_NAME> --resource-group <RESOURCE_GROUP_NAME> --allow-blob-public-access false",
+      "NativeIaC": "```bicep\n// Set storage account to disallow public blob access\nresource sa 'Microsoft.Storage/storageAccounts@2023-01-01' = {\n  name: '<example_resource_name>'\n  location: resourceGroup().location\n  sku: { name: 'Standard_LRS' }\n  kind: 'StorageV2'\n  properties: {\n    allowBlobPublicAccess: false // Critical: disables public access at the account level\n  }\n}\n```",
+      "Other": "1. In Azure Portal, go to the storage account used by the diagnostic/Activity Log export\n2. Under Settings, select Configuration\n3. Set \"Allow Blob public access\" to Disabled\n4. Click Save",
+      "Terraform": "```hcl\n# Disable public blob access on the storage account\nresource \"azurerm_storage_account\" \"<example_resource_name>\" {\n  name                     = \"<example_resource_name>\"\n  resource_group_name      = \"<example_resource_name>\"\n  location                 = \"<example_resource_name>\"\n  account_tier             = \"Standard\"\n  account_replication_type = \"LRS\"\n\n  allow_blob_public_access = false # Critical: disables public access at the account level\n}\n```"
     },
     "Recommendation": {
-      "Text": "1. From Azure Home select the Portal Menu 2. Search for Storage Accounts to access Storage account blade 3. Click on the storage account name 4. Click on Configuration under settings 5. Select Enabled under 'Allow Blob public access'",
-      "Url": "https://docs.microsoft.com/en-us/security/benchmark/azure/security-controls-v3-network-security#ns-2-secure-cloud-services-with-network-controls"
+      "Text": "Set `AllowBlobPublicAccess=false` on the storage account holding logs. Enforce **least privilege** via RBAC or scoped SAS, use **private endpoints** and network restrictions, and enable **immutability** for log containers to add **defense in depth** and prevent unauthorized access.",
+      "Url": "https://hub.prowler.com/check/monitor_storage_account_with_activity_logs_is_private"
     }
   },
-  "Categories": [],
+  "Categories": [
+    "internet-exposed",
+    "logging"
+  ],
   "DependsOn": [],
   "RelatedTo": [],
   "Notes": "Configuring container Access policy to private will remove access from the container for everyone except owners of the storage account. Access policy needs to be set explicitly in order to allow access to other desired users."
|
||||
|
||||
@@ -188,5 +188,5 @@ export const getUserByMe = async (accessToken: string) => {
|
||||
};
|
||||
|
||||
export async function logOut() {
|
||||
await signOut();
|
||||
await signOut({ redirectTo: "/sign-in" });
|
||||
}
|
||||
|
||||
@@ -74,11 +74,17 @@ test.describe("Session Error Messages", () => {
     await scansPage.goto();
     await expect(page.locator("main")).toBeVisible();
 
+    // Navigate to a safe public page before clearing cookies
+    // This prevents background requests from the protected page (scans)
+    // triggering a client-side redirect race condition when cookies are cleared
+    await signInPage.goto();
+
     // Clear cookies to simulate session expiry
     await context.clearCookies();
 
-    // Try to navigate to a different protected route
-    await providersPage.goto();
+    // Use fresh navigation to force middleware evaluation
+    await providersPage.gotoFresh();
 
     // Should be redirected to login with callbackUrl
     await signInPage.verifyRedirectWithCallback("/providers");
@@ -1,6 +1,11 @@
 import { expect, test } from "@playwright/test";
 
-import { getSession, TEST_CREDENTIALS, verifySessionValid } from "../helpers";
+import {
+  getSession,
+  getSessionWithoutCookies,
+  TEST_CREDENTIALS,
+  verifySessionValid,
+} from "../helpers";
 import { HomePage } from "../home/home-page";
 import { SignInPage } from "../sign-in-base/sign-in-base-page";
@@ -71,7 +76,7 @@ test.describe("Token Refresh Flow", () => {
 
     await context.clearCookies();
 
-    const expiredSession = await getSession(page);
+    const expiredSession = await getSessionWithoutCookies(page);
     expect(expiredSession).toBeNull();
   },
 );
@@ -143,9 +143,10 @@
 ### Flow Steps:
 1. Log in with valid credentials.
 2. Navigate to a protected route (/scans).
-3. Clear cookies to simulate session expiry.
-4. Navigate to another protected route (/providers).
-5. Verify redirect to sign-in includes callbackUrl parameter.
+3. Navigate to a safe public page (/sign-in).
+4. Clear cookies to simulate session expiry.
+5. Navigate to another protected route (/providers) using fresh navigation.
+6. Verify redirect to sign-in includes callbackUrl parameter.
 
 ### Expected Result:
 - URL contains callbackUrl=/providers parameter.
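
A condensed Playwright sketch of the updated flow, assuming the page objects take the page in their constructors as elsewhere in this suite (constructor signatures and import paths are assumed, the method names come from the hunks above):

```ts
import { test } from "@playwright/test";

import { TEST_CREDENTIALS } from "../helpers";
import { SignInPage } from "../sign-in-base/sign-in-base-page";
import { ScansPage } from "../scans/scans-page";
import { ProvidersPage } from "../providers/providers-page";

test("expired session redirects to sign-in with callbackUrl", async ({ page, context }) => {
  const signInPage = new SignInPage(page); // constructor signature assumed
  const scansPage = new ScansPage(page);
  const providersPage = new ProvidersPage(page);

  // Steps 1-2: log in and visit a protected route
  await signInPage.loginAndVerify(TEST_CREDENTIALS.VALID);
  await scansPage.goto();

  // Step 3: park on a public page so no protected-page request races the redirect
  await signInPage.goto();

  // Step 4: simulate session expiry
  await context.clearCookies();

  // Step 5: fresh navigation forces the middleware to evaluate the dead session
  await providersPage.gotoFresh();

  // Step 6: assert the redirect carries the original destination
  await signInPage.verifyRedirectWithCallback("/providers");
});
```
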
@@ -1,4 +1,4 @@
-import { Locator, Page, expect } from "@playwright/test";
+import { Locator, Page, expect, request } from "@playwright/test";
 import { AWSProviderCredential, AWSProviderData, AWS_CREDENTIAL_OPTIONS, ProvidersPage } from "./providers/providers-page";
 import { ScansPage } from "./scans/scans-page";
@@ -47,6 +47,20 @@ export async function getSession(page: Page) {
   return response.json();
 }
 
+export async function getSessionWithoutCookies(page: Page) {
+  const currentUrl = page.url();
+  const baseUrl = currentUrl.startsWith("http")
+    ? new URL(currentUrl).origin
+    : process.env.NEXTAUTH_URL || "http://localhost:3000";
+
+  const apiContext = await request.newContext({ baseURL: baseUrl });
+  const response = await apiContext.get("/api/auth/session");
+  const session = await response.json();
+  await apiContext.dispose();
+
+  return session;
+}
+
 export async function verifySessionValid(page: Page) {
   const session = await getSession(page);
   expect(session).toBeTruthy();
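
The point of the new helper is that a brand-new request context carries no cookies at all, whereas `getSession(page)` asks through the page's own cookie jar. A short contrast sketch, assumed to run inside a test that has already signed in (whether an anonymous call yields `null` or an empty object depends on the NextAuth version; the tests above expect `null`):

```ts
// With the page's cookies: the session endpoint sees the auth cookie
const authenticated = await getSession(page);
expect(authenticated).toBeTruthy();

// With a fresh, cookie-less request context: no credentials reach the endpoint
const anonymous = await getSessionWithoutCookies(page);
expect(anonymous).toBeNull();
```
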
@@ -428,6 +428,12 @@ export class ProvidersPage extends BasePage {
     await super.goto("/providers");
   }
 
+  async gotoFresh(): Promise<void> {
+    // Go to the providers page with fresh navigation
+    await super.gotoFresh("/providers");
+  }
+
   private async verifyPageHasProwlerTitle(): Promise<void> {
     await expect(this.page).toHaveTitle(/Prowler/);
   }
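
`BasePage.gotoFresh` itself is not shown in this diff. One plausible, purely hypothetical sketch of what a "fresh" navigation could look like: a hard `page.goto` bypasses Next.js client-side routing, so the request actually reaches the middleware with the current cookies.

```ts
// Hypothetical BasePage method; the real implementation is not in this diff.
async gotoFresh(path: string): Promise<void> {
  // A full document navigation (not a client-side route change) forces the
  // middleware to re-evaluate the session on every request.
  await this.page.goto(path, { waitUntil: "domcontentloaded" });
}
```
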
@@ -110,8 +110,18 @@ export class ScansPage extends BasePage {
       .filter({ hasText: accountId })
       .first();
 
-    // Verify the row with the account ID is visible (provider exists)
-    await expect(rowWithAccountId).toBeVisible();
+    try {
+      // Verify the row with the account ID is visible (provider exists)
+      // Use a short timeout first to allow for a quick check
+      await expect(rowWithAccountId).toBeVisible({ timeout: 5000 });
+    } catch {
+      // If not visible immediately (likely due to async backend processing),
+      // reload the page to fetch the latest data
+      await this.page.reload();
+      await this.verifyPageLoaded();
+      // Wait longer after reload
+      await expect(rowWithAccountId).toBeVisible({ timeout: 15000 });
+    }
 
     // Verify the row contains "scheduled scan" in the Scan name column
     // The scan name "Daily scheduled scan" is displayed as "scheduled scan" in the table
@@ -150,9 +150,11 @@ test.describe("Session Persistence", () => {
     await signInPage.loginAndVerify(TEST_CREDENTIALS.VALID);
 
     await homePage.signOut();
-    await signInPage.verifyLogoutSuccess();
-
-    await homePage.goto();
+    // Wait for signOut to redirect to /sign-in (NextAuth signOut flow)
+    await page.waitForURL(/\/sign-in/, { timeout: 15000 });
+
+    // Verify we're on the sign-in page with the form visible
+    await signInPage.verifyOnSignInPage();
   },
 );