fix(sdk): cover CNAME → dangling S3 in route53 takeover check (#10920)

This commit is contained in:
Hugo Pereira Brito
2026-04-29 11:14:33 +01:00
committed by GitHub
parent 13b04d339b
commit 380b89cfb6
4 changed files with 324 additions and 16 deletions
+1
View File
@@ -10,6 +10,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
### 🔄 Changed
- `route53_dangling_ip_subdomain_takeover` now also flags `CNAME` records pointing to S3 website endpoints whose buckets are missing from the account [(#10920)](https://github.com/prowler-cloud/prowler/pull/10920)
- Azure Network Watcher flow log checks now require workspace-backed Traffic Analytics for `network_flow_log_captured_sent` and align metadata with VNet-compatible flow log guidance [(#10645)](https://github.com/prowler-cloud/prowler/pull/10645)
---
@@ -1,7 +1,7 @@
{
"Provider": "aws",
"CheckID": "route53_dangling_ip_subdomain_takeover",
"CheckTitle": "Route53 A record does not point to a dangling IP address",
"CheckTitle": "Route53 record does not point to a dangling AWS resource",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
"TTPs/Initial Access",
@@ -13,13 +13,14 @@
"Severity": "high",
"ResourceType": "AwsRoute53HostedZone",
"ResourceGroup": "network",
"Description": "**Route 53 `A` records** (non-alias) that use literal IPs are evaluated for **public AWS addresses** not currently assigned to resources in the account. Entries that match AWS ranges yet lack ownership are identified as potential **dangling IP targets**.",
"Risk": "**Dangling DNS `A` records** pointing to released AWS IPs enable **subdomain takeover**. An attacker who later obtains that IP can:\n- Redirect or alter content (integrity)\n- Capture credentials/cookies (confidentiality)\n- Disrupt or impersonate services (availability)",
"Description": "**Route 53 records** are evaluated for two **subdomain takeover** vectors: (1) non-alias **`A` records** using literal IPs in **public AWS ranges** that are not assigned to resources in the account (released EIPs/ENI public IPs); and (2) non-alias **`CNAME` records** targeting an **S3 website endpoint** (`*.s3-website[.-]<region>.amazonaws.com`) whose bucket no longer exists in the account.",
"Risk": "**Dangling DNS records** pointing to released AWS resources enable **subdomain takeover**. An attacker who later claims the IP — or registers an S3 bucket with the same name in any AWS account — can:\n- Redirect or alter content (integrity)\n- Capture credentials/cookies (confidentiality)\n- Disrupt or impersonate services (availability)",
"RelatedUrl": "",
"AdditionalURLs": [
"https://support.icompaas.com/support/solutions/articles/62000233461-ensure-route53-records-contains-dangling-ips-",
"https://www.trendmicro.com/trendaivisiononecloudriskmanagement/knowledge-base/aws/Route53/dangling-dns-records.html",
"https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-deleting.html"
"https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-deleting.html",
"https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteEndpoints.html"
],
"Remediation": {
"Code": {
@@ -29,7 +30,7 @@
"Terraform": "```hcl\n# Terraform: convert A record to Alias to avoid dangling public IPs\nresource \"aws_route53_record\" \"<example_resource_name>\" {\n zone_id = \"<example_resource_id>\"\n name = \"<example_resource_name>\"\n type = \"A\"\n\n alias { # CRITICAL: Alias to AWS resource (no direct IP)\n name = \"<ALIAS_TARGET_DNS_NAME>\" # e.g., dualstack.<alb>.amazonaws.com\n zone_id = \"<ALIAS_TARGET_HOSTED_ZONE_ID>\"\n evaluate_target_health = false\n }\n}\n```"
},
"Recommendation": {
"Text": "Remove or update any record that points to an unassigned IP. Avoid hard-coding AWS public IPs in `A` records; use **aliases/CNAMEs** to managed endpoints. Enforce **asset lifecycle** decommissioning, routine DNS-asset reconciliation, and **change control** with monitoring to prevent and detect drift.",
"Text": "Remove or update any record that points to an unowned AWS resource: unassigned public IPs in `A` records and S3 website endpoints in `CNAME` records whose bucket has been deleted. Avoid hard-coding AWS public IPs in `A` records; prefer **aliases** to managed endpoints (ALB, CloudFront, S3) and delete CNAMEs as soon as the backing bucket is removed. Enforce **asset lifecycle** decommissioning, routine DNS-asset reconciliation, and **change control** with monitoring to prevent and detect drift.",
"Url": "https://hub.prowler.com/check/route53_dangling_ip_subdomain_takeover"
}
},
@@ -1,3 +1,4 @@
import re
from ipaddress import ip_address
import awsipranges
@@ -6,6 +7,14 @@ from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.lib.utils.utils import validate_ip_address
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.route53.route53_client import route53_client
from prowler.providers.aws.services.s3.s3_client import s3_client
# S3 website endpoint formats:
# <bucket>.s3-website-<region>.amazonaws.com (legacy, dash)
# <bucket>.s3-website.<region>.amazonaws.com (newer, dot)
# The bucket group allows dotted bucket names, and the optional trailing
# dot accepts fully-qualified (root-terminated) DNS record values.
S3_WEBSITE_ENDPOINT_REGEX = re.compile(
r"^(?P<bucket>[^.]+(?:\.[^.]+)*)\.s3-website[.-](?P<region>[a-z0-9-]+)\.amazonaws\.com\.?$"
)
class route53_dangling_ip_subdomain_takeover(Check):
@@ -24,11 +33,14 @@ class route53_dangling_ip_subdomain_takeover(Check):
if ni.association and ni.association.get("PublicIp"):
public_ips.append(ni.association.get("PublicIp"))
owned_bucket_names = {bucket.name for bucket in s3_client.buckets.values()}
for record_set in route53_client.record_sets:
# Check only A records and avoid aliases (only need to check IPs not AWS Resources)
hosted_zone = route53_client.hosted_zones[record_set.hosted_zone_id]
# A records: dangling-IP path (released EIPs / unowned AWS IPs)
if record_set.type == "A" and not record_set.is_alias:
for record in record_set.records:
# Check if record is an IP Address
if validate_ip_address(record):
report = Check_Report_AWS(
metadata=self.metadata(), resource=record_set
@@ -36,25 +48,45 @@ class route53_dangling_ip_subdomain_takeover(Check):
report.resource_id = (
f"{record_set.hosted_zone_id}/{record_set.name}/{record}"
)
report.resource_arn = route53_client.hosted_zones[
record_set.hosted_zone_id
].arn
report.resource_tags = route53_client.hosted_zones[
record_set.hosted_zone_id
].tags
report.resource_arn = hosted_zone.arn
report.resource_tags = hosted_zone.tags
report.status = "PASS"
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is not a dangling IP."
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {hosted_zone.name} is not a dangling IP."
# If Public IP check if it is in the AWS Account
if (
not ip_address(record).is_private
and record not in public_ips
):
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} does not belong to AWS and it is not a dangling IP."
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {hosted_zone.name} does not belong to AWS and it is not a dangling IP."
# Check if potential dangling IP is within AWS Ranges
aws_ip_ranges = awsipranges.get_ranges()
if aws_ip_ranges.get(record):
report.status = "FAIL"
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is a dangling IP which can lead to a subdomain takeover attack."
report.status_extended = f"Route53 record {record} (name: {record_set.name}) in Hosted Zone {hosted_zone.name} is a dangling IP which can lead to a subdomain takeover attack."
findings.append(report)
# CNAME records: dangling S3 website endpoint
# (deleted bucket whose name can be re-registered by anyone)
elif record_set.type == "CNAME" and not record_set.is_alias:
for record in record_set.records:
match = S3_WEBSITE_ENDPOINT_REGEX.match(record.lower())
if not match:
continue
bucket_name = match.group("bucket")
report = Check_Report_AWS(
metadata=self.metadata(), resource=record_set
)
report.resource_id = (
f"{record_set.hosted_zone_id}/{record_set.name}/{record}"
)
report.resource_arn = hosted_zone.arn
report.resource_tags = hosted_zone.tags
if bucket_name in owned_bucket_names:
report.status = "PASS"
report.status_extended = f"Route53 CNAME {record_set.name} in Hosted Zone {hosted_zone.name} points to S3 website endpoint of bucket {bucket_name} which exists in the account."
else:
report.status = "FAIL"
report.status_extended = f"Route53 CNAME {record_set.name} in Hosted Zone {hosted_zone.name} points to S3 website endpoint of bucket {bucket_name} which does not exist in the account and can lead to a subdomain takeover attack."
findings.append(report)
return findings
@@ -4,6 +4,7 @@ from boto3 import client, resource
from moto import mock_aws
from tests.providers.aws.utils import (
AWS_REGION_EU_WEST_1,
AWS_REGION_US_EAST_1,
AWS_REGION_US_WEST_2,
set_mocked_aws_provider,
@@ -502,3 +503,276 @@ class Test_route53_dangling_ip_subdomain_takeover:
result[0].status_extended
== f"Route53 record {record_ip} (name: {record_set_name}) in Hosted Zone {HOSTED_ZONE_NAME} is not a dangling IP."
)
@mock_aws
def test_hosted_zone_cname_to_existing_s3_website_bucket(self):
    """PASS expected: the CNAME targets the website endpoint of a bucket we own."""
    bucket_name = "my-static-site"
    # The bucket exists in the account, so the endpoint is not claimable.
    client("s3", region_name=AWS_REGION_US_EAST_1).create_bucket(Bucket=bucket_name)

    route53 = client("route53", region_name=AWS_REGION_US_EAST_1)
    zone_id = route53.create_hosted_zone(
        Name=HOSTED_ZONE_NAME, CallerReference=str(hash("foo"))
    )["HostedZone"]["Id"]
    record_set_name = "www.testdns.aws.com."
    cname_target = f"{bucket_name}.s3-website-us-east-1.amazonaws.com"
    # Non-alias CNAME using the legacy (dash-style) website endpoint.
    route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [
                {
                    "Action": "CREATE",
                    "ResourceRecordSet": {
                        "Name": record_set_name,
                        "Type": "CNAME",
                        "TTL": 60,
                        "ResourceRecords": [{"Value": cname_target}],
                    },
                }
            ]
        },
    )

    from prowler.providers.aws.services.ec2.ec2_service import EC2
    from prowler.providers.aws.services.route53.route53_service import Route53
    from prowler.providers.aws.services.s3.s3_service import S3

    aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
    check_path = (
        "prowler.providers.aws.services.route53."
        "route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover"
    )
    # Patch the global provider and every service client the check reads.
    with mock.patch(
        "prowler.providers.common.provider.Provider.get_global_provider",
        return_value=aws_provider,
    ), mock.patch(
        f"{check_path}.route53_client", new=Route53(aws_provider)
    ), mock.patch(
        f"{check_path}.ec2_client", new=EC2(aws_provider)
    ), mock.patch(
        f"{check_path}.s3_client", new=S3(aws_provider)
    ):
        from prowler.providers.aws.services.route53.route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover import (
            route53_dangling_ip_subdomain_takeover,
        )

        result = route53_dangling_ip_subdomain_takeover().execute()

        assert len(result) == 1
        finding = result[0]
        assert finding.status == "PASS"
        assert finding.status_extended == (
            f"Route53 CNAME {record_set_name} in Hosted Zone {HOSTED_ZONE_NAME} "
            f"points to S3 website endpoint of bucket {bucket_name} which exists in the account."
        )
        plain_zone_id = zone_id.replace("/hostedzone/", "")
        assert finding.resource_id == f"{plain_zone_id}/{record_set_name}/{cname_target}"
        assert finding.resource_arn == (
            f"arn:{aws_provider.identity.partition}:route53:::hostedzone/{plain_zone_id}"
        )
@mock_aws
def test_hosted_zone_cname_to_dangling_s3_website_bucket(self):
    """FAIL expected: the CNAME targets a website endpoint with no backing bucket."""
    # Bucket name referenced by the CNAME is NOT created in the account
    # (simulates a deleted bucket whose name is now claimable by anyone).
    missing_bucket = "deleted-static-site"

    route53 = client("route53", region_name=AWS_REGION_US_EAST_1)
    zone_id = route53.create_hosted_zone(
        Name=HOSTED_ZONE_NAME, CallerReference=str(hash("foo"))
    )["HostedZone"]["Id"]
    record_set_name = "www.testdns.aws.com."
    cname_target = f"{missing_bucket}.s3-website-us-east-1.amazonaws.com"
    route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [
                {
                    "Action": "CREATE",
                    "ResourceRecordSet": {
                        "Name": record_set_name,
                        "Type": "CNAME",
                        "TTL": 60,
                        "ResourceRecords": [{"Value": cname_target}],
                    },
                }
            ]
        },
    )

    from prowler.providers.aws.services.ec2.ec2_service import EC2
    from prowler.providers.aws.services.route53.route53_service import Route53
    from prowler.providers.aws.services.s3.s3_service import S3

    aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
    check_path = (
        "prowler.providers.aws.services.route53."
        "route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover"
    )
    with mock.patch(
        "prowler.providers.common.provider.Provider.get_global_provider",
        return_value=aws_provider,
    ), mock.patch(
        f"{check_path}.route53_client", new=Route53(aws_provider)
    ), mock.patch(
        f"{check_path}.ec2_client", new=EC2(aws_provider)
    ), mock.patch(
        f"{check_path}.s3_client", new=S3(aws_provider)
    ):
        from prowler.providers.aws.services.route53.route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover import (
            route53_dangling_ip_subdomain_takeover,
        )

        result = route53_dangling_ip_subdomain_takeover().execute()

        assert len(result) == 1
        finding = result[0]
        assert finding.status == "FAIL"
        assert finding.status_extended == (
            f"Route53 CNAME {record_set_name} in Hosted Zone {HOSTED_ZONE_NAME} "
            f"points to S3 website endpoint of bucket {missing_bucket} "
            f"which does not exist in the account and can lead to a subdomain takeover attack."
        )
        plain_zone_id = zone_id.replace("/hostedzone/", "")
        assert finding.resource_id == f"{plain_zone_id}/{record_set_name}/{cname_target}"
@mock_aws
def test_hosted_zone_cname_to_dangling_s3_website_bucket_dot_format(self):
    """FAIL expected for the newer dot-style endpoint:
    <bucket>.s3-website.<region>.amazonaws.com with no backing bucket."""
    missing_bucket = "deleted-eu-site"

    route53 = client("route53", region_name=AWS_REGION_US_EAST_1)
    zone_id = route53.create_hosted_zone(
        Name=HOSTED_ZONE_NAME, CallerReference=str(hash("foo"))
    )["HostedZone"]["Id"]
    record_set_name = "eu.testdns.aws.com."
    cname_target = f"{missing_bucket}.s3-website.{AWS_REGION_EU_WEST_1}.amazonaws.com"
    route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [
                {
                    "Action": "CREATE",
                    "ResourceRecordSet": {
                        "Name": record_set_name,
                        "Type": "CNAME",
                        "TTL": 60,
                        "ResourceRecords": [{"Value": cname_target}],
                    },
                }
            ]
        },
    )

    from prowler.providers.aws.services.ec2.ec2_service import EC2
    from prowler.providers.aws.services.route53.route53_service import Route53
    from prowler.providers.aws.services.s3.s3_service import S3

    aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
    check_path = (
        "prowler.providers.aws.services.route53."
        "route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover"
    )
    with mock.patch(
        "prowler.providers.common.provider.Provider.get_global_provider",
        return_value=aws_provider,
    ), mock.patch(
        f"{check_path}.route53_client", new=Route53(aws_provider)
    ), mock.patch(
        f"{check_path}.ec2_client", new=EC2(aws_provider)
    ), mock.patch(
        f"{check_path}.s3_client", new=S3(aws_provider)
    ):
        from prowler.providers.aws.services.route53.route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover import (
            route53_dangling_ip_subdomain_takeover,
        )

        result = route53_dangling_ip_subdomain_takeover().execute()

        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert missing_bucket in result[0].status_extended
@mock_aws
def test_hosted_zone_cname_to_non_s3_target_is_ignored(self):
    """No finding expected: CNAMEs that do not target an S3 website endpoint
    are outside the scope of this check."""
    route53 = client("route53", region_name=AWS_REGION_US_EAST_1)
    zone_id = route53.create_hosted_zone(
        Name=HOSTED_ZONE_NAME, CallerReference=str(hash("foo"))
    )["HostedZone"]["Id"]
    # Plain external CNAME target — no S3 website endpoint pattern.
    route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            "Changes": [
                {
                    "Action": "CREATE",
                    "ResourceRecordSet": {
                        "Name": "blog.testdns.aws.com.",
                        "Type": "CNAME",
                        "TTL": 60,
                        "ResourceRecords": [{"Value": "external-host.example.com"}],
                    },
                }
            ]
        },
    )

    from prowler.providers.aws.services.ec2.ec2_service import EC2
    from prowler.providers.aws.services.route53.route53_service import Route53
    from prowler.providers.aws.services.s3.s3_service import S3

    aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
    check_path = (
        "prowler.providers.aws.services.route53."
        "route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover"
    )
    with mock.patch(
        "prowler.providers.common.provider.Provider.get_global_provider",
        return_value=aws_provider,
    ), mock.patch(
        f"{check_path}.route53_client", new=Route53(aws_provider)
    ), mock.patch(
        f"{check_path}.ec2_client", new=EC2(aws_provider)
    ), mock.patch(
        f"{check_path}.s3_client", new=S3(aws_provider)
    ):
        from prowler.providers.aws.services.route53.route53_dangling_ip_subdomain_takeover.route53_dangling_ip_subdomain_takeover import (
            route53_dangling_ip_subdomain_takeover,
        )

        result = route53_dangling_ip_subdomain_takeover().execute()

        assert len(result) == 0