Compare commits

..

11 Commits

Author SHA1 Message Date
Chandrapal Badshah 77451d5cf8 chore: update changelog 2025-09-23 16:45:21 +05:30
Chandrapal Badshah 5b54b47930 fix(lighthouse): allow scrolling during AI response streaming (#8669)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
(cherry picked from commit 3949ab736d)
2025-09-23 09:09:41 +00:00
Prowler Bot a1168e3082 fix: handle 4XX and 204 properly (#8732)
Co-authored-by: Alejandro Bailo <59607668+alejandrobailo@users.noreply.github.com>
2025-09-15 17:43:18 +02:00
Prowler Bot f2341c9878 chore(changelog): remove whitespace in links (#8718)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2025-09-12 18:10:48 +05:45
Prowler Bot 67b8e925e5 chore(release): Bump version to v5.12.2 (#8713)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2025-09-12 13:24:37 +02:00
Prowler Bot ad4475efc9 fix(firehose): false positive in firehose_stream_encrypted_at_rest (#8707)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-09-12 10:02:25 +02:00
Prowler Bot 4dd6547b9c fix(auth): validate email field (#8706)
Co-authored-by: Alejandro Bailo <59607668+alejandrobailo@users.noreply.github.com>
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2025-09-11 16:25:35 +02:00
Prowler Bot cc4d759f47 fix(auth): add method attribute to form for proper submission handling (#8705)
Co-authored-by: Alejandro Bailo <59607668+alejandrobailo@users.noreply.github.com>
2025-09-11 19:36:24 +05:45
Prowler Bot e9aca866c8 fix(defender): change policies rules key (#8703)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
Co-authored-by: HugoPBrito <hugopbrit@gmail.com>
2025-09-11 14:00:52 +02:00
Prowler Bot 12f9e477a3 fix(compliance): replace old check id with new one (#8686)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
Co-authored-by: Daniel Barranquero <danielbo2001@gmail.com>
2025-09-09 15:34:09 +02:00
Prowler Bot a2a3b7c125 chore(release): Bump version to v5.12.1 (#8680)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2025-09-09 16:35:54 +05:45
47 changed files with 910 additions and 278 deletions
+3
View File
@@ -78,3 +78,6 @@ _data/
# Claude
CLAUDE.md
# LLM's (Until we have a standard one)
AGENTS.md
File diff suppressed because one or more lines are too long
+8
View File
@@ -1,6 +1,14 @@
# Prowler SDK Changelog
All notable changes to the **Prowler SDK** are documented in this file.
## [v5.12.1] (Prowler v5.12.1)
### Fixed
- Replaced old check id with new ones for compliance files [(#8682)](https://github.com/prowler-cloud/prowler/pull/8682)
- `firehose_stream_encrypted_at_rest` check false positives and new api call in kafka service [(#8599)](https://github.com/prowler-cloud/prowler/pull/8599)
- Replace defender rules policies key to use old name [(#8702)](https://github.com/prowler-cloud/prowler/pull/8702)
## [v5.12.0] (Prowler v5.12.0)
### Added
@@ -364,8 +364,8 @@
"ec2_ami_public",
"ec2_instance_public_ip",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -721,8 +721,8 @@
"ec2_networkacl_allow_ingress_tcp_port_22",
"ec2_networkacl_allow_ingress_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1510,8 +1510,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1604,8 +1604,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1698,8 +1698,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1558,8 +1558,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1682,7 +1682,7 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601",
@@ -1814,7 +1814,7 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306",
@@ -1917,7 +1917,7 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23",
@@ -3024,8 +3024,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -4588,4 +4588,4 @@
]
}
]
}
}
@@ -1557,8 +1557,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1682,7 +1682,7 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601",
@@ -1816,7 +1816,7 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306",
@@ -1919,7 +1919,7 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23",
@@ -3028,8 +3028,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -4603,4 +4603,4 @@
]
}
]
}
}
+10 -10
View File
@@ -107,8 +107,8 @@
"ec2_networkacl_allow_ingress_tcp_port_22",
"ec2_networkacl_allow_ingress_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1024,8 +1024,8 @@
"ec2_networkacl_allow_ingress_tcp_port_22",
"ec2_networkacl_allow_ingress_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1470,8 +1470,8 @@
"ec2_networkacl_allow_ingress_tcp_port_22",
"ec2_networkacl_allow_ingress_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1650,8 +1650,8 @@
"ec2_networkacl_allow_ingress_tcp_port_22",
"ec2_networkacl_allow_ingress_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -1902,8 +1902,8 @@
"ec2_networkacl_allow_ingress_tcp_port_22",
"ec2_networkacl_allow_ingress_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -553,8 +553,8 @@
"Description": "Ensure that ec2 security groups do not allow ingress from internet to common ports",
"Checks": [
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
@@ -66,7 +66,7 @@
"elbv2_ssl_listeners",
"ssm_documents_set_as_public",
"vpc_subnet_no_public_ip_by_default",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306",
"s3_account_level_public_access_blocks"
+2 -2
View File
@@ -253,8 +253,8 @@
"ec2_securitygroup_allow_ingress_from_internet_to_all_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_any_port",
"ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports",
"ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mongodb_27017_27018",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_ftp_20_21",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389",
"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888",
+1 -1
View File
@@ -12,7 +12,7 @@ from prowler.lib.logger import logger
timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
prowler_version = "5.12.0"
prowler_version = "5.12.2"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
@@ -3,6 +3,7 @@ from typing import List
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.firehose.firehose_client import firehose_client
from prowler.providers.aws.services.firehose.firehose_service import EncryptionStatus
from prowler.providers.aws.services.kafka.kafka_client import kafka_client
from prowler.providers.aws.services.kinesis.kinesis_client import kinesis_client
from prowler.providers.aws.services.kinesis.kinesis_service import EncryptionType
@@ -37,7 +38,28 @@ class firehose_stream_encrypted_at_rest(Check):
report.status = "PASS"
report.status_extended = f"Firehose Stream {stream.name} does not have at rest encryption enabled but the source stream {source_stream.name} has at rest encryption enabled."
# Check if the stream has encryption enabled directly
# MSK source - check if the MSK cluster has encryption at rest with CMK
elif stream.delivery_stream_type == "MSKAsSource":
msk_cluster_arn = stream.source.msk.msk_cluster_arn
if msk_cluster_arn:
msk_cluster = None
for cluster in kafka_client.clusters.values():
if cluster.arn == msk_cluster_arn:
msk_cluster = cluster
break
if msk_cluster:
# All MSK clusters (both provisioned and serverless) always have encryption at rest enabled by AWS
# AWS MSK always encrypts data at rest - either with AWS managed keys or CMK
report.status = "PASS"
if msk_cluster.kafka_version == "SERVERLESS":
report.status_extended = f"Firehose Stream {stream.name} uses MSK serverless source which always has encryption at rest enabled by default."
else:
report.status_extended = f"Firehose Stream {stream.name} uses MSK provisioned source which always has encryption at rest enabled by AWS (either with AWS managed keys or CMK)."
else:
report.status_extended = f"Firehose Stream {stream.name} uses MSK source which always has encryption at rest enabled by AWS."
# Check if the stream has encryption enabled directly (DirectPut or DatabaseAsSource cases)
elif stream.kms_encryption == EncryptionStatus.ENABLED:
report.status = "PASS"
report.status_extended = f"Firehose Stream {stream.name} does have at rest encryption enabled."
@@ -12,7 +12,12 @@ class kafka_cluster_encryption_at_rest_uses_cmk(Check):
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have encryption at rest enabled with a CMK."
if any(
# Serverless clusters always have encryption at rest enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has encryption at rest enabled by default."
# For provisioned clusters, check if they use a customer managed KMS key
elif any(
(
cluster.data_volume_kms_key_id == key.arn
and getattr(key, "manager", "") == "CUSTOMER"
@@ -13,7 +13,12 @@ class kafka_cluster_enhanced_monitoring_enabled(Check):
f"Kafka cluster '{cluster.name}' has enhanced monitoring enabled."
)
if cluster.enhanced_monitoring == "DEFAULT":
# Serverless clusters always have enhanced monitoring enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has enhanced monitoring enabled by default."
# For provisioned clusters, check the enhanced monitoring configuration
elif cluster.enhanced_monitoring == "DEFAULT":
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have enhanced monitoring enabled."
@@ -11,7 +11,12 @@ class kafka_cluster_in_transit_encryption_enabled(Check):
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have encryption in transit enabled."
if (
# Serverless clusters always have encryption in transit enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has encryption in transit enabled by default."
# For provisioned clusters, check the encryption configuration
elif (
cluster.encryption_in_transit.client_broker == "TLS"
and cluster.encryption_in_transit.in_cluster
):
@@ -13,7 +13,12 @@ class kafka_cluster_is_public(Check):
f"Kafka cluster {cluster.name} is publicly accessible."
)
if not cluster.public_access:
# Serverless clusters are always private by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster {cluster.name} is serverless and always private by default."
# For provisioned clusters, check the public access configuration
elif not cluster.public_access:
report.status = "PASS"
report.status_extended = (
f"Kafka cluster {cluster.name} is not publicly accessible."
@@ -11,7 +11,12 @@ class kafka_cluster_mutual_tls_authentication_enabled(Check):
report.status = "FAIL"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have mutual TLS authentication enabled."
if cluster.tls_authentication:
# Serverless clusters always have TLS authentication enabled by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always has TLS authentication enabled by default."
# For provisioned clusters, check the TLS configuration
elif cluster.tls_authentication:
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' has mutual TLS authentication enabled."
@@ -13,7 +13,12 @@ class kafka_cluster_unrestricted_access_disabled(Check):
f"Kafka cluster '{cluster.name}' has unrestricted access enabled."
)
if not cluster.unauthentication_access:
# Serverless clusters always require authentication by default
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and always requires authentication by default."
# For provisioned clusters, check the unauthenticated access configuration
elif not cluster.unauthentication_access:
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' does not have unrestricted access enabled."
@@ -13,7 +13,12 @@ class kafka_cluster_uses_latest_version(Check):
f"Kafka cluster '{cluster.name}' is using the latest version."
)
if cluster.kafka_version != kafka_client.kafka_versions[-1].version:
# Serverless clusters don't have specific Kafka versions - AWS manages them automatically
if cluster.kafka_version == "SERVERLESS":
report.status = "PASS"
report.status_extended = f"Kafka cluster '{cluster.name}' is serverless and AWS automatically manages the Kafka version."
# For provisioned clusters, check if they're using the latest version
elif cluster.kafka_version != kafka_client.kafka_versions[-1].version:
report.status = "FAIL"
report.status_extended = (
f"Kafka cluster '{cluster.name}' is not using the latest version."
@@ -15,61 +15,133 @@ class Kafka(AWSService):
self.__threading_call__(self._list_kafka_versions)
def _list_clusters(self, regional_client):
logger.info(f"Kafka - Listing clusters in region {regional_client.region}...")
try:
cluster_paginator = regional_client.get_paginator("list_clusters")
# Use list_clusters_v2 to support both provisioned and serverless clusters
cluster_paginator = regional_client.get_paginator("list_clusters_v2")
logger.info(
f"Kafka - Paginator created for region {regional_client.region}"
)
for page in cluster_paginator.paginate():
logger.info(
f"Kafka - Processing page with {len(page.get('ClusterInfoList', []))} clusters in region {regional_client.region}"
)
for cluster in page["ClusterInfoList"]:
logger.info(
f"Kafka - Found cluster: {cluster.get('ClusterName', 'Unknown')} in region {regional_client.region}"
)
arn = cluster.get(
"ClusterArn",
f"{self.account_arn_template}/{cluster.get('ClusterName', '')}",
)
cluster_type = cluster.get("ClusterType", "UNKNOWN")
if not self.audit_resources or is_resource_filtered(
arn, self.audit_resources
):
self.clusters[cluster.get("ClusterArn", "")] = Cluster(
id=arn.split(":")[-1].split("/")[-1],
name=cluster.get("ClusterName", ""),
arn=arn,
region=regional_client.region,
tags=list(cluster.get("Tags", {})),
state=cluster.get("State", ""),
kafka_version=cluster.get(
"CurrentBrokerSoftwareInfo", {}
).get("KafkaVersion", ""),
data_volume_kms_key_id=cluster.get("EncryptionInfo", {})
.get("EncryptionAtRest", {})
.get("DataVolumeKMSKeyId", ""),
encryption_in_transit=EncryptionInTransit(
client_broker=cluster.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("ClientBroker", "PLAINTEXT"),
in_cluster=cluster.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("InCluster", False),
),
tls_authentication=cluster.get("ClientAuthentication", {})
.get("Tls", {})
.get("Enabled", False),
public_access=cluster.get("BrokerNodeGroupInfo", {})
.get("ConnectivityInfo", {})
.get("PublicAccess", {})
.get("Type", "SERVICE_PROVIDED_EIPS")
!= "DISABLED",
unauthentication_access=cluster.get(
"ClientAuthentication", {}
# Handle provisioned clusters
if cluster_type == "PROVISIONED" and "Provisioned" in cluster:
provisioned = cluster["Provisioned"]
self.clusters[cluster.get("ClusterArn", "")] = Cluster(
id=arn.split(":")[-1].split("/")[-1],
name=cluster.get("ClusterName", ""),
arn=arn,
region=regional_client.region,
tags=(
list(cluster.get("Tags", {}).values())
if cluster.get("Tags")
else []
),
state=cluster.get("State", ""),
kafka_version=provisioned.get(
"CurrentBrokerSoftwareInfo", {}
).get("KafkaVersion", ""),
data_volume_kms_key_id=provisioned.get(
"EncryptionInfo", {}
)
.get("EncryptionAtRest", {})
.get("DataVolumeKMSKeyId", ""),
encryption_in_transit=EncryptionInTransit(
client_broker=provisioned.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("ClientBroker", "PLAINTEXT"),
in_cluster=provisioned.get("EncryptionInfo", {})
.get("EncryptionInTransit", {})
.get("InCluster", False),
),
tls_authentication=provisioned.get(
"ClientAuthentication", {}
)
.get("Tls", {})
.get("Enabled", False),
public_access=provisioned.get("BrokerNodeGroupInfo", {})
.get("ConnectivityInfo", {})
.get("PublicAccess", {})
.get("Type", "SERVICE_PROVIDED_EIPS")
!= "DISABLED",
unauthentication_access=provisioned.get(
"ClientAuthentication", {}
)
.get("Unauthenticated", {})
.get("Enabled", False),
enhanced_monitoring=provisioned.get(
"EnhancedMonitoring", "DEFAULT"
),
)
.get("Unauthenticated", {})
.get("Enabled", False),
enhanced_monitoring=cluster.get(
"EnhancedMonitoring", "DEFAULT"
),
logger.info(
f"Kafka - Added provisioned cluster {cluster.get('ClusterName', 'Unknown')} to clusters dict"
)
# Handle serverless clusters
elif cluster_type == "SERVERLESS" and "Serverless" in cluster:
# For serverless clusters, encryption is always enabled by default
# We'll create a Cluster object with default encryption values
self.clusters[cluster.get("ClusterArn", "")] = Cluster(
id=arn.split(":")[-1].split("/")[-1],
name=cluster.get("ClusterName", ""),
arn=arn,
region=regional_client.region,
tags=(
list(cluster.get("Tags", {}).values())
if cluster.get("Tags")
else []
),
state=cluster.get("State", ""),
kafka_version="SERVERLESS", # Serverless doesn't have specific Kafka version
data_volume_kms_key_id="AWS_MANAGED", # Serverless uses AWS managed keys
encryption_in_transit=EncryptionInTransit(
client_broker="TLS", # Serverless always has TLS enabled
in_cluster=True, # Serverless always has in-cluster encryption
),
tls_authentication=True, # Serverless always has TLS authentication
public_access=False, # Serverless clusters are always private
unauthentication_access=False, # Serverless requires authentication
enhanced_monitoring="DEFAULT",
)
logger.info(
f"Kafka - Added serverless cluster {cluster.get('ClusterName', 'Unknown')} to clusters dict"
)
else:
logger.warning(
f"Kafka - Unknown cluster type {cluster_type} for cluster {cluster.get('ClusterName', 'Unknown')}"
)
else:
logger.info(
f"Kafka - Cluster {cluster.get('ClusterName', 'Unknown')} filtered out by audit_resources"
)
logger.info(
f"Kafka - Total clusters found in region {regional_client.region}: {len(self.clusters)}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
logger.error(
f"Kafka - Error details in region {regional_client.region}: {str(error)}"
)
def _list_kafka_versions(self, regional_client):
try:
@@ -91,7 +91,7 @@ class Defender(M365Service):
malware_rule = [malware_rule]
for rule in malware_rule:
if rule:
malware_rules[rule.get("Name", "")] = MalwareRule(
malware_rules[rule.get("MalwareFilterPolicy", "")] = MalwareRule(
state=rule.get("State", ""),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
@@ -152,12 +152,14 @@ class Defender(M365Service):
antiphishing_rule = [antiphishing_rule]
for rule in antiphishing_rule:
if rule:
antiphishing_rules[rule.get("Name", "")] = AntiphishingRule(
state=rule.get("State", ""),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
antiphishing_rules[rule.get("AntiPhishPolicy", "")] = (
AntiphishingRule(
state=rule.get("State", ""),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
)
)
except Exception as error:
logger.error(
@@ -250,7 +252,9 @@ class Defender(M365Service):
outbound_spam_rule = [outbound_spam_rule]
for rule in outbound_spam_rule:
if rule:
outbound_spam_rules[rule.get("Name", "")] = OutboundSpamRule(
outbound_spam_rules[
rule.get("HostedOutboundSpamFilterPolicy", "")
] = OutboundSpamRule(
state=rule.get("State", "Disabled"),
priority=rule.get("Priority", 0),
users=rule.get("From", None),
@@ -330,12 +334,14 @@ class Defender(M365Service):
inbound_spam_rule = [inbound_spam_rule]
for rule in inbound_spam_rule:
if rule:
inbound_spam_rules[rule.get("Name", "")] = InboundSpamRule(
state=rule.get("State", "Disabled"),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
inbound_spam_rules[rule.get("HostedContentFilterPolicy", "")] = (
InboundSpamRule(
state=rule.get("State", "Disabled"),
priority=rule.get("Priority", 0),
users=rule.get("SentTo", None),
groups=rule.get("SentToMemberOf", None),
domains=rule.get("RecipientDomainIs", None),
)
)
except Exception as error:
logger.error(
+1 -1
View File
@@ -74,7 +74,7 @@ maintainers = [{name = "Prowler Engineering", email = "engineering@prowler.com"}
name = "prowler"
readme = "README.md"
requires-python = ">3.9.1,<3.13"
version = "5.12.0"
version = "5.12.2"
[project.scripts]
prowler = "prowler.__main__:prowler"
@@ -162,3 +162,64 @@ class Test_kafka_cluster_encryption_at_rest_uses_cmk:
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_encryption_at_rest(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
kms_client = MagicMock
kms_client.keys = []
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_cluster_encryption_at_rest_uses_cmk.kafka_cluster_encryption_at_rest_uses_cmk.kms_client",
new=kms_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_encryption_at_rest_uses_cmk.kafka_cluster_encryption_at_rest_uses_cmk import (
kafka_cluster_encryption_at_rest_uses_cmk,
)
check = kafka_cluster_encryption_at_rest_uses_cmk()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has encryption at rest enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_enhanced_monitoring_enabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_enhanced_monitoring_enabled:
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_enhanced_monitoring(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_enhanced_monitoring_enabled.kafka_cluster_enhanced_monitoring_enabled import (
kafka_cluster_enhanced_monitoring_enabled,
)
check = kafka_cluster_enhanced_monitoring_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has enhanced monitoring enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_in_transit_encryption_enabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -164,11 +164,11 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -191,3 +191,57 @@ class Test_kafka_cluster_in_transit_encryption_enabled:
== "arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
)
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_in_transit_encryption(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_in_transit_encryption_enabled.kafka_cluster_in_transit_encryption_enabled import (
kafka_cluster_in_transit_encryption_enabled,
)
check = kafka_cluster_in_transit_encryption_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has encryption in transit enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_is_public:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_is_public:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_is_public:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_is_public:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_is_public:
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
def test_kafka_cluster_serverless_public(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_is_public.kafka_cluster_is_public import (
kafka_cluster_is_public,
)
check = kafka_cluster_is_public()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster serverless-cluster-1 is serverless and always private by default."
)
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_mutual_tls_authentication_enabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_mutual_tls_authentication_enabled:
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
def test_kafka_cluster_serverless_mutual_tls_authentication(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_mutual_tls_authentication_enabled.kafka_cluster_mutual_tls_authentication_enabled import (
kafka_cluster_mutual_tls_authentication_enabled,
)
check = kafka_cluster_mutual_tls_authentication_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always has TLS authentication enabled by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
@@ -4,7 +4,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
Cluster,
EncryptionInTransit,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_unrestricted_access_disabled:
@@ -14,11 +14,11 @@ class Test_kafka_cluster_unrestricted_access_disabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -56,11 +56,11 @@ class Test_kafka_cluster_unrestricted_access_disabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -110,11 +110,11 @@ class Test_kafka_cluster_unrestricted_access_disabled:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -138,3 +138,57 @@ class Test_kafka_cluster_unrestricted_access_disabled:
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
def test_kafka_cluster_serverless_unrestricted_access_disabled(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_unrestricted_access_disabled.kafka_cluster_unrestricted_access_disabled import (
kafka_cluster_unrestricted_access_disabled,
)
check = kafka_cluster_unrestricted_access_disabled()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and always requires authentication by default."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_tags == []
@@ -5,7 +5,7 @@ from prowler.providers.aws.services.kafka.kafka_service import (
EncryptionInTransit,
KafkaVersion,
)
from tests.providers.aws.utils import AWS_REGION_US_EAST_1, set_mocked_aws_provider
from tests.providers.aws.utils import AWS_REGION_US_EAST_1
class Test_kafka_cluster_latest_version:
@@ -15,11 +15,11 @@ class Test_kafka_cluster_latest_version:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -62,11 +62,11 @@ class Test_kafka_cluster_latest_version:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -121,11 +121,11 @@ class Test_kafka_cluster_latest_version:
with (
patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
@@ -149,3 +149,62 @@ class Test_kafka_cluster_latest_version:
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
def test_kafka_cluster_serverless_uses_latest_version(self):
kafka_client = MagicMock
kafka_client.clusters = {
"arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6": Cluster(
id="6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
name="serverless-cluster-1",
arn="arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
region=AWS_REGION_US_EAST_1,
tags=[],
state="ACTIVE",
kafka_version="SERVERLESS",
data_volume_kms_key_id="AWS_MANAGED",
encryption_in_transit=EncryptionInTransit(
client_broker="TLS",
in_cluster=True,
),
tls_authentication=True,
public_access=False,
unauthentication_access=False,
enhanced_monitoring="DEFAULT",
)
}
kafka_client.kafka_versions = [
KafkaVersion(version="1.0.0", status="DEPRECATED"),
KafkaVersion(version="2.8.0", status="ACTIVE"),
]
with (
patch(
"prowler.providers.aws.services.kafka.kafka_service.Kafka",
new=kafka_client,
),
patch(
"prowler.providers.aws.services.kafka.kafka_client.kafka_client",
new=kafka_client,
),
):
from prowler.providers.aws.services.kafka.kafka_cluster_uses_latest_version.kafka_cluster_uses_latest_version import (
kafka_cluster_uses_latest_version,
)
check = kafka_cluster_uses_latest_version()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== "Kafka cluster 'serverless-cluster-1' is serverless and AWS automatically manages the Kafka version."
)
assert result[0].resource_id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert (
result[0].resource_arn
== "arn:aws:kafka:us-east-1:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
)
assert result[0].resource_tags == []
assert result[0].region == AWS_REGION_US_EAST_1
@@ -13,47 +13,67 @@ make_api_call = botocore.client.BaseClient._make_api_call
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "ListClusters":
if operation_name == "ListClustersV2":
return {
"ClusterInfoList": [
{
"BrokerNodeGroupInfo": {
"BrokerAZDistribution": "DEFAULT",
"ClientSubnets": ["subnet-cbfff283", "subnet-6746046b"],
"InstanceType": "kafka.m5.large",
"SecurityGroups": ["sg-f839b688"],
"StorageInfo": {"EbsStorageInfo": {"VolumeSize": 100}},
},
"ClusterType": "PROVISIONED",
"ClusterArn": f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5",
"ClusterName": "demo-cluster-1",
"CreationTime": "2020-07-09T02:31:36.223000+00:00",
"CurrentBrokerSoftwareInfo": {"KafkaVersion": "2.2.1"},
"CurrentVersion": "K3AEGXETSR30VB",
"EncryptionInfo": {
"EncryptionAtRest": {
"DataVolumeKMSKeyId": f"arn:aws:kms:{AWS_REGION_US_EAST_1}:123456789012:key/a7ca56d5-0768-4b64-a670-339a9fbef81c"
},
"EncryptionInTransit": {
"ClientBroker": "TLS_PLAINTEXT",
"InCluster": True,
},
},
"ClientAuthentication": {
"Tls": {"CertificateAuthorityArnList": [], "Enabled": True},
"Unauthenticated": {"Enabled": False},
},
"EnhancedMonitoring": "DEFAULT",
"OpenMonitoring": {
"Prometheus": {
"JmxExporter": {"EnabledInBroker": False},
"NodeExporter": {"EnabledInBroker": False},
}
},
"NumberOfBrokerNodes": 2,
"State": "ACTIVE",
"Tags": {},
"ZookeeperConnectString": f"z-2.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-1.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-3.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181",
}
"Provisioned": {
"BrokerNodeGroupInfo": {
"BrokerAZDistribution": "DEFAULT",
"ClientSubnets": ["subnet-cbfff283", "subnet-6746046b"],
"InstanceType": "kafka.m5.large",
"SecurityGroups": ["sg-f839b688"],
"StorageInfo": {"EbsStorageInfo": {"VolumeSize": 100}},
"ConnectivityInfo": {
"PublicAccess": {"Type": "SERVICE_PROVIDED_EIPS"}
},
},
"CurrentBrokerSoftwareInfo": {"KafkaVersion": "2.2.1"},
"CurrentVersion": "K3AEGXETSR30VB",
"EncryptionInfo": {
"EncryptionAtRest": {
"DataVolumeKMSKeyId": f"arn:aws:kms:{AWS_REGION_US_EAST_1}:123456789012:key/a7ca56d5-0768-4b64-a670-339a9fbef81c"
},
"EncryptionInTransit": {
"ClientBroker": "TLS_PLAINTEXT",
"InCluster": True,
},
},
"ClientAuthentication": {
"Tls": {"CertificateAuthorityArnList": [], "Enabled": True},
"Unauthenticated": {"Enabled": False},
},
"EnhancedMonitoring": "DEFAULT",
"OpenMonitoring": {
"Prometheus": {
"JmxExporter": {"EnabledInBroker": False},
"NodeExporter": {"EnabledInBroker": False},
}
},
"NumberOfBrokerNodes": 2,
"ZookeeperConnectString": f"z-2.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-1.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181,z-3.demo-cluster-1.xuy0sb.c5.kafka.{AWS_REGION_US_EAST_1}.amazonaws.com:2181",
},
},
{
"ClusterType": "SERVERLESS",
"ClusterArn": f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6",
"ClusterName": "serverless-cluster-1",
"State": "ACTIVE",
"Tags": {},
"Serverless": {
"VpcConfigs": [
{
"SubnetIds": ["subnet-cbfff283", "subnet-6746046b"],
"SecurityGroups": ["sg-f839b688"],
}
],
},
},
]
}
elif operation_name == "ListKafkaVersions":
@@ -86,32 +106,53 @@ class TestKafkaService:
assert kafka.__class__.__name__ == "Kafka"
assert kafka.session.__class__.__name__ == "Session"
assert kafka.audited_account == AWS_ACCOUNT_NUMBER
# Clusters assertions
assert len(kafka.clusters) == 1
cluster_arn = f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert cluster_arn in kafka.clusters
# Clusters assertions - should now include both provisioned and serverless
assert len(kafka.clusters) == 2
# Check provisioned cluster
provisioned_cluster_arn = f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert provisioned_cluster_arn in kafka.clusters
provisioned_cluster = kafka.clusters[provisioned_cluster_arn]
assert provisioned_cluster.id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
assert provisioned_cluster.arn == provisioned_cluster_arn
assert provisioned_cluster.name == "demo-cluster-1"
assert provisioned_cluster.region == AWS_REGION_US_EAST_1
assert provisioned_cluster.tags == []
assert provisioned_cluster.state == "ACTIVE"
assert provisioned_cluster.kafka_version == "2.2.1"
assert (
kafka.clusters[cluster_arn].id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5"
)
assert kafka.clusters[cluster_arn].arn == cluster_arn
assert kafka.clusters[cluster_arn].name == "demo-cluster-1"
assert kafka.clusters[cluster_arn].region == AWS_REGION_US_EAST_1
assert kafka.clusters[cluster_arn].tags == []
assert kafka.clusters[cluster_arn].state == "ACTIVE"
assert kafka.clusters[cluster_arn].kafka_version == "2.2.1"
assert (
kafka.clusters[cluster_arn].data_volume_kms_key_id
provisioned_cluster.data_volume_kms_key_id
== f"arn:aws:kms:{AWS_REGION_US_EAST_1}:123456789012:key/a7ca56d5-0768-4b64-a670-339a9fbef81c"
)
assert (
kafka.clusters[cluster_arn].encryption_in_transit.client_broker
== "TLS_PLAINTEXT"
provisioned_cluster.encryption_in_transit.client_broker == "TLS_PLAINTEXT"
)
assert kafka.clusters[cluster_arn].encryption_in_transit.in_cluster
assert kafka.clusters[cluster_arn].enhanced_monitoring == "DEFAULT"
assert kafka.clusters[cluster_arn].tls_authentication
assert kafka.clusters[cluster_arn].public_access
assert not kafka.clusters[cluster_arn].unauthentication_access
assert provisioned_cluster.encryption_in_transit.in_cluster
assert provisioned_cluster.enhanced_monitoring == "DEFAULT"
assert provisioned_cluster.tls_authentication
assert provisioned_cluster.public_access
assert not provisioned_cluster.unauthentication_access
# Check serverless cluster
serverless_cluster_arn = f"arn:aws:kafka:{AWS_REGION_US_EAST_1}:123456789012:cluster/serverless-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert serverless_cluster_arn in kafka.clusters
serverless_cluster = kafka.clusters[serverless_cluster_arn]
assert serverless_cluster.id == "6357e0b2-0e6a-4b86-a0b4-70df934c2e31-6"
assert serverless_cluster.arn == serverless_cluster_arn
assert serverless_cluster.name == "serverless-cluster-1"
assert serverless_cluster.region == AWS_REGION_US_EAST_1
assert serverless_cluster.tags == []
assert serverless_cluster.state == "ACTIVE"
assert serverless_cluster.kafka_version == "SERVERLESS"
assert serverless_cluster.data_volume_kms_key_id == "AWS_MANAGED"
assert serverless_cluster.encryption_in_transit.client_broker == "TLS"
assert serverless_cluster.encryption_in_transit.in_cluster
assert serverless_cluster.enhanced_monitoring == "DEFAULT"
assert serverless_cluster.tls_authentication
assert not serverless_cluster.public_access
assert not serverless_cluster.unauthentication_access
# Kafka versions assertions
assert len(kafka.kafka_versions) == 2
assert kafka.kafka_versions[0].version == "1.0.0"
+15
View File
@@ -2,6 +2,21 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.12.2] (Prowler v5.12.2)
### 🐞 Fixed
- Handle 4XX errors consistently and 204 responses properly [(#8722)](https://github.com/prowler-cloud/prowler/pull/8722)
- Scrolling during Lighthouse AI response streaming [(#8669)](https://github.com/prowler-cloud/prowler/pull/8669)
## [1.12.1] (Prowler v5.12.1)
### 🐞 Fixed
- Field-level email validation message [(#8698)](https://github.com/prowler-cloud/prowler/pull/8698)
- POST method on auth form [(#8699)](https://github.com/prowler-cloud/prowler/pull/8699)
## [1.12.0] (Prowler v5.12.0)
### 🚀 Added
+1 -1
View File
@@ -35,7 +35,7 @@ export async function authenticate(
message: "Credentials error",
errors: {
...defaultValues,
credentials: "Incorrect email or password",
credentials: "Invalid email or password",
},
};
case "CallbackRouteError":
+1 -2
View File
@@ -159,8 +159,7 @@ export const updateRole = async (formData: FormData, roleId: string) => {
manage_providers: formData.get("manage_providers") === "true",
manage_account: formData.get("manage_account") === "true",
manage_scans: formData.get("manage_scans") === "true",
// TODO: Add back when we have integrations ready
// manage_integrations: formData.get("manage_integrations") === "true",
manage_integrations: formData.get("manage_integrations") === "true",
unlimited_visibility: formData.get("unlimited_visibility") === "true",
},
relationships: {},
+17 -17
View File
@@ -12,7 +12,7 @@ import { authenticate, createNewUser } from "@/actions/auth";
import { initiateSamlAuth } from "@/actions/integrations/saml";
import { PasswordRequirementsMessage } from "@/components/auth/oss/password-validator";
import { SocialButtons } from "@/components/auth/oss/social-buttons";
import { NotificationIcon, ProwlerExtended } from "@/components/icons";
import { ProwlerExtended } from "@/components/icons";
import { ThemeSwitch } from "@/components/ThemeSwitch";
import { useToast } from "@/components/ui";
import { CustomButton, CustomInput } from "@/components/ui/custom";
@@ -65,6 +65,8 @@ export const AuthForm = ({
const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema),
mode: "onSubmit",
reValidateMode: "onSubmit",
defaultValues: {
email: "",
password: "",
@@ -111,10 +113,11 @@ export const AuthForm = ({
if (result?.message === "Success") {
router.push("/");
} else if (result?.errors && "credentials" in result.errors) {
form.setError("email", {
type: "server",
message: result.errors.credentials ?? "Incorrect email or password",
});
const message =
result.errors.credentials ?? "Invalid email or password";
form.setError("email", { type: "server", message });
form.setError("password", { type: "server", message });
} else if (result?.message === "User email is not verified") {
router.push("/email-verification");
} else {
@@ -144,7 +147,8 @@ export const AuthForm = ({
} else {
newUser.errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
switch (error.source.pointer) {
const pointer = error.source?.pointer;
switch (pointer) {
case "/data/attributes/name":
form.setError("name", { type: "server", message: errorMessage });
break;
@@ -206,6 +210,8 @@ export const AuthForm = ({
<Form {...form}>
<form
noValidate
method="post"
className="flex flex-col gap-4"
onSubmit={form.handleSubmit(onSubmit)}
>
@@ -237,7 +243,8 @@ export const AuthForm = ({
label="Email"
placeholder="Enter your email"
isInvalid={!!form.formState.errors.email}
showFormMessage={type !== "sign-in"}
// Always show field validation message, including on sign-in
showFormMessage
/>
{!isSamlMode && (
<>
@@ -245,10 +252,8 @@ export const AuthForm = ({
control={form.control}
name="password"
password
isInvalid={
!!form.formState.errors.password ||
!!form.formState.errors.email
}
// Only mark invalid when the password field has an error
isInvalid={!!form.formState.errors.password}
/>
{type === "sign-up" && (
<PasswordRequirementsMessage
@@ -319,12 +324,7 @@ export const AuthForm = ({
)}
</>
)}
{type === "sign-in" && form.formState.errors?.email && (
<div className="flex flex-row items-center text-system-error">
<NotificationIcon size={16} />
<p className="text-small">Invalid email or password</p>
</div>
)}
<CustomButton
type="submit"
ariaLabel={type === "sign-in" ? "Log in" : "Sign up"}
@@ -53,7 +53,8 @@ export const SendInvitationForm = ({
if (data?.errors && data.errors.length > 0) {
data.errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
switch (error.source.pointer) {
const pointer = error.source?.pointer;
switch (pointer) {
case "/data/attributes/email":
form.setError("email", {
type: "server",
-10
View File
@@ -146,16 +146,6 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
return () => document.removeEventListener("keydown", handleKeyDown);
}, [messageValue, onFormSubmit]);
useEffect(() => {
if (messagesContainerRef.current && latestUserMsgRef.current) {
const container = messagesContainerRef.current;
const userMsg = latestUserMsgRef.current;
const containerPadding = 16; // p-4 in Tailwind = 16px
container.scrollTop =
userMsg.offsetTop - container.offsetTop - containerPadding;
}
}, [messages]);
const suggestedActions: SuggestedAction[] = [
{
title: "Are there any exposed S3",
@@ -69,7 +69,8 @@ export const AddGroupForm = ({
if (data?.errors && data.errors.length > 0) {
data.errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
switch (error.source.pointer) {
const pointer = error.source?.pointer;
switch (pointer) {
case "/data/attributes/name":
form.setError("name", {
type: "server",
@@ -105,7 +105,8 @@ export const EditGroupForm = ({
if (data?.errors && data.errors.length > 0) {
data.errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
switch (error.source.pointer) {
const pointer = error.source?.pointer;
switch (pointer) {
case "/data/attributes/name":
form.setError("name", {
type: "server",
@@ -17,7 +17,7 @@ import {
CustomInput,
} from "@/components/ui/custom";
import { Form } from "@/components/ui/form";
import { permissionFormFields } from "@/lib";
import { getErrorMessage, permissionFormFields } from "@/lib";
import { addRoleFormSchema, ApiError } from "@/types";
type FormValues = z.infer<typeof addRoleFormSchema>;
@@ -113,7 +113,8 @@ export const AddRoleForm = ({
if (data?.errors && data.errors.length > 0) {
data.errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
switch (error.source.pointer) {
const pointer = error.source?.pointer;
switch (pointer) {
case "/data/attributes/name":
form.setError("name", {
type: "server",
@@ -139,7 +140,7 @@ export const AddRoleForm = ({
toast({
variant: "destructive",
title: "Error",
description: "An unexpected error occurred. Please try again.",
description: getErrorMessage(error),
});
}
};
@@ -17,7 +17,7 @@ import {
CustomInput,
} from "@/components/ui/custom";
import { Form } from "@/components/ui/form";
import { permissionFormFields } from "@/lib";
import { getErrorMessage, permissionFormFields } from "@/lib";
import { ApiError, editRoleFormSchema } from "@/types";
type FormValues = z.infer<typeof editRoleFormSchema>;
@@ -133,7 +133,8 @@ export const EditRoleForm = ({
if (data?.errors && data.errors.length > 0) {
data.errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
switch (error.source.pointer) {
const pointer = error.source?.pointer;
switch (pointer) {
case "/data/attributes/name":
form.setError("name", {
type: "server",
@@ -159,7 +160,7 @@ export const EditRoleForm = ({
toast({
variant: "destructive",
title: "Error",
description: "An unexpected error occurred. Please try again.",
description: getErrorMessage(error),
});
}
};
+2 -1
View File
@@ -19,7 +19,8 @@ export const useFormServerErrors = <T extends Record<string, any>>(
) => {
errors.forEach((error: ApiError) => {
const errorMessage = error.detail;
const fieldName = errorMapping?.[error.source.pointer];
const pointer = error.source?.pointer;
const fieldName = pointer ? errorMapping?.[pointer] : undefined;
if (fieldName && fieldName in form.formState.defaultValues!) {
form.setError(fieldName as any, {
+49 -9
View File
@@ -348,10 +348,27 @@ export const handleApiResponse = async (
parse = true,
) => {
if (!response.ok) {
const errorData = await response.json().catch(() => null);
const errorDetail = errorData?.errors?.[0]?.detail;
// Read error body safely; prefer JSON, fallback to plain text
const rawErrorText = await response.text().catch(() => "");
let errorData: any = null;
try {
errorData = rawErrorText ? JSON.parse(rawErrorText) : null;
} catch {
errorData = null;
}
// Special handling for server errors (500+)
const errorsArray = Array.isArray(errorData?.errors)
? (errorData.errors as any[])
: undefined;
const errorDetail =
errorsArray?.[0]?.detail ||
errorData?.error ||
errorData?.message ||
(rawErrorText && rawErrorText.trim()) ||
response.statusText ||
"Oops! Something went wrong.";
//5XX errors
if (response.status >= 500) {
throw new Error(
errorDetail ||
@@ -359,14 +376,37 @@ export const handleApiResponse = async (
);
}
// Client errors (4xx)
throw new Error(
errorDetail ||
`Request failed (${response.status}): ${response.statusText}`,
);
return errorsArray
? { error: errorDetail, errors: errorsArray, status: response.status }
: ({ error: errorDetail, status: response.status } as any);
}
const data = await response.json();
// Handle empty or no-content responses gracefully (e.g., 204, empty body)
if (response.status === 204) {
if (pathToRevalidate && pathToRevalidate !== "") {
revalidatePath(pathToRevalidate);
}
return { success: true, status: response.status } as any;
}
// Read raw text to determine if there's a body to parse
const rawText = await response.text();
const hasBody = rawText && rawText.trim().length > 0;
if (!hasBody) {
if (pathToRevalidate && pathToRevalidate !== "") {
revalidatePath(pathToRevalidate);
}
return { success: true, status: response.status } as any;
}
let data: any;
try {
data = JSON.parse(rawText);
} catch (e) {
// If body isn't valid JSON, return as text payload
data = { data: rawText };
}
if (pathToRevalidate && pathToRevalidate !== "") {
revalidatePath(pathToRevalidate);
+4 -6
View File
@@ -48,22 +48,20 @@ test.describe("Login Flow", () => {
test("should handle empty form submission", async ({ page }) => {
// Submit empty form
await submitLoginForm(page);
await verifyLoginError(page, ERROR_MESSAGES.INVALID_CREDENTIALS);
await verifyLoginError(page, ERROR_MESSAGES.INVALID_EMAIL);
// Verify we're still on login page
await expect(page).toHaveURL(URLS.LOGIN);
});
/*
TODO: This test is failing, need UI work before.
test("should validate email format", async ({ page }) => {
// Attempt login with invalid email format
await login(page, TEST_CREDENTIALS.INVALID_EMAIL_FORMAT);
// Verify error message (application shows generic error for invalid email format too)
await verifyLoginError(page, ERROR_MESSAGES.INVALID_CREDENTIALS);
// Verify field-level email validation message
await verifyLoginError(page, ERROR_MESSAGES.INVALID_EMAIL);
// Verify we're still on login page
await expect(page).toHaveURL(URLS.LOGIN);
});
*/
test("should toggle SAML SSO mode", async ({ page }) => {
// Toggle to SAML mode
+3 -1
View File
@@ -2,6 +2,7 @@ import { Page, expect } from "@playwright/test";
export const ERROR_MESSAGES = {
INVALID_CREDENTIALS: "Invalid email or password",
INVALID_EMAIL: "Please enter a valid email address.",
} as const;
export const URLS = {
@@ -69,7 +70,8 @@ export async function verifyLoginError(
page: Page,
errorMessage = "Invalid email or password",
) {
await expect(page.getByText(errorMessage)).toBeVisible();
// There may be multiple field-level errors with the same text; assert at least one is visible
await expect(page.getByText(errorMessage).first()).toBeVisible();
await expect(page).toHaveURL("/sign-in");
}
+6 -1
View File
@@ -96,7 +96,12 @@ export const authFormSchema = (type: string) =>
}),
// Fields for Sign In and Sign Up
email: z.string().email(),
// Trim and normalize email, and provide consistent message
email: z
.string()
.trim()
.toLowerCase()
.email({ message: "Please enter a valid email address." }),
password: type === "sign-in" ? z.string() : validatePassword(),
isSamlMode: z.boolean().optional(),
})