Compare commits

...

48 Commits
4.2.1 ... v4.2

Author SHA1 Message Date
William Leung
a46ea6a447 fix(config/html): handle encoding issues and improve error handling in config and HTML file loading functions (#4203)
Co-authored-by: Sergio <sergio@prowler.com>
2024-06-07 13:00:08 -04:00
Rubén De la Torre Vico
66199ee722 chore(acm): Improve near-expiration certificates check (#4207)
Co-authored-by: Sergio <sergio@prowler.com>
2024-06-07 13:00:03 -04:00
Sergio Garcia
d6ac438c0b fix(compliance): check if custom check has compliance metadata (#4208) 2024-06-07 12:59:56 -04:00
Seiji Ujihira
a3a43459e7 fix(custom): execute custom checks (#4202) 2024-06-07 12:59:49 -04:00
Pedro Martín
e2c1f9816a fix(dashboard): fix styles in overview page (#4204) 2024-06-07 12:59:41 -04:00
Pedro Martín
cce4a6a124 fix(html): fix status from HTML outputs (#4206) 2024-06-07 12:59:36 -04:00
Pepe Fagoaga
d17bbe7c41 chore(regions_update): Changes in regions for AWS services. (#4205)
Co-authored-by: sergargar <38561120+sergargar@users.noreply.github.com>
2024-06-07 12:59:31 -04:00
github-actions
f93e3a470b chore(release): 4.2.3 2024-06-06 16:13:39 +00:00
Pedro Martín
c8d9a47272 fix(html): resolve html changing finding status (#4199) 2024-06-06 12:05:14 -04:00
Pedro Martín
5d7658efe5 fix(html): handle muted status to html outputs (#4195)
Co-authored-by: Sergio Garcia <38561120+sergargar@users.noreply.github.com>
2024-06-06 12:05:14 -04:00
Pedro Martín
e60d22f3e4 docs(readme): Update checks number (#4197) 2024-06-06 12:05:14 -04:00
Sergio Garcia
0de10c4742 fix(s3): check if account is signed up (#4194) 2024-06-06 08:43:49 -04:00
Sergio Garcia
f7b7ce3b95 fix(glue): check if get dev endpoints call is supported (#4193) 2024-06-06 08:43:39 -04:00
Sergio Garcia
7b43b3d31e fix(elasticache): handle empty cluster subnets (#4192) 2024-06-06 08:43:30 -04:00
Sergio Garcia
84b9c442fe fix(rds): handle not existing parameter values (#4191) 2024-06-06 08:43:19 -04:00
Kay Agahd
a890895e8b docs(index): fix docu about output modes (#4187) 2024-06-05 10:10:11 -04:00
Pedro Martín
f3c6720a1c chore(version): update prowler version (#4190) 2024-06-05 09:11:50 -04:00
Kay Agahd
8c29bbfe4e docs(reporting): fix mapping of json-ocsf field cloud.account.type (#4186) 2024-06-04 17:17:28 -04:00
Pepe Fagoaga
910c969473 refactor(run_check): Simplify and add tests (#4183) 2024-06-04 12:35:57 -04:00
Pedro Martín
2795673ebc fix(html): make Prowler logo resizable (#4185) 2024-06-04 11:57:41 -04:00
Pedro Martín
dc510e0683 fix(html): add correct color for manual findings (#4184) 2024-06-04 11:57:22 -04:00
Pepe Fagoaga
070edc1693 refactor(Slack): create class (#4127) 2024-06-04 10:54:12 -04:00
dependabot[bot]
8645ee20c3 chore(deps): bump botocore from 1.34.113 to 1.34.118 (#4170)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 14:46:06 +02:00
Pepe Fagoaga
8d4abd7638 chore(regions_update): Changes in regions for AWS services. (#4178)
Co-authored-by: sergargar <38561120+sergargar@users.noreply.github.com>
2024-06-04 12:04:35 +02:00
dependabot[bot]
f4106f4b72 chore(deps-dev): bump moto from 5.0.8 to 5.0.9 (#4169)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 11:44:49 +02:00
dependabot[bot]
4087aaf6cf chore(deps-dev): bump coverage from 7.5.2 to 7.5.3 (#4167)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 10:51:28 +02:00
dependabot[bot]
c3ef0d4ca8 chore(deps): bump google-api-python-client from 2.130.0 to 2.131.0 (#4166)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 10:15:04 +02:00
dependabot[bot]
a1aed37482 chore(deps-dev): bump mkdocs-git-revision-date-localized-plugin from 1.2.5 to 1.2.6 (#4164)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 09:07:20 +02:00
dependabot[bot]
d05a15ef5a chore(deps): bump boto3 from 1.34.109 to 1.34.113 (#4165)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-04 08:34:19 +02:00
dependabot[bot]
ef9d3b902e chore(deps): bump trufflesecurity/trufflehog from 3.76.3 to 3.77.0 (#4163)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-03 18:30:24 -04:00
Sergio Garcia
366bb91a1e fix(cloudtrail): check if trails exist in service (#4161) 2024-06-03 17:05:39 -04:00
Sergio Garcia
0c01cf28c4 fix(trustedadvisor): handle AccessDenied exception (#4158) 2024-06-03 15:15:00 -04:00
Pepe Fagoaga
f895e4df6a fix(cloudtrail): trail.region must be home region (#4153)
Co-authored-by: Sergio <sergio@prowler.com>
2024-06-03 13:19:40 -04:00
Sergio Garcia
2affed81ad fix(rds): use correct API call for cluster parameters (#4150) 2024-06-03 13:19:11 -04:00
Pepe Fagoaga
b33b529e74 refactor(banner): remove unneeded arguments (#4155) 2024-06-03 14:44:14 +02:00
Sergio Garcia
0bbb762c74 chore(favicon): update favicon logo (#4151)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2024-06-03 12:01:30 +02:00
Pedro Martín
ec5fb035b1 fix(dependencies): ignore jinja vulnerability (#4154) 2024-06-03 10:07:00 +02:00
Kay Agahd
e45a189422 chore(AWS): allow ingress to any port for user defined network interface types (#4094)
Co-authored-by: Sergio <sergio@prowler.com>
2024-05-31 13:37:52 -04:00
Pepe Fagoaga
b2b66bd080 fix(mutelist): Split code for AWS and the rest of providers (#4143) 2024-05-31 10:06:01 -04:00
Pepe Fagoaga
b905d73b82 fix(rds): Handle DBParameterGroupNotFound (#4148)
Co-authored-by: Sergio <sergio@prowler.com>
2024-05-31 10:01:01 -04:00
rieck-srlabs
6ed3167e17 chore(iam): Downgrade AWS IAM check severity (#4149) 2024-05-31 09:16:50 -04:00
Rubén De la Torre Vico
3a2fea7136 fix(defender): Add new parameter required by new API version (#4147) 2024-05-31 12:40:48 +02:00
Sergio Garcia
212ff2439e chore(ec2): add scan unused services logic to SG check (#4138) 2024-05-30 11:51:17 -04:00
Pepe Fagoaga
7b2a7faf6b fix(mutelist): return False if something fails (#4139) 2024-05-30 11:25:13 -04:00
Sergio Garcia
2725d476a4 chore(vpc): add scan unused services logic to VPC checks (#4137) 2024-05-30 10:59:48 -04:00
Sergio Garcia
dfa940440c chore(version): update Prowler version (#4131) 2024-05-30 15:43:20 +02:00
rieck-srlabs
862bc8cae8 chore(cloudformation): Update related URL (#4134) 2024-05-30 09:25:34 -04:00
Pepe Fagoaga
a51bdef083 fix(mutelist): Handle items starting by * (#4136) 2024-05-30 15:04:08 +02:00
83 changed files with 3325 additions and 1143 deletions

View File

@@ -11,7 +11,7 @@ jobs:
with:
fetch-depth: 0
- name: TruffleHog OSS
uses: trufflesecurity/trufflehog@v3.76.3
uses: trufflesecurity/trufflehog@v3.77.0
with:
path: ./
base: ${{ github.event.repository.default_branch }}

View File

@@ -73,7 +73,7 @@ jobs:
- name: Safety
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry run safety check --ignore 67599
poetry run safety check --ignore 67599 --ignore 70612
- name: Vulture
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |

View File

@@ -97,7 +97,7 @@ repos:
- id: safety
name: safety
description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities"
entry: bash -c 'safety check --ignore 67599'
entry: bash -c 'safety check --ignore 67599 --ignore 70612'
language: system
- id: vulture

View File

@@ -60,7 +60,7 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
|---|---|---|---|---|
| AWS | 359 | 66 -> `prowler aws --list-services` | 28 -> `prowler aws --list-compliance` | 7 -> `prowler aws --list-categories` |
| AWS | 360 | 66 -> `prowler aws --list-services` | 28 -> `prowler aws --list-compliance` | 7 -> `prowler aws --list-categories` |
| GCP | 77 | 13 -> `prowler gcp --list-services` | 1 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 127 | 16 -> `prowler azure --list-services` | 2 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |

View File

@@ -16,7 +16,7 @@ from prowler.lib.banner import print_banner
warnings.filterwarnings("ignore")
cli = sys.modules["flask.cli"]
print_banner(verbose=False)
print_banner()
print(
f"{Fore.GREEN}Loading all CSV files from the folder {folder_path_overview} ...\n{Style.RESET_ALL}"
)

Binary image file changed (not shown): 28 KiB before, 15 KiB after.

View File

@@ -21,7 +21,7 @@ muted_manual_color = "#b33696"
critical_color = "#951649"
high_color = "#e11d48"
medium_color = "#ee6f15"
low_color = "#f9f5e6"
low_color = "#fcf45d"
informational_color = "#3274d9"
# Folder output path

View File

@@ -945,7 +945,7 @@ def filter_data(
color_mapping_status = {
"FAIL": fail_color,
"PASS": pass_color,
"INFO": info_color,
"LOW": info_color,
"MANUAL": manual_color,
"WARNING": muted_fail_color,
"MUTED (FAIL)": muted_fail_color,
@@ -1564,7 +1564,10 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
data.get(
"FINDING_UID", ""
)
)
),
style={
"margin-left": "5px"
},
),
],
style={"display": "flex"},
@@ -1644,28 +1647,10 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
"STATUS_EXTENDED",
"",
)
)
),
],
style={"display": "flex"},
),
html.Div(
[
html.P(
html.Strong(
"Risk: ",
style={
"margin-right": "5px"
},
)
),
html.P(
str(
data.get(
"RISK",
"",
)
)
),
style={
"margin-left": "5px"
},
),
],
style={"display": "flex"},
@@ -1689,7 +1674,10 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
)
),
html.P(
str(data.get("RISK", ""))
str(data.get("RISK", "")),
style={
"margin-left": "5px"
},
),
],
style={"display": "flex"},
@@ -1744,7 +1732,10 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
"REMEDIATION_RECOMMENDATION_TEXT",
"",
)
)
),
style={
"margin-left": "5px"
},
),
],
style={"display": "flex"},
@@ -1772,7 +1763,10 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
"",
)
),
style={"color": "#3182ce"},
style={
"color": "#3182ce",
"margin-left": "5px",
},
),
],
style={"display": "flex"},

Binary image file changed (not shown): 15 KiB before, 15 KiB after.

View File

@@ -212,10 +212,10 @@ prowler <provider>
If you miss the former output you can use `--verbose`, but Prowler v4 is smoking fast, so you won't see much ;)
By default, Prowler will generate a CSV, JSON and HTML reports, however you can generate a JSON-ASFF (used by AWS Security Hub) report with `-M` or `--output-modes`:
By default, Prowler generates CSV, JSON-OCSF and HTML reports. However, you can generate a JSON-ASFF report (used by AWS Security Hub) with `-M` or `--output-modes`:
```console
prowler <provider> -M csv json json-asff html
prowler <provider> -M csv json-asff json-ocsf html
```
The HTML report will be located in the output directory along with the other files, and it will look like:

View File

@@ -29,17 +29,22 @@ The following list includes all the AWS checks with configurable variables that
| `organizations_delegated_administrators` | `organizations_trusted_delegated_administrators` | List of Strings |
| `ecr_repositories_scan_vulnerabilities_in_latest_image` | `ecr_repository_vulnerability_minimum_severity` | String |
| `trustedadvisor_premium_support_plan_subscribed` | `verify_premium_support_plans` | Boolean |
| `config_recorder_all_regions_enabled` | `mute_non_default_regions` | Boolean |
| `drs_job_exist` | `mute_non_default_regions` | Boolean |
| `guardduty_is_enabled` | `mute_non_default_regions` | Boolean |
| `securityhub_enabled` | `mute_non_default_regions` | Boolean |
| `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_entropy` | Integer |
| `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_minutes` | Integer |
| `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_actions` | List of Strings |
| `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_entropy` | Integer |
| `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_minutes` | Integer |
| `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_actions` | List of Strings |
| `rds_instance_backup_enabled` | `check_rds_instance_replicas` | Boolean |
| `config_recorder_all_regions_enabled` | `mute_non_default_regions` | Boolean |
| `drs_job_exist` | `mute_non_default_regions` | Boolean |
| `guardduty_is_enabled` | `mute_non_default_regions` | Boolean |
| `securityhub_enabled` | `mute_non_default_regions` | Boolean |
| `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_entropy` | Integer |
| `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_minutes` | Integer |
| `cloudtrail_threat_detection_privilege_escalation` | `threat_detection_privilege_escalation_actions` | List of Strings |
| `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_entropy` | Integer |
| `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_minutes` | Integer |
| `cloudtrail_threat_detection_enumeration` | `threat_detection_enumeration_actions` | List of Strings |
| `rds_instance_backup_enabled` | `check_rds_instance_replicas` | Boolean |
| `ec2_securitygroup_allow_ingress_from_internet_to_any_port` | `ec2_allowed_interface_types` | List of Strings |
| `ec2_securitygroup_allow_ingress_from_internet_to_any_port` | `ec2_allowed_instance_owners` | List of Strings |
| `acm_certificates_expiration_check` | `days_to_expire_threshold` | Integer |
## Azure
### Configurable Checks
@@ -78,10 +83,20 @@ The following list includes all the Azure checks with configurable variables tha
```yaml title="config.yaml"
# AWS Configuration
aws:
# AWS Global Configuration
# aws.mute_non_default_regions --> Mute Failed Findings in non-default regions for GuardDuty, SecurityHub, DRS and Config
# aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
mute_non_default_regions: False
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
# Mutelist:
# Accounts:
# "*":
# Checks:
# "*":
# Regions:
# - "ap-southeast-1"
# - "ap-southeast-2"
# Resources:
# - "*"
# AWS IAM Configuration
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
@@ -91,11 +106,24 @@ aws:
# AWS EC2 Configuration
# aws.ec2_elastic_ip_shodan
# TODO: create common config
shodan_api_key: null
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
# allowed network interface types for security groups open to the Internet
ec2_allowed_interface_types:
[
"api_gateway_managed",
"vpc_endpoint",
]
# allowed network interface owners for security groups open to the Internet
ec2_allowed_instance_owners:
[
"amazon-elb"
]
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
@@ -119,205 +147,222 @@ aws:
# aws.awslambda_function_using_supported_runtimes
obsolete_lambda_runtimes:
[
"java8",
"go1.x",
"provided",
"python3.6",
"python2.7",
"python3.7",
"nodejs4.3",
"nodejs4.3-edge",
"nodejs6.10",
"nodejs",
"nodejs8.10",
"nodejs10.x",
"nodejs12.x",
"nodejs14.x",
"dotnet5.0",
"dotnetcore1.0",
"dotnetcore2.0",
"dotnetcore2.1",
"dotnetcore3.1",
"ruby2.5",
"ruby2.7",
]
# AWS Organizations
# organizations_scp_check_deny_regions
# organizations_enabled_regions: [
# 'eu-central-1',
# 'eu-west-1',
# aws.organizations_scp_check_deny_regions
# aws.organizations_enabled_regions: [
# "eu-central-1",
# "eu-west-1",
# "us-east-1"
# ]
organizations_enabled_regions: []
organizations_trusted_delegated_administrators: []
# AWS ECR
# ecr_repositories_scan_vulnerabilities_in_latest_image
# aws.ecr_repositories_scan_vulnerabilities_in_latest_image
# CRITICAL
# HIGH
# MEDIUM
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
# AWS Trusted Advisor
# trustedadvisor_premium_support_plan_subscribed
# aws.trustedadvisor_premium_support_plan_subscribed
verify_premium_support_plans: True
# AWS CloudTrail Configuration
# aws.cloudtrail_threat_detection_privilege_escalation
threat_detection_privilege_escalation_entropy: 0.7 # Percentage of actions found to decide if it is a privilege_escalation attack event, by default is 0.7 (70%)
threat_detection_privilege_escalation_threshold: 0.1 # Percentage of actions found to decide if it is a privilege_escalation attack event, by default is 0.1 (10%)
threat_detection_privilege_escalation_minutes: 1440 # Past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
threat_detection_privilege_escalation_actions: [
"AddPermission",
"AddRoleToInstanceProfile",
"AddUserToGroup",
"AssociateAccessPolicy",
"AssumeRole",
"AttachGroupPolicy",
"AttachRolePolicy",
"AttachUserPolicy",
"ChangePassword",
"CreateAccessEntry",
"CreateAccessKey",
"CreateDevEndpoint",
"CreateEventSourceMapping",
"CreateFunction",
"CreateGroup",
"CreateJob",
"CreateKeyPair",
"CreateLoginProfile",
"CreatePipeline",
"CreatePolicyVersion",
"CreateRole",
"CreateStack",
"DeleteRolePermissionsBoundary",
"DeleteRolePolicy",
"DeleteUserPermissionsBoundary",
"DeleteUserPolicy",
"DetachRolePolicy",
"DetachUserPolicy",
"GetCredentialsForIdentity",
"GetId",
"GetPolicyVersion",
"GetUserPolicy",
"Invoke",
"ModifyInstanceAttribute",
"PassRole",
"PutGroupPolicy",
"PutPipelineDefinition",
"PutRolePermissionsBoundary",
"PutRolePolicy",
"PutUserPermissionsBoundary",
"PutUserPolicy",
"ReplaceIamInstanceProfileAssociation",
"RunInstances",
"SetDefaultPolicyVersion",
"UpdateAccessKey",
"UpdateAssumeRolePolicy",
"UpdateDevEndpoint",
"UpdateEventSourceMapping",
"UpdateFunctionCode",
"UpdateJob",
"UpdateLoginProfile",
]
threat_detection_privilege_escalation_actions:
[
"AddPermission",
"AddRoleToInstanceProfile",
"AddUserToGroup",
"AssociateAccessPolicy",
"AssumeRole",
"AttachGroupPolicy",
"AttachRolePolicy",
"AttachUserPolicy",
"ChangePassword",
"CreateAccessEntry",
"CreateAccessKey",
"CreateDevEndpoint",
"CreateEventSourceMapping",
"CreateFunction",
"CreateGroup",
"CreateJob",
"CreateKeyPair",
"CreateLoginProfile",
"CreatePipeline",
"CreatePolicyVersion",
"CreateRole",
"CreateStack",
"DeleteRolePermissionsBoundary",
"DeleteRolePolicy",
"DeleteUserPermissionsBoundary",
"DeleteUserPolicy",
"DetachRolePolicy",
"DetachUserPolicy",
"GetCredentialsForIdentity",
"GetId",
"GetPolicyVersion",
"GetUserPolicy",
"Invoke",
"ModifyInstanceAttribute",
"PassRole",
"PutGroupPolicy",
"PutPipelineDefinition",
"PutRolePermissionsBoundary",
"PutRolePolicy",
"PutUserPermissionsBoundary",
"PutUserPolicy",
"ReplaceIamInstanceProfileAssociation",
"RunInstances",
"SetDefaultPolicyVersion",
"UpdateAccessKey",
"UpdateAssumeRolePolicy",
"UpdateDevEndpoint",
"UpdateEventSourceMapping",
"UpdateFunctionCode",
"UpdateJob",
"UpdateLoginProfile",
]
# aws.cloudtrail_threat_detection_enumeration
threat_detection_enumeration_entropy: 0.7 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.7 (70%)
threat_detection_enumeration_threshold: 0.1 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.1 (10%)
threat_detection_enumeration_minutes: 1440 # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
threat_detection_enumeration_actions: [
"DescribeAccessEntry",
"DescribeAccountAttributes",
"DescribeAvailabilityZones",
"DescribeBundleTasks",
"DescribeCarrierGateways",
"DescribeClientVpnRoutes",
"DescribeCluster",
"DescribeDhcpOptions",
"DescribeFlowLogs",
"DescribeImages",
"DescribeInstanceAttribute",
"DescribeInstanceInformation",
"DescribeInstanceTypes",
"DescribeInstances",
"DescribeInstances",
"DescribeKeyPairs",
"DescribeLogGroups",
"DescribeLogStreams",
"DescribeOrganization",
"DescribeRegions",
"DescribeSecurityGroups",
"DescribeSnapshotAttribute",
"DescribeSnapshotTierStatus",
"DescribeSubscriptionFilters",
"DescribeTransitGatewayMulticastDomains",
"DescribeVolumes",
"DescribeVolumesModifications",
"DescribeVpcEndpointConnectionNotifications",
"DescribeVpcs",
"GetAccount",
"GetAccountAuthorizationDetails",
"GetAccountSendingEnabled",
"GetBucketAcl",
"GetBucketLogging",
"GetBucketPolicy",
"GetBucketReplication",
"GetBucketVersioning",
"GetCallerIdentity",
"GetCertificate",
"GetConsoleScreenshot",
"GetCostAndUsage",
"GetDetector",
"GetEbsDefaultKmsKeyId",
"GetEbsEncryptionByDefault",
"GetFindings",
"GetFlowLogsIntegrationTemplate",
"GetIdentityVerificationAttributes",
"GetInstances",
"GetIntrospectionSchema",
"GetLaunchTemplateData",
"GetLaunchTemplateData",
"GetLogRecord",
"GetParameters",
"GetPolicyVersion",
"GetPublicAccessBlock",
"GetQueryResults",
"GetRegions",
"GetSMSAttributes",
"GetSMSSandboxAccountStatus",
"GetSendQuota",
"GetTransitGatewayRouteTableAssociations",
"GetUserPolicy",
"HeadObject",
"ListAccessKeys",
"ListAccounts",
"ListAllMyBuckets",
"ListAssociatedAccessPolicies",
"ListAttachedUserPolicies",
"ListClusters",
"ListDetectors",
"ListDomains",
"ListFindings",
"ListHostedZones",
"ListIPSets",
"ListIdentities",
"ListInstanceProfiles",
"ListObjects",
"ListOrganizationalUnitsForParent",
"ListOriginationNumbers",
"ListPolicyVersions",
"ListRoles",
"ListRoles",
"ListRules",
"ListServiceQuotas",
"ListSubscriptions",
"ListTargetsByRule",
"ListTopics",
"ListUsers",
"LookupEvents",
"Search",
]
threat_detection_enumeration_actions:
[
"DescribeAccessEntry",
"DescribeAccountAttributes",
"DescribeAvailabilityZones",
"DescribeBundleTasks",
"DescribeCarrierGateways",
"DescribeClientVpnRoutes",
"DescribeCluster",
"DescribeDhcpOptions",
"DescribeFlowLogs",
"DescribeImages",
"DescribeInstanceAttribute",
"DescribeInstanceInformation",
"DescribeInstanceTypes",
"DescribeInstances",
"DescribeInstances",
"DescribeKeyPairs",
"DescribeLogGroups",
"DescribeLogStreams",
"DescribeOrganization",
"DescribeRegions",
"DescribeSecurityGroups",
"DescribeSnapshotAttribute",
"DescribeSnapshotTierStatus",
"DescribeSubscriptionFilters",
"DescribeTransitGatewayMulticastDomains",
"DescribeVolumes",
"DescribeVolumesModifications",
"DescribeVpcEndpointConnectionNotifications",
"DescribeVpcs",
"GetAccount",
"GetAccountAuthorizationDetails",
"GetAccountSendingEnabled",
"GetBucketAcl",
"GetBucketLogging",
"GetBucketPolicy",
"GetBucketReplication",
"GetBucketVersioning",
"GetCallerIdentity",
"GetCertificate",
"GetConsoleScreenshot",
"GetCostAndUsage",
"GetDetector",
"GetEbsDefaultKmsKeyId",
"GetEbsEncryptionByDefault",
"GetFindings",
"GetFlowLogsIntegrationTemplate",
"GetIdentityVerificationAttributes",
"GetInstances",
"GetIntrospectionSchema",
"GetLaunchTemplateData",
"GetLaunchTemplateData",
"GetLogRecord",
"GetParameters",
"GetPolicyVersion",
"GetPublicAccessBlock",
"GetQueryResults",
"GetRegions",
"GetSMSAttributes",
"GetSMSSandboxAccountStatus",
"GetSendQuota",
"GetTransitGatewayRouteTableAssociations",
"GetUserPolicy",
"HeadObject",
"ListAccessKeys",
"ListAccounts",
"ListAllMyBuckets",
"ListAssociatedAccessPolicies",
"ListAttachedUserPolicies",
"ListClusters",
"ListDetectors",
"ListDomains",
"ListFindings",
"ListHostedZones",
"ListIPSets",
"ListIdentities",
"ListInstanceProfiles",
"ListObjects",
"ListOrganizationalUnitsForParent",
"ListOriginationNumbers",
"ListPolicyVersions",
"ListRoles",
"ListRoles",
"ListRules",
"ListServiceQuotas",
"ListSubscriptions",
"ListTargetsByRule",
"ListTopics",
"ListUsers",
"LookupEvents",
"Search",
]
# AWS RDS Configuration
# aws.rds_instance_backup_enabled
# Whether to check RDS instance replicas or not
check_rds_instance_replicas: False
# AWS ACM Configuration
# aws.acm_certificates_expiration_check
days_to_expire_threshold: 7
# Azure Configuration
azure:
# Azure Network Configuration
# azure.network_public_ip_shodan
# TODO: create common config
shodan_api_key: null
# Azure App Configuration
# Azure App Service
# azure.app_ensure_php_version_is_latest
php_latest_version: "8.2"
# azure.app_ensure_python_version_is_latest
@@ -331,4 +376,34 @@ gcp:
# gcp.compute_public_address_shodan
shodan_api_key: null
# Kubernetes Configuration
kubernetes:
# Kubernetes API Server
# kubernetes.apiserver_audit_log_maxbackup_set
audit_log_maxbackup: 10
# kubernetes.apiserver_audit_log_maxsize_set
audit_log_maxsize: 100
# kubernetes.apiserver_audit_log_maxage_set
audit_log_maxage: 30
# kubernetes.apiserver_strong_ciphers_only
apiserver_strong_ciphers:
[
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
]
# Kubelet
# kubernetes.kubelet_strong_ciphers_only
kubelet_strong_ciphers:
[
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
]
```
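
Stepping back from the YAML above: the renamed `threat_detection_*_threshold` parameters express the fraction of the configured action list that must appear in recent CloudTrail events before the check flags an attack. A hedged sketch of that decision (the check's real implementation is not part of this diff; names and events below are illustrative):

```python
# Hedged sketch: flag enumeration when the share of configured actions seen
# in recent CloudTrail events exceeds the threshold. Events are illustrative.
threat_detection_enumeration_threshold = 0.1  # default: 10%
enumeration_actions = [
    "DescribeInstances",
    "ListUsers",
    "GetCallerIdentity",
    "ListAccessKeys",
]

recent_event_names = {"DescribeInstances", "ListUsers"}  # parsed from CloudTrail

found = recent_event_names.intersection(enumeration_actions)
ratio = len(found) / len(enumeration_actions)
print(ratio > threat_detection_enumeration_threshold)  # 0.5 > 0.1 -> True
```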

View File

@@ -125,7 +125,7 @@ The JSON-OCSF output format implements the [Detection Finding](https://schema.oc
"product": {
"name": "Prowler",
"vendor_name": "Prowler",
"version": "4.2.0"
"version": "4.2.2"
},
"version": "1.1.0"
},
@@ -333,7 +333,7 @@ The following is the mapping between the native JSON and the Detection Finding f
| --- |---|
| AssessmentStartTime | event_time |
| FindingUniqueId | finding_info.uid |
| Provider | cloud.account.type |
| Provider | cloud.provider |
| CheckID | metadata.event_code |
| CheckTitle | finding_info.title |
| CheckType | unmapped.check_type |
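
To make the corrected row concrete, a minimal sketch of the mapping in Python (field values are hypothetical; only a few rows from the table are shown):

```python
# Native Prowler JSON fields (hypothetical values)
native = {
    "Provider": "aws",
    "FindingUniqueId": "example-finding-uid",
    "CheckID": "example_check_id",
}

# JSON-OCSF Detection Finding fields, per the mapping table above
ocsf = {
    "cloud": {"provider": native["Provider"]},  # corrected: cloud.provider, not cloud.account.type
    "finding_info": {"uid": native["FindingUniqueId"]},
    "metadata": {"event_code": native["CheckID"]},
}
```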

View File

@@ -11,6 +11,12 @@ prowler <provider> --scan-unused-services
## Services that are ignored
### AWS
#### ACM
You can have certificates in ACM that are not in use by any AWS resource.
Prowler checks whether every certificate is going to expire soon; by default, a certificate that is not in use will not be checked at all (whether it is expired, about to expire, or still valid).
- `acm_certificates_expiration_check`
#### Athena
When you create an AWS Account, Athena will create a default primary workgroup for you.
Prowler will check if that workgroup is enabled and if it is being used by checking if there were queries in the last 45 days.
@@ -30,9 +36,10 @@ If EBS default encryption is not enabled, sensitive information at rest is not pr
- `ec2_ebs_default_encryption`
If your security groups are not properly configured, the attack surface is increased; nonetheless, Prowler will detect the security groups that are in use (attached) and only notify about those. This logic applies to the 15 checks related to open ports in security groups.
If your security groups are not properly configured, the attack surface is increased; nonetheless, Prowler will detect the security groups that are in use (attached) and only notify about those. This logic applies to the 15 checks related to open ports in security groups and the check for the default security group.
- `ec2_securitygroup_allow_ingress_from_internet_to_port_X` (15 checks)
- `ec2_securitygroup_default_restrict_traffic`
Prowler will also check for Network ACLs that are in use and only alert on those that have open ports.
@@ -69,3 +76,15 @@ You should enable Public Access Block at the account level to prevent the exposu
VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows. Nevertheless, Prowler will only check if the Flow Logs are enabled for those VPCs that are in use, in other words, only the VPCs where you have ENIs (network interfaces).
- `vpc_flow_logs_enabled`
VPC subnets must not have public IP addresses by default to prevent the exposure of your resources to the internet. Prowler will only check this configuration for those VPCs that are in use, in other words, only the VPCs where you have ENIs (network interfaces).
- `vpc_subnet_no_public_ip_by_default`
VPCs should have separate private and public subnets to prevent the exposure of your resources to the internet. Prowler will only check this configuration for those VPCs that are in use, in other words, only the VPCs where you have ENIs (network interfaces).
- `vpc_subnet_separate_private_public`
VPCs should have subnets in different availability zones to prevent a single point of failure. Prowler will only check this configuration for those VPCs that are in use, in other words, only the VPCs where you have ENIs (network interfaces).
- `vpc_subnet_different_az`
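
A hedged sketch of the "in use" rule the paragraphs above repeat: a VPC counts as in use only when at least one ENI lives in it (identifiers are illustrative):

```python
# Map each VPC to its network interfaces; only non-empty ones count as in use.
vpcs = {"vpc-aaa": [], "vpc-bbb": ["eni-1"]}
in_use = {vpc_id for vpc_id, enis in vpcs.items() if enis}
assert in_use == {"vpc-bbb"}  # only vpc-bbb would be evaluated by these checks
```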

poetry.lock generated (140 changed lines)
View File

@@ -708,17 +708,17 @@ files = [
[[package]]
name = "boto3"
version = "1.34.109"
version = "1.34.113"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "boto3-1.34.109-py3-none-any.whl", hash = "sha256:50a0f24dd737529ae489a3586f260b9220c6aede1ae7851fa4f33878c8805ef8"},
{file = "boto3-1.34.109.tar.gz", hash = "sha256:98d389562e03a46fd79fea5f988e9e6032674a0c3e9e42c06941ec588b7e1070"},
{file = "boto3-1.34.113-py3-none-any.whl", hash = "sha256:7e59f0a848be477a4c98a90e7a18a0e284adfb643f7879d2b303c5f493661b7a"},
{file = "boto3-1.34.113.tar.gz", hash = "sha256:009cd143509f2ff4c37582c3f45d50f28c95eed68e8a5c36641206bdb597a9ea"},
]
[package.dependencies]
botocore = ">=1.34.109,<1.35.0"
botocore = ">=1.34.113,<1.35.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.10.0,<0.11.0"
@@ -727,13 +727,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
version = "1.34.113"
version = "1.34.118"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.8"
files = [
{file = "botocore-1.34.113-py3-none-any.whl", hash = "sha256:8ca87776450ef41dd25c327eb6e504294230a5756940d68bcfdedc4a7cdeca97"},
{file = "botocore-1.34.113.tar.gz", hash = "sha256:449912ba3c4ded64f21d09d428146dd9c05337b2a112e15511bf2c4888faae79"},
{file = "botocore-1.34.118-py3-none-any.whl", hash = "sha256:e3f6c5636a4394768e81e33a16f5c6ae7f364f512415d423f9b9dc67fc638df4"},
{file = "botocore-1.34.118.tar.gz", hash = "sha256:0a3d1ec0186f8b516deb39474de3d226d531f77f92a0f56ad79b80219db3ae9e"},
]
[package.dependencies]
@@ -999,63 +999,63 @@ files = [
[[package]]
name = "coverage"
version = "7.5.2"
version = "7.5.3"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "coverage-7.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:554c7327bf0fd688050348e22db7c8e163fb7219f3ecdd4732d7ed606b417263"},
{file = "coverage-7.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d0305e02e40c7cfea5d08d6368576537a74c0eea62b77633179748d3519d6705"},
{file = "coverage-7.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:829fb55ad437d757c70d5b1c51cfda9377f31506a0a3f3ac282bc6a387d6a5f1"},
{file = "coverage-7.5.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:894b1acded706f1407a662d08e026bfd0ff1e59e9bd32062fea9d862564cfb65"},
{file = "coverage-7.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe76d6dee5e4febefa83998b17926df3a04e5089e3d2b1688c74a9157798d7a2"},
{file = "coverage-7.5.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c7ebf2a37e4f5fea3c1a11e1f47cea7d75d0f2d8ef69635ddbd5c927083211fc"},
{file = "coverage-7.5.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20e611fc36e1a0fc7bbf957ef9c635c8807d71fbe5643e51b2769b3cc0fb0b51"},
{file = "coverage-7.5.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7c5c5b7ae2763533152880d5b5b451acbc1089ade2336b710a24b2b0f5239d20"},
{file = "coverage-7.5.2-cp310-cp310-win32.whl", hash = "sha256:1e4225990a87df898e40ca31c9e830c15c2c53b1d33df592bc8ef314d71f0281"},
{file = "coverage-7.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:976cd92d9420e6e2aa6ce6a9d61f2b490e07cb468968adf371546b33b829284b"},
{file = "coverage-7.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5997d418c219dcd4dcba64e50671cca849aaf0dac3d7a2eeeb7d651a5bd735b8"},
{file = "coverage-7.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ec27e93bbf5976f0465e8936f02eb5add99bbe4e4e7b233607e4d7622912d68d"},
{file = "coverage-7.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f11f98753800eb1ec872562a398081f6695f91cd01ce39819e36621003ec52a"},
{file = "coverage-7.5.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e34680049eecb30b6498784c9637c1c74277dcb1db75649a152f8004fbd6646"},
{file = "coverage-7.5.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e12536446ad4527ac8ed91d8a607813085683bcce27af69e3b31cd72b3c5960"},
{file = "coverage-7.5.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d3f7744b8a8079d69af69d512e5abed4fb473057625588ce126088e50d05493"},
{file = "coverage-7.5.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:431a3917e32223fcdb90b79fe60185864a9109631ebc05f6c5aa03781a00b513"},
{file = "coverage-7.5.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a7c6574225f34ce45466f04751d957b5c5e6b69fca9351db017c9249786172ce"},
{file = "coverage-7.5.2-cp311-cp311-win32.whl", hash = "sha256:2b144d142ec9987276aeff1326edbc0df8ba4afbd7232f0ca10ad57a115e95b6"},
{file = "coverage-7.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:900532713115ac58bc3491b9d2b52704a05ed408ba0918d57fd72c94bc47fba1"},
{file = "coverage-7.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9a42970ce74c88bdf144df11c52c5cf4ad610d860de87c0883385a1c9d9fa4ab"},
{file = "coverage-7.5.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:26716a1118c6ce2188283b4b60a898c3be29b480acbd0a91446ced4fe4e780d8"},
{file = "coverage-7.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60b66b0363c5a2a79fba3d1cd7430c25bbd92c923d031cae906bdcb6e054d9a2"},
{file = "coverage-7.5.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d22eba19273b2069e4efeff88c897a26bdc64633cbe0357a198f92dca94268"},
{file = "coverage-7.5.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bb5b92a0ab3d22dfdbfe845e2fef92717b067bdf41a5b68c7e3e857c0cff1a4"},
{file = "coverage-7.5.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1aef719b6559b521ae913ddeb38f5048c6d1a3d366865e8b320270b7bc4693c2"},
{file = "coverage-7.5.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8809c0ea0e8454f756e3bd5c36d04dddf222989216788a25bfd6724bfcee342c"},
{file = "coverage-7.5.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1acc2e2ef098a1d4bf535758085f508097316d738101a97c3f996bccba963ea5"},
{file = "coverage-7.5.2-cp312-cp312-win32.whl", hash = "sha256:97de509043d3f0f2b2cd171bdccf408f175c7f7a99d36d566b1ae4dd84107985"},
{file = "coverage-7.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:8941e35a0e991a7a20a1fa3e3182f82abe357211f2c335a9e6007067c3392fcf"},
{file = "coverage-7.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5662bf0f6fb6757f5c2d6279c541a5af55a39772c2362ed0920b27e3ce0e21f7"},
{file = "coverage-7.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d9c62cff2ffb4c2a95328488fd7aa96a7a4b34873150650fe76b19c08c9c792"},
{file = "coverage-7.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74eeaa13e8200ad72fca9c5f37395fb310915cec6f1682b21375e84fd9770e84"},
{file = "coverage-7.5.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f29bf497d51a5077994b265e976d78b09d9d0dff6ca5763dbb4804534a5d380"},
{file = "coverage-7.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f96aa94739593ae0707eda9813ce363a0a0374a810ae0eced383340fc4a1f73"},
{file = "coverage-7.5.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:51b6cee539168a912b4b3b040e4042b9e2c9a7ad9c8546c09e4eaeff3eacba6b"},
{file = "coverage-7.5.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:59a75e6aa5c25b50b5a1499f9718f2edff54257f545718c4fb100f48d570ead4"},
{file = "coverage-7.5.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29da75ce20cb0a26d60e22658dd3230713c6c05a3465dd8ad040ffc991aea318"},
{file = "coverage-7.5.2-cp38-cp38-win32.whl", hash = "sha256:23f2f16958b16152b43a39a5ecf4705757ddd284b3b17a77da3a62aef9c057ef"},
{file = "coverage-7.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:9e41c94035e5cdb362beed681b58a707e8dc29ea446ea1713d92afeded9d1ddd"},
{file = "coverage-7.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:06d96b9b19bbe7f049c2be3c4f9e06737ec6d8ef8933c7c3a4c557ef07936e46"},
{file = "coverage-7.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:878243e1206828908a6b4a9ca7b1aa8bee9eb129bf7186fc381d2646f4524ce9"},
{file = "coverage-7.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:482df956b055d3009d10fce81af6ffab28215d7ed6ad4a15e5c8e67cb7c5251c"},
{file = "coverage-7.5.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a35c97af60a5492e9e89f8b7153fe24eadfd61cb3a2fb600df1a25b5dab34b7e"},
{file = "coverage-7.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24bb4c7859a3f757a116521d4d3a8a82befad56ea1bdacd17d6aafd113b0071e"},
{file = "coverage-7.5.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e1046aab24c48c694f0793f669ac49ea68acde6a0798ac5388abe0a5615b5ec8"},
{file = "coverage-7.5.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:448ec61ea9ea7916d5579939362509145caaecf03161f6f13e366aebb692a631"},
{file = "coverage-7.5.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4a00bd5ba8f1a4114720bef283cf31583d6cb1c510ce890a6da6c4268f0070b7"},
{file = "coverage-7.5.2-cp39-cp39-win32.whl", hash = "sha256:9f805481d5eff2a96bac4da1570ef662bf970f9a16580dc2c169c8c3183fa02b"},
{file = "coverage-7.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:2c79f058e7bec26b5295d53b8c39ecb623448c74ccc8378631f5cb5c16a7e02c"},
{file = "coverage-7.5.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:40dbb8e7727560fe8ab65efcddfec1ae25f30ef02e2f2e5d78cfb52a66781ec5"},
{file = "coverage-7.5.2.tar.gz", hash = "sha256:13017a63b0e499c59b5ba94a8542fb62864ba3016127d1e4ef30d354fc2b00e9"},
{file = "coverage-7.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45"},
{file = "coverage-7.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec"},
{file = "coverage-7.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286"},
{file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc"},
{file = "coverage-7.5.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d"},
{file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83"},
{file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d"},
{file = "coverage-7.5.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c"},
{file = "coverage-7.5.3-cp310-cp310-win32.whl", hash = "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84"},
{file = "coverage-7.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac"},
{file = "coverage-7.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974"},
{file = "coverage-7.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232"},
{file = "coverage-7.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd"},
{file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807"},
{file = "coverage-7.5.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb"},
{file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc"},
{file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8"},
{file = "coverage-7.5.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614"},
{file = "coverage-7.5.3-cp311-cp311-win32.whl", hash = "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9"},
{file = "coverage-7.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a"},
{file = "coverage-7.5.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8"},
{file = "coverage-7.5.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3"},
{file = "coverage-7.5.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1"},
{file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db"},
{file = "coverage-7.5.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd"},
{file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523"},
{file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35"},
{file = "coverage-7.5.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84"},
{file = "coverage-7.5.3-cp312-cp312-win32.whl", hash = "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08"},
{file = "coverage-7.5.3-cp312-cp312-win_amd64.whl", hash = "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb"},
{file = "coverage-7.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb"},
{file = "coverage-7.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155"},
{file = "coverage-7.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24"},
{file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98"},
{file = "coverage-7.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d"},
{file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d"},
{file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce"},
{file = "coverage-7.5.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0"},
{file = "coverage-7.5.3-cp38-cp38-win32.whl", hash = "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485"},
{file = "coverage-7.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56"},
{file = "coverage-7.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85"},
{file = "coverage-7.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31"},
{file = "coverage-7.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d"},
{file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341"},
{file = "coverage-7.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7"},
{file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52"},
{file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303"},
{file = "coverage-7.5.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd"},
{file = "coverage-7.5.3-cp39-cp39-win32.whl", hash = "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d"},
{file = "coverage-7.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0"},
{file = "coverage-7.5.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884"},
{file = "coverage-7.5.3.tar.gz", hash = "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f"},
]
[package.dependencies]
@@ -1587,13 +1587,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
[[package]]
name = "google-api-python-client"
version = "2.130.0"
version = "2.131.0"
description = "Google API Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "google-api-python-client-2.130.0.tar.gz", hash = "sha256:2bba3122b82a649c677b8a694b8e2bbf2a5fbf3420265caf3343bb88e2e9f0ae"},
{file = "google_api_python_client-2.130.0-py2.py3-none-any.whl", hash = "sha256:7d45a28d738628715944a9c9d73e8696e7e03ac50b7de87f5e3035cefa94ed3a"},
{file = "google-api-python-client-2.131.0.tar.gz", hash = "sha256:1c03e24af62238a8817ecc24e9d4c32ddd4cb1f323b08413652d9a9a592fc00d"},
{file = "google_api_python_client-2.131.0-py2.py3-none-any.whl", hash = "sha256:e325409bdcef4604d505d9246ce7199960a010a0569ac503b9f319db8dbdc217"},
]
[package.dependencies]
@@ -2441,13 +2441,13 @@ min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-imp
[[package]]
name = "mkdocs-git-revision-date-localized-plugin"
version = "1.2.5"
version = "1.2.6"
description = "Mkdocs plugin that enables displaying the localized date of the last git modification of a markdown file."
optional = false
python-versions = ">=3.8"
files = [
{file = "mkdocs_git_revision_date_localized_plugin-1.2.5-py3-none-any.whl", hash = "sha256:d796a18b07cfcdb154c133e3ec099d2bb5f38389e4fd54d3eb516a8a736815b8"},
{file = "mkdocs_git_revision_date_localized_plugin-1.2.5.tar.gz", hash = "sha256:0c439816d9d0dba48e027d9d074b2b9f1d7cd179f74ba46b51e4da7bb3dc4b9b"},
{file = "mkdocs_git_revision_date_localized_plugin-1.2.6-py3-none-any.whl", hash = "sha256:f015cb0f3894a39b33447b18e270ae391c4e25275cac5a626e80b243784e2692"},
{file = "mkdocs_git_revision_date_localized_plugin-1.2.6.tar.gz", hash = "sha256:e432942ce4ee8aa9b9f4493e993dee9d2cc08b3ea2b40a3d6b03ca0f2a4bcaa2"},
]
[package.dependencies]
@@ -2514,13 +2514,13 @@ test = ["pytest", "pytest-cov"]
[[package]]
name = "moto"
version = "5.0.8"
version = "5.0.9"
description = ""
optional = false
python-versions = ">=3.8"
files = [
{file = "moto-5.0.8-py2.py3-none-any.whl", hash = "sha256:7d1035e366434bfa9fcc0621f07d5aa724b6846408071d540137a0554c46f214"},
{file = "moto-5.0.8.tar.gz", hash = "sha256:517fb808dc718bcbdda54c6ffeaca0adc34cf6e10821bfb01216ce420a31765c"},
{file = "moto-5.0.9-py2.py3-none-any.whl", hash = "sha256:21a13e02f83d6a18cfcd99949c96abb2e889f4bd51c4c6a3ecc8b78765cb854e"},
{file = "moto-5.0.9.tar.gz", hash = "sha256:eb71f1cba01c70fff1f16086acb24d6d9aeb32830d646d8989f98a29aeae24ba"},
]
[package.dependencies]
@@ -4907,4 +4907,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<3.13"
content-hash = "2d423feb8ba9d92e3f32f240a9b09f2e66e13d4c65447d133efb72050a9c154d"
content-hash = "450da57ae7375ff59256f54de76da7e8aad6e2f531cd6614bfc5f59d6489c9ef"

View File

@@ -40,7 +40,7 @@ from prowler.lib.outputs.compliance.compliance import display_compliance_table
from prowler.lib.outputs.html.html import add_html_footer, fill_html_overview_statistics
from prowler.lib.outputs.json.json import close_json
from prowler.lib.outputs.outputs import extract_findings_statistics
from prowler.lib.outputs.slack import send_slack_message
from prowler.lib.outputs.slack.slack import Slack
from prowler.lib.outputs.summary_table import display_summary_table
from prowler.providers.aws.lib.s3.s3 import send_to_s3_bucket
from prowler.providers.aws.lib.security_hub.security_hub import (
@@ -89,7 +89,8 @@ def prowler():
)
if not args.no_banner:
print_banner(args.verbose, getattr(args, "fixer", None))
legend = args.verbose or getattr(args, "fixer", None)
print_banner(legend)
# We treat the compliance framework as another output format
if compliance_framework:
@@ -179,7 +180,8 @@ def prowler():
# Import custom checks from folder
if checks_folder:
parse_checks_from_folder(global_provider, checks_folder)
custom_checks = parse_checks_from_folder(global_provider, checks_folder)
checks_to_execute.update(custom_checks)
# Exclude checks if -e/--excluded-checks
if excluded_checks:
@@ -248,20 +250,22 @@ def prowler():
stats = extract_findings_statistics(findings)
if args.slack:
# TODO: this should be also in a config file
if "SLACK_API_TOKEN" in environ and (
"SLACK_CHANNEL_NAME" in environ or "SLACK_CHANNEL_ID" in environ
):
_ = send_slack_message(
environ["SLACK_API_TOKEN"],
(
environ["SLACK_CHANNEL_NAME"]
if "SLACK_CHANNEL_NAME" in environ
else environ["SLACK_CHANNEL_ID"]
),
stats,
global_provider,
token = environ["SLACK_API_TOKEN"]
channel = (
environ["SLACK_CHANNEL_NAME"]
if "SLACK_CHANNEL_NAME" in environ
else environ["SLACK_CHANNEL_ID"]
)
prowler_args = " ".join(sys.argv[1:])
slack = Slack(token, channel, global_provider)
_ = slack.send(stats, prowler_args)
else:
# Refactor(CLI)
logger.critical(
"Slack integration needs SLACK_API_TOKEN and SLACK_CHANNEL_NAME environment variables (see more in https://docs.prowler.cloud/en/latest/tutorials/integrations/#slack)."
)
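
For reference, a minimal sketch of the new integration path outside the CLI; the `Slack` class and `send` method come from the hunk above, while the provider and stats values are placeholders:

```python
import os
import sys

from prowler.lib.outputs.slack.slack import Slack

# Same environment contract as before: a bot token plus a channel name or ID.
token = os.environ["SLACK_API_TOKEN"]
channel = os.environ.get("SLACK_CHANNEL_NAME") or os.environ["SLACK_CHANNEL_ID"]

stats = {"total_pass": 10, "total_fail": 2}  # placeholder findings statistics
global_provider = None  # placeholder for the provider object Prowler passes here

slack = Slack(token, channel, global_provider)
_ = slack.send(stats, " ".join(sys.argv[1:]))
```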

View File

@@ -1,6 +1,5 @@
import os
import pathlib
import sys
from datetime import datetime, timezone
from os import getcwd
@@ -11,7 +10,7 @@ from prowler.lib.logger import logger
timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
prowler_version = "4.2.0"
prowler_version = "4.2.3"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
@@ -99,52 +98,87 @@ def check_current_version():
def load_and_validate_config_file(provider: str, config_file_path: str) -> dict:
"""
load_and_validate_config_file reads the Prowler config file in YAML format from the default location or the file passed with the --config-file flag
Reads the Prowler config file in YAML format from the default location or the file passed with the --config-file flag.
Args:
provider (str): The provider name (e.g., 'aws', 'gcp', 'azure', 'kubernetes').
config_file_path (str): The path to the configuration file.
Returns:
dict: The configuration dictionary for the specified provider.
"""
try:
with open(config_file_path) as f:
config = {}
with open(config_file_path, "r", encoding="utf-8") as f:
config_file = yaml.safe_load(f)
# Not to introduce a breaking change we have to allow the old format config file without any provider keys
# and a new format with a key for each provider to include their configuration values within
# Check if the new format is passed
if (
"aws" in config_file
or "gcp" in config_file
or "azure" in config_file
or "kubernetes" in config_file
):
# Not to introduce a breaking change, allow the old format config file without any provider keys
# and a new format with a key for each provider to include their configuration values within.
if any(key in config_file for key in ["aws", "gcp", "azure", "kubernetes"]):
config = config_file.get(provider, {})
else:
config = config_file if config_file else {}
# Not to break Azure, K8s and GCP does not support neither use the old config format
# Not to break Azure, K8s and GCP, which neither support nor use the old config format
if provider in ["azure", "gcp", "kubernetes"]:
config = {}
return config
except Exception as error:
logger.critical(
except FileNotFoundError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
sys.exit(1)
except yaml.YAMLError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
except UnicodeDecodeError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
return {}
def load_and_validate_fixer_config_file(
provider: str, fixer_config_file_path: str
) -> dict:
"""
load_and_validate_fixer_config_file reads the Prowler fixer config file in YAML format from the default location or the file passed with the --fixer-config flag
Reads the Prowler fixer config file in YAML format from the default location or the file passed with the --fixer-config flag.
Args:
provider (str): The provider name (e.g., 'aws', 'gcp', 'azure', 'kubernetes').
fixer_config_file_path (str): The path to the fixer configuration file.
Returns:
dict: The fixer configuration dictionary for the specified provider.
Note:
On read or parse errors the function logs the error and returns an empty dict.
"""
try:
with open(fixer_config_file_path) as f:
with open(fixer_config_file_path, "r", encoding="utf-8") as f:
fixer_config_file = yaml.safe_load(f)
return fixer_config_file.get(provider, {})
except Exception as error:
logger.critical(
except FileNotFoundError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
sys.exit(1)
except yaml.YAMLError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
except UnicodeDecodeError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
return {}
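
For context, a minimal sketch of how the dual-format resolution above behaves; the provider name and YAML contents are illustrative, and resolve is a stand-in for the branch inside load_and_validate_config_file:

import yaml

new_format = yaml.safe_load("""
aws:
  max_ec2_instance_age_in_days: 180
""")
old_format = yaml.safe_load("""
max_ec2_instance_age_in_days: 180
""")

def resolve(config_file: dict, provider: str) -> dict:
    # New format: a top-level provider key selects that provider's section
    if any(key in config_file for key in ["aws", "gcp", "azure", "kubernetes"]):
        return config_file.get(provider, {})
    # Old format: the whole file is treated as AWS config; other providers get {}
    return {} if provider in ["azure", "gcp", "kubernetes"] else (config_file or {})

assert resolve(new_format, "aws") == {"max_ec2_instance_age_in_days": 180}
assert resolve(old_format, "gcp") == {}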

View File

@@ -29,6 +29,18 @@ aws:
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
# allowed network interface types for security groups open to the Internet
ec2_allowed_interface_types:
[
"api_gateway_managed",
"vpc_endpoint",
]
# allowed network interface owners for security groups open to the Internet
ec2_allowed_instance_owners:
[
"amazon-elb"
]
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
@@ -250,10 +262,16 @@ aws:
"LookupEvents",
"Search",
]
# AWS RDS Configuration
# aws.rds_instance_backup_enabled
# Whether to check RDS instance replicas or not
check_rds_instance_replicas: False
# AWS ACM Configuration
# aws.acm_certificates_expiration_check
days_to_expire_threshold: 7
# Azure Configuration
azure:
# Azure Network Configuration
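
Checks consume these keys through their service client's audit_config with an in-code default, so a missing key degrades gracefully; a small sketch with illustrative values (audit_config stands in for the loaded provider section):

# audit_config is the provider section produced by load_and_validate_config_file
audit_config = {
    "ec2_allowed_interface_types": ["api_gateway_managed", "vpc_endpoint"],
    "days_to_expire_threshold": 7,
}
days_to_expire_threshold = audit_config.get("days_to_expire_threshold", 7)
allowed_types = audit_config.get("ec2_allowed_interface_types", [])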

View File

@@ -3,7 +3,16 @@ from colorama import Fore, Style
from prowler.config.config import banner_color, orange_color, prowler_version, timestamp
def print_banner(verbose: bool, fixer: bool = False):
def print_banner(legend: bool = False):
"""
Prints the banner with optional legend for color codes.
Parameters:
- legend (bool): Flag to indicate whether to print the color legend or not. Default is False.
Returns:
- None
"""
banner = rf"""{banner_color} _
_ __ _ __ _____ _| | ___ _ __
| '_ \| '__/ _ \ \ /\ / / |/ _ \ '__|
@@ -15,7 +24,7 @@ def print_banner(verbose: bool, fixer: bool = False):
"""
print(banner)
if verbose or fixer:
if legend:
print(
f"""
{Style.BRIGHT}Color code for results:{Style.RESET_ALL}

View File

@@ -126,9 +126,9 @@ def parse_checks_from_file(input_file: str, provider: str) -> set:
# Load checks from custom folder
def parse_checks_from_folder(provider, input_folder: str) -> int:
def parse_checks_from_folder(provider, input_folder: str) -> set:
try:
imported_checks = 0
custom_checks = set()
# Check if the input folder is an S3 URI
if provider.type == "aws" and re.search(
"^s3://([^/]+)/(.*?([^/]+))/$", input_folder
@@ -156,8 +156,8 @@ def parse_checks_from_folder(provider, input_folder: str) -> int:
if os.path.exists(prowler_module):
shutil.rmtree(prowler_module)
shutil.copytree(check_module, prowler_module)
imported_checks += 1
return imported_checks
custom_checks.add(check.name)
return custom_checks
except Exception as error:
logger.critical(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
@@ -438,7 +438,7 @@ def import_check(check_path: str) -> ModuleType:
return lib
def run_check(check: Check, output_options) -> list:
def run_check(check: Check, verbose: bool = False, only_logs: bool = False) -> list:
"""
Run the check and return the findings
Args:
@@ -448,7 +448,7 @@ def run_check(check: Check, output_options) -> list:
list: list of findings
"""
findings = []
if output_options.verbose or output_options.fixer:
if verbose:
print(
f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}"
)
@@ -456,7 +456,7 @@ def run_check(check: Check, output_options) -> list:
try:
findings = check.execute()
except Exception as error:
if not output_options.only_logs:
if not only_logs:
print(
f"Something went wrong in {check.CheckID}, please use --log-level ERROR"
)
@@ -698,7 +698,13 @@ def execute(
)
# Run check
check_findings = run_check(check_class, global_provider.output_options)
verbose = (
global_provider.output_options.verbose
or global_provider.output_options.fixer
)
check_findings = run_check(
check_class, verbose, global_provider.output_options.only_logs
)
# Update Audit Status
services_executed.add(service)

View File

@@ -102,7 +102,7 @@ class Check(ABC, Check_Metadata_Model):
return self.json()
@abstractmethod
def execute(self):
def execute(self) -> list:
"""Execute the check's logic"""

View File

@@ -0,0 +1,23 @@
from schema import Optional, Schema
mutelist_schema = Schema(
{
"Accounts": {
str: {
"Checks": {
str: {
"Regions": list,
"Resources": list,
Optional("Tags"): list,
Optional("Exceptions"): {
Optional("Accounts"): list,
Optional("Regions"): list,
Optional("Resources"): list,
Optional("Tags"): list,
},
}
}
}
}
}
)
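
For reference, a minimal mutelist that validates against this schema; the account ID, check name and patterns are illustrative:

from prowler.lib.mutelist.models import mutelist_schema

example_mutelist = {
    "Accounts": {
        "123456789012": {
            "Checks": {
                "s3_bucket_public_access": {
                    "Regions": ["us-east-1"],
                    "Resources": ["my-bucket-*"],
                }
            }
        }
    }
}
mutelist_schema.validate(example_mutelist)  # raises schema.SchemaError if malformed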

View File

@@ -1,121 +1,40 @@
import re
import sys
from typing import Any
import yaml
from boto3 import Session
from boto3.dynamodb.conditions import Attr
from schema import Optional, Schema
from prowler.lib.logger import logger
from prowler.lib.mutelist.models import mutelist_schema
from prowler.lib.outputs.utils import unroll_tags
mutelist_schema = Schema(
{
"Accounts": {
str: {
"Checks": {
str: {
"Regions": list,
"Resources": list,
Optional("Tags"): list,
Optional("Exceptions"): {
Optional("Accounts"): list,
Optional("Regions"): list,
Optional("Resources"): list,
Optional("Tags"): list,
},
}
}
}
}
}
)
def parse_mutelist_file(
mutelist_path: str, aws_session: Session = None, aws_account: str = None
):
def get_mutelist_file_from_local_file(mutelist_path: str):
try:
# Check if file is a S3 URI
if re.search("^s3://([^/]+)/(.*?([^/]+))$", mutelist_path):
bucket = mutelist_path.split("/")[2]
key = ("/").join(mutelist_path.split("/")[3:])
s3_client = aws_session.client("s3")
mutelist = yaml.safe_load(
s3_client.get_object(Bucket=bucket, Key=key)["Body"]
)["Mutelist"]
# Check if file is a Lambda Function ARN
elif re.search(r"^arn:(\w+):lambda:", mutelist_path):
lambda_region = mutelist_path.split(":")[3]
lambda_client = aws_session.client("lambda", region_name=lambda_region)
lambda_response = lambda_client.invoke(
FunctionName=mutelist_path, InvocationType="RequestResponse"
)
lambda_payload = lambda_response["Payload"].read()
mutelist = yaml.safe_load(lambda_payload)["Mutelist"]
# Check if file is a DynamoDB ARN
elif re.search(
r"^arn:aws(-cn|-us-gov)?:dynamodb:[a-z]{2}-[a-z-]+-[1-9]{1}:[0-9]{12}:table\/[a-zA-Z0-9._-]+$",
mutelist_path,
):
mutelist = {"Accounts": {}}
table_region = mutelist_path.split(":")[3]
dynamodb_resource = aws_session.resource(
"dynamodb", region_name=table_region
)
dynamo_table = dynamodb_resource.Table(mutelist_path.split("/")[1])
response = dynamo_table.scan(
FilterExpression=Attr("Accounts").is_in([aws_account, "*"])
)
dynamodb_items = response["Items"]
# Paginate through all results
while "LastEvaluatedKey" in dynamodb_items:
response = dynamo_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"],
FilterExpression=Attr("Accounts").is_in([aws_account, "*"]),
)
dynamodb_items.update(response["Items"])
for item in dynamodb_items:
# Create mutelist for every item
mutelist["Accounts"][item["Accounts"]] = {
"Checks": {
item["Checks"]: {
"Regions": item["Regions"],
"Resources": item["Resources"],
}
}
}
if "Tags" in item:
mutelist["Accounts"][item["Accounts"]]["Checks"][item["Checks"]][
"Tags"
] = item["Tags"]
if "Exceptions" in item:
mutelist["Accounts"][item["Accounts"]]["Checks"][item["Checks"]][
"Exceptions"
] = item["Exceptions"]
else:
with open(mutelist_path) as f:
mutelist = yaml.safe_load(f)["Mutelist"]
try:
mutelist_schema.validate(mutelist)
except Exception as error:
logger.critical(
f"{error.__class__.__name__} -- Mutelist YAML is malformed - {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return mutelist
with open(mutelist_path) as f:
mutelist = yaml.safe_load(f)["Mutelist"]
return mutelist
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return {}
def validate_mutelist(mutelist: dict) -> dict:
try:
mutelist = mutelist_schema.validate(mutelist)
return mutelist
except Exception as error:
logger.error(
f"{error.__class__.__name__} -- Mutelist YAML is malformed - {error}[{error.__traceback__.tb_lineno}]"
)
return {}
def mutelist_findings(
global_provider: Any,
check_findings: list[Any],
):
) -> list[Any]:
# Check if finding is muted
for finding in check_findings:
# TODO: Move this mapping to the execute_check function and pass that output to the mutelist and the report
@@ -167,7 +86,21 @@ def is_muted(
finding_region: str,
finding_resource: str,
finding_tags,
):
) -> bool:
"""
Check if the provided finding is muted for the audited account, check, region, resource and tags.
Args:
mutelist (dict): Dictionary containing information about muted checks for different accounts.
audited_account (str): The account being audited.
check (str): The check to be evaluated for muting.
finding_region (str): The region where the finding occurred.
finding_resource (str): The resource related to the finding.
finding_tags: The tags associated with the finding.
Returns:
bool: True if the finding is muted for the audited account, check, region, resource and tags, otherwise False.
"""
try:
# By default the finding is not muted
is_finding_muted = False
@@ -189,10 +122,10 @@ def is_muted(
return is_finding_muted
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
def is_muted_in_check(
@@ -202,7 +135,21 @@ def is_muted_in_check(
finding_region,
finding_resource,
finding_tags,
):
) -> bool:
"""
Check if the provided check is muted.
Args:
muted_checks (dict): Dictionary containing information about muted checks.
audited_account (str): The account to be audited.
check (str): The check to be evaluated for muting.
finding_region (str): The region where the finding occurred.
finding_resource (str): The resource related to the finding.
finding_tags (str): The tags associated with the finding.
Returns:
bool: True if the check is muted, otherwise False.
"""
try:
# By default the check is not muted
is_check_muted = False
@@ -263,44 +210,74 @@ def is_muted_in_check(
return is_check_muted
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
def is_muted_in_region(
mutelist_regions,
finding_region,
):
) -> bool:
"""
Check if the finding_region is present in the mutelist_regions.
Args:
mutelist_regions (list): List of regions in the mute list.
finding_region (str): Region to check if it is muted.
Returns:
bool: True if the finding_region is muted in any of the mutelist_regions, otherwise False.
"""
try:
return __is_item_matched__(mutelist_regions, finding_region)
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
def is_muted_in_tags(muted_tags, finding_tags):
def is_muted_in_tags(muted_tags, finding_tags) -> bool:
"""
Check if any of the muted tags are present in the finding tags.
Args:
muted_tags (list): List of muted tags to be checked.
finding_tags (str): String containing tags to search for muted tags.
Returns:
bool: True if any of the muted tags are present in the finding tags, otherwise False.
"""
try:
return __is_item_matched__(muted_tags, finding_tags)
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
def is_muted_in_resource(muted_resources, finding_resource):
def is_muted_in_resource(muted_resources, finding_resource) -> bool:
"""
Check if any of the muted_resources are present in the finding_resource.
Args:
muted_resources (list): List of muted resources to be checked.
finding_resource (str): Resource to search for muted resources.
Returns:
bool: True if any of the muted_resources are present in the finding_resource, otherwise False.
"""
try:
return __is_item_matched__(muted_resources, finding_resource)
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
def is_excepted(
@@ -309,8 +286,20 @@ def is_excepted(
finding_region,
finding_resource,
finding_tags,
):
"""is_excepted returns True if the account, region, resource and tags are excepted"""
) -> bool:
"""
Check if the provided account, region, resource, and tags are excepted based on the exceptions dictionary.
Args:
exceptions (dict): Dictionary containing exceptions for different attributes like Accounts, Regions, Resources, and Tags.
audited_account (str): The account to be audited.
finding_region (str): The region where the finding occurred.
finding_resource (str): The resource related to the finding.
finding_tags (str): The tags associated with the finding.
Returns:
bool: True if the account, region, resource, and tags are excepted based on the exceptions, otherwise False.
"""
try:
excepted = False
is_account_excepted = False
@@ -350,26 +339,35 @@ def is_excepted(
excepted = True
return excepted
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
def __is_item_matched__(matched_items, finding_items):
"""__is_item_matched__ return True if any of the matched_items are present in the finding_items, otherwise returns False."""
"""
Check if any of the items in matched_items are present in finding_items.
Args:
matched_items (list): List of items to be matched.
finding_items (str): String to search for matched items.
Returns:
bool: True if any of the matched_items are present in finding_items, otherwise False.
"""
try:
is_item_matched = False
if matched_items and (finding_items or finding_items == ""):
for item in matched_items:
if item == "*":
item = ".*"
if item.startswith("*"):
item = ".*" + item[1:]
if re.search(item, finding_items):
is_item_matched = True
break
return is_item_matched
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
sys.exit(1)
return False
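
A quick worked example of the matching rules above (an exact "*" and a leading "*" are rewritten to ".*"; everything else is fed to re.search as-is), called at module scope where __is_item_matched__ is defined; values are illustrative:

assert __is_item_matched__(["*"], "any-resource") is True           # "*" matches everything
assert __is_item_matched__(["prowler-*"], "prowler-bucket") is True
assert __is_item_matched__(["prod"], "my-prod-db") is True          # re.search, so substrings match
assert __is_item_matched__(["dev"], "prod-db") is False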

View File

@@ -60,23 +60,28 @@ def get_check_compliance_frameworks_in_input(
):
"""get_check_compliance_frameworks_in_input returns a list of Compliance for the given check if the compliance framework is present in the input compliance to execute"""
check_compliances = []
if bulk_checks_metadata and bulk_checks_metadata[check_id]:
for compliance in bulk_checks_metadata[check_id].Compliance:
compliance_name = ""
if compliance.Version:
compliance_name = (
compliance.Framework.lower()
+ "_"
+ compliance.Version.lower()
+ "_"
+ compliance.Provider.lower()
)
else:
compliance_name = (
compliance.Framework.lower() + "_" + compliance.Provider.lower()
)
if compliance_name.replace("-", "_") in input_compliance_frameworks:
check_compliances.append(compliance)
try:
if bulk_checks_metadata and bulk_checks_metadata.get(check_id):
for compliance in bulk_checks_metadata[check_id].Compliance:
compliance_name = ""
if compliance.Version:
compliance_name = (
compliance.Framework.lower()
+ "_"
+ compliance.Version.lower()
+ "_"
+ compliance.Provider.lower()
)
else:
compliance_name = (
compliance.Framework.lower() + "_" + compliance.Provider.lower()
)
if compliance_name.replace("-", "_") in input_compliance_frameworks:
check_compliances.append(compliance)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return check_compliances
@@ -221,7 +226,7 @@ def get_check_compliance(finding, provider_type, output_options) -> dict:
check_compliance[compliance_fw].append(requirement.Id)
return check_compliance
except Exception as error:
logger.critical(
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
sys.exit(1)
return {}

View File

@@ -58,7 +58,7 @@ def add_html_header(file_descriptor, provider):
<a href="{html_logo_url}"><img class="float-left card-img-left mt-4 mr-4 ml-4"
src={square_logo_img}
alt="prowler-logo"
style="width: 300px; height:auto;"/></a>
style="width: 15rem; height:auto;"/></a>
<div class="card">
<div class="card-header">
Report Information
@@ -135,18 +135,20 @@ def add_html_header(file_descriptor, provider):
def fill_html(file_descriptor, finding):
try:
row_class = "p-3 mb-2 bg-success-custom"
finding.status = finding.status.split(".")[0]
if finding.status == "INFO":
finding_status = finding.status.split(".")[0]
# Change the status of the finding if it's muted
if finding.muted:
finding_status = f"MUTED ({finding_status})"
row_class = "table-warning"
if finding.status == "MANUAL":
row_class = "table-info"
elif finding.status == "FAIL":
row_class = "table-danger"
elif finding.status == "WARNING":
row_class = "table-warning"
file_descriptor.write(
f"""
<tr class="{row_class}">
<td>{finding.status}</td>
<td>{finding_status}</td>
<td>{finding.severity.split(".")[0]}</td>
<td>{finding.service_name}</td>
<td>{finding.region.lower()}</td>
@@ -171,33 +173,42 @@ def fill_html(file_descriptor, finding):
def fill_html_overview_statistics(stats, output_filename, output_directory):
try:
filename = f"{output_directory}/{output_filename}{html_file_suffix}"
# Read file
if path.isfile(filename):
with open(filename, "r") as file:
with open(filename, "r", encoding="utf-8") as file:
filedata = file.read()
# Replace statistics
# TOTAL_FINDINGS
filedata = filedata.replace(
"TOTAL_FINDINGS", str(stats.get("findings_count"))
"TOTAL_FINDINGS", str(stats.get("findings_count", 0))
)
# TOTAL_RESOURCES
filedata = filedata.replace(
"TOTAL_RESOURCES", str(stats.get("resources_count"))
"TOTAL_RESOURCES", str(stats.get("resources_count", 0))
)
# TOTAL_PASS
filedata = filedata.replace("TOTAL_PASS", str(stats.get("total_pass")))
filedata = filedata.replace("TOTAL_PASS", str(stats.get("total_pass", 0)))
# TOTAL_FAIL
filedata = filedata.replace("TOTAL_FAIL", str(stats.get("total_fail")))
filedata = filedata.replace("TOTAL_FAIL", str(stats.get("total_fail", 0)))
# Write file
with open(filename, "w") as file:
with open(filename, "w", encoding="utf-8") as file:
file.write(filedata)
except Exception as error:
logger.critical(
except FileNotFoundError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
except UnicodeDecodeError as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
)
sys.exit(1)
def add_html_footer(output_filename, output_directory):

View File

@@ -1,156 +0,0 @@
import sys
from slack_sdk import WebClient
from prowler.config.config import aws_logo, azure_logo, gcp_logo, square_logo_img
from prowler.lib.logger import logger
def send_slack_message(token, channel, stats, provider):
try:
client = WebClient(token=token)
identity, logo = create_message_identity(provider)
response = client.chat_postMessage(
username="Prowler",
icon_url=square_logo_img,
channel=f"#{channel}",
blocks=create_message_blocks(identity, logo, stats),
)
return response
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
# TODO: move this to each provider
def create_message_identity(provider):
"""
Create a Slack message identity based on the provider type.
Parameters:
- provider (Provider): The Provider (e.g. "AwsProvider", "GcpProvider", "AzureProvider").
Returns:
- identity (str): The message identity based on the provider type.
- logo (str): The logo URL associated with the provider type.
"""
try:
identity = ""
logo = aws_logo
if provider.type == "aws":
identity = f"AWS Account *{provider.identity.account}*"
elif provider.type == "gcp":
identity = f"GCP Projects *{', '.join(provider.project_ids)}*"
logo = gcp_logo
elif provider.type == "azure":
printed_subscriptions = []
for key, value in provider.identity.subscriptions.items():
intermediate = f"- *{key}: {value}*\n"
printed_subscriptions.append(intermediate)
identity = f"Azure Subscriptions:\n{''.join(printed_subscriptions)}"
logo = azure_logo
return identity, logo
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def create_title(identity, stats):
try:
title = f"Hey there 👋 \n I'm *Prowler*, _the handy multi-cloud security tool_ :cloud::key:\n\n I have just finished the security assessment on your {identity} with a total of *{stats['findings_count']}* findings."
return title
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def create_message_blocks(identity, logo, stats):
try:
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": create_title(identity, stats),
},
"accessory": {
"type": "image",
"image_url": logo,
"alt_text": "Provider Logo",
},
},
{"type": "divider"},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"\n:white_check_mark: *{stats['total_pass']} Passed findings* ({round(stats['total_pass'] / stats['findings_count'] * 100 , 2)}%)\n",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"\n:x: *{stats['total_fail']} Failed findings* ({round(stats['total_fail'] / stats['findings_count'] * 100 , 2)}%)\n ",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"\n:bar_chart: *{stats['resources_count']} Scanned Resources*\n",
},
},
{"type": "divider"},
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": f"Used parameters: `prowler {' '.join(sys.argv[1:])} `",
}
],
},
{"type": "divider"},
{
"type": "section",
"text": {"type": "mrkdwn", "text": "Join our Slack Community!"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Prowler :slack:"},
"url": "https://join.slack.com/t/prowler-workspace/shared_invite/zt-1hix76xsl-2uq222JIXrC7Q8It~9ZNog",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Feel free to contact us in our repo",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Prowler :github:"},
"url": "https://github.com/prowler-cloud/prowler",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "See all the things you can do with ProwlerPro",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Prowler Pro"},
"url": "https://prowler.pro",
},
},
]
return blocks
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

View File

View File

@@ -0,0 +1,206 @@
from typing import Any
from slack_sdk import WebClient
from slack_sdk.web.base_client import SlackResponse
from prowler.config.config import aws_logo, azure_logo, gcp_logo, square_logo_img
from prowler.lib.logger import logger
class Slack:
_provider: Any
_token: str
_channel: str
def __init__(self, token: str, channel: str, provider: Any) -> None:
self._token = token
self._channel = channel
self._provider = provider
@property
def token(self):
return self._token
@property
def channel(self):
return self._channel
def send(self, stats: dict, args: str) -> SlackResponse:
"""
Sends the findings to Slack.
Args:
stats (dict): A dictionary containing audit statistics.
args (str): Command line arguments used for the audit.
Returns:
SlackResponse: Slack response if successful, error object if an exception occurs.
"""
try:
client = WebClient(token=self.token)
identity, logo = self.__create_message_identity__(self._provider)
response = client.chat_postMessage(
username="Prowler",
icon_url=square_logo_img,
channel=f"#{self.channel}",
blocks=self.__create_message_blocks__(identity, logo, stats, args),
)
return response
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return error
def __create_message_identity__(self, provider: Any):
"""
Create a Slack message identity based on the provider type.
Parameters:
- provider (Provider): The Provider (e.g. "AwsProvider", "GcpProvider", "AzureProvider").
Returns:
- identity (str): The message identity based on the provider type.
- logo (str): The logo URL associated with the provider type.
"""
# TODO: support kubernetes
try:
identity = ""
logo = aws_logo
if provider.type == "aws":
identity = f"AWS Account *{provider.identity.account}*"
elif provider.type == "gcp":
identity = f"GCP Projects *{', '.join(provider.project_ids)}*"
logo = gcp_logo
elif provider.type == "azure":
printed_subscriptions = []
for key, value in provider.identity.subscriptions.items():
intermediate = f"- *{key}: {value}*\n"
printed_subscriptions.append(intermediate)
identity = f"Azure Subscriptions:\n{''.join(printed_subscriptions)}"
logo = azure_logo
return identity, logo
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __create_message_blocks__(self, identity, logo, stats, args) -> list:
"""
Create the Slack message blocks.
Args:
identity: message identity.
logo: logo URL.
stats: audit statistics.
args: command line arguments used.
Returns:
list: list of Slack message blocks.
"""
try:
blocks = [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": self.__create_title__(identity, stats),
},
"accessory": {
"type": "image",
"image_url": logo,
"alt_text": "Provider Logo",
},
},
{"type": "divider"},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"\n:white_check_mark: *{stats['total_pass']} Passed findings* ({round(stats['total_pass'] / stats['findings_count'] * 100 , 2)}%)\n",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"\n:x: *{stats['total_fail']} Failed findings* ({round(stats['total_fail'] / stats['findings_count'] * 100 , 2)}%)\n ",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"\n:bar_chart: *{stats['resources_count']} Scanned Resources*\n",
},
},
{"type": "divider"},
{
"type": "context",
"elements": [
{
"type": "mrkdwn",
"text": f"Used parameters: `prowler {args}`",
}
],
},
{"type": "divider"},
{
"type": "section",
"text": {"type": "mrkdwn", "text": "Join our Slack Community!"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Prowler :slack:"},
"url": "https://join.slack.com/t/prowler-workspace/shared_invite/zt-1hix76xsl-2uq222JIXrC7Q8It~9ZNog",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Feel free to contact us in our repo",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Prowler :github:"},
"url": "https://github.com/prowler-cloud/prowler",
},
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "See all the things you can do with ProwlerPro",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Prowler Pro"},
"url": "https://prowler.pro",
},
},
]
return blocks
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __create_title__(self, identity, stats) -> str:
"""
Create the Slack message title.
Args:
identity: message identity.
stats: audit statistics.
Returns:
str: Slack message title.
"""
try:
title = f"Hey there 👋 \n I'm *Prowler*, _the handy multi-cloud security tool_ :cloud::key:\n\n I have just finished the security assessment on your {identity} with a total of *{stats['findings_count']}* findings."
return title
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
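
A minimal usage sketch of the new class; the import path, token, channel and stats are illustrative, and provider is assumed to be an already-initialized Prowler provider object:

import sys

from prowler.lib.outputs.slack import Slack  # import path is an assumption

def notify(provider, stats: dict) -> None:
    slack = Slack(token="xoxb-...", channel="prowler-findings", provider=provider)
    slack.send(stats, " ".join(sys.argv[1:]))  # args replace the sys.argv join the old function did internally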

View File

@@ -1,5 +1,6 @@
import os
import pathlib
import re
import sys
from argparse import Namespace
from datetime import datetime
@@ -15,11 +16,16 @@ from tzlocal import get_localzone
from prowler.config.config import (
aws_services_json_file,
get_default_mute_file_path,
load_and_validate_config_file,
load_and_validate_fixer_config_file,
)
from prowler.lib.check.check import list_modules, recover_checks_from_service
from prowler.lib.logger import logger
from prowler.lib.mutelist.mutelist import (
get_mutelist_file_from_local_file,
validate_mutelist,
)
from prowler.lib.utils.utils import open_file, parse_json_file, print_boxes
from prowler.providers.aws.config import (
AWS_STS_GLOBAL_ENDPOINT_REGION,
@@ -28,6 +34,11 @@ from prowler.providers.aws.config import (
)
from prowler.providers.aws.lib.arn.arn import parse_iam_credentials_arn
from prowler.providers.aws.lib.arn.models import ARN
from prowler.providers.aws.lib.mutelist.mutelist import (
get_mutelist_file_from_dynamodb,
get_mutelist_file_from_lambda,
get_mutelist_file_from_s3,
)
from prowler.providers.aws.lib.organizations.organizations import (
get_organizations_metadata,
parse_organizations_metadata,
@@ -285,6 +296,51 @@ class AwsProvider(Provider):
arguments, bulk_checks_metadata, self._identity
)
@property
def mutelist(self):
"""
mutelist property returns the provider's mutelist.
"""
return self._mutelist
@mutelist.setter
def mutelist(self, mutelist_path):
"""
mutelist.setter sets the provider's mutelist.
"""
# Set default mutelist path if none is set
if not mutelist_path:
mutelist_path = get_default_mute_file_path(self.type)
if mutelist_path:
# Mutelist from S3 URI
if re.search("^s3://([^/]+)/(.*?([^/]+))$", mutelist_path):
mutelist = get_mutelist_file_from_s3(
mutelist_path, self._session.current_session
)
# Mutelist from Lambda Function ARN
elif re.search(r"^arn:(\w+):lambda:", mutelist_path):
mutelist = get_mutelist_file_from_lambda(
mutelist_path,
self._session.current_session,
)
# Mutelist from DynamoDB ARN
elif re.search(
r"^arn:aws(-cn|-us-gov)?:dynamodb:[a-z]{2}-[a-z-]+-[1-9]{1}:[0-9]{12}:table\/[a-zA-Z0-9._-]+$",
mutelist_path,
):
mutelist = get_mutelist_file_from_dynamodb(
mutelist_path, self._session.current_session, self._identity.account
)
else:
mutelist = get_mutelist_file_from_local_file(mutelist_path)
mutelist = validate_mutelist(mutelist)
else:
mutelist = {}
self._mutelist = mutelist
self._mutelist_file_path = mutelist_path
@property
def get_output_mapping(self):
return {

View File

@@ -1256,9 +1256,12 @@
"ap-south-1",
"ap-southeast-1",
"ap-southeast-2",
"ca-central-1",
"eu-central-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"sa-east-1",
"us-east-1",
"us-west-2"
],
@@ -4958,6 +4961,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ca-central-1",
"ca-west-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
@@ -10700,9 +10704,11 @@
"eu-north-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
"sa-east-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2"
],
"aws-cn": [],

View File

@@ -0,0 +1,84 @@
import yaml
from boto3 import Session
from boto3.dynamodb.conditions import Attr
from prowler.lib.logger import logger
def get_mutelist_file_from_s3(mutelist_path: str, aws_session: Session = None):
try:
bucket = mutelist_path.split("/")[2]
key = ("/").join(mutelist_path.split("/")[3:])
s3_client = aws_session.client("s3")
mutelist = yaml.safe_load(s3_client.get_object(Bucket=bucket, Key=key)["Body"])[
"Mutelist"
]
return mutelist
except Exception as error:
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
return {}
def get_mutelist_file_from_lambda(mutelist_path: str, aws_session: Session = None):
try:
lambda_region = mutelist_path.split(":")[3]
lambda_client = aws_session.client("lambda", region_name=lambda_region)
lambda_response = lambda_client.invoke(
FunctionName=mutelist_path, InvocationType="RequestResponse"
)
lambda_payload = lambda_response["Payload"].read()
mutelist = yaml.safe_load(lambda_payload)["Mutelist"]
return mutelist
except Exception as error:
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
return {}
def get_mutelist_file_from_dynamodb(
mutelist_path: str, aws_session: Session = None, aws_account: str = None
):
try:
mutelist = {"Accounts": {}}
table_region = mutelist_path.split(":")[3]
dynamodb_resource = aws_session.resource("dynamodb", region_name=table_region)
dynamo_table = dynamodb_resource.Table(mutelist_path.split("/")[1])
response = dynamo_table.scan(
FilterExpression=Attr("Accounts").is_in([aws_account, "*"])
)
dynamodb_items = response["Items"]
# Paginate through all results
while "LastEvaluatedKey" in dynamodb_items:
response = dynamo_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"],
FilterExpression=Attr("Accounts").is_in([aws_account, "*"]),
)
dynamodb_items.extend(response["Items"])
for item in dynamodb_items:
# Create mutelist for every item
mutelist["Accounts"][item["Accounts"]] = {
"Checks": {
item["Checks"]: {
"Regions": item["Regions"],
"Resources": item["Resources"],
}
}
}
if "Tags" in item:
mutelist["Accounts"][item["Accounts"]]["Checks"][item["Checks"]][
"Tags"
] = item["Tags"]
if "Exceptions" in item:
mutelist["Accounts"][item["Accounts"]]["Checks"][item["Checks"]][
"Exceptions"
] = item["Exceptions"]
return mutelist
except Exception as error:
logger.error(
f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
)
return {}
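
A sketch of how these helpers feed the shared validation step; the bucket and key are illustrative, and the imports match the ones added to AwsProvider above:

from boto3 import Session

from prowler.lib.mutelist.mutelist import validate_mutelist
from prowler.providers.aws.lib.mutelist.mutelist import get_mutelist_file_from_s3

session = Session()
mutelist = get_mutelist_file_from_s3("s3://my-bucket/mutelist.yaml", session)
mutelist = validate_mutelist(mutelist)  # returns {} if the YAML is malformed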

View File

@@ -1,33 +1,36 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.acm.acm_client import acm_client
DAYS_TO_EXPIRE_THRESHOLD = 7
class acm_certificates_expiration_check(Check):
def execute(self):
findings = []
for certificate in acm_client.certificates:
report = Check_Report_AWS(self.metadata())
report.region = certificate.region
if certificate.expiration_days > DAYS_TO_EXPIRE_THRESHOLD:
report.status = "PASS"
report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} expires in {certificate.expiration_days} days."
report.resource_id = certificate.id
report.resource_details = certificate.name
report.resource_arn = certificate.arn
report.resource_tags = certificate.tags
else:
report.status = "FAIL"
if certificate.expiration_days < 0:
report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} has expired ({abs(certificate.expiration_days)} days ago)."
if certificate.in_use or acm_client.provider.scan_unused_services:
report = Check_Report_AWS(self.metadata())
report.region = certificate.region
if certificate.expiration_days > acm_client.audit_config.get(
"days_to_expire_threshold", 7
):
report.status = "PASS"
report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} expires in {certificate.expiration_days} days."
report.resource_id = certificate.id
report.resource_details = certificate.name
report.resource_arn = certificate.arn
report.resource_tags = certificate.tags
else:
report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} is about to expire in {certificate.expiration_days} days."
report.status = "FAIL"
if certificate.expiration_days < 0:
report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} has expired ({abs(certificate.expiration_days)} days ago)."
report.check_metadata.Severity = "high"
else:
report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} is about to expire in {certificate.expiration_days} days."
report.check_metadata.Severity = "medium"
report.resource_id = certificate.id
report.resource_details = certificate.name
report.resource_arn = certificate.arn
report.resource_tags = certificate.tags
report.resource_id = certificate.id
report.resource_details = certificate.name
report.resource_arn = certificate.arn
report.resource_tags = certificate.tags
findings.append(report)
findings.append(report)
return findings

View File

@@ -50,6 +50,7 @@ class ACM(AWSService):
id=certificate["CertificateArn"].split("/")[-1],
type=certificate["Type"],
expiration_days=certificate_expiration_time,
in_use=certificate.get("InUse", False),
transparency_logging=False,
region=regional_client.region,
)
@@ -99,5 +100,6 @@ class Certificate(BaseModel):
type: str
tags: Optional[list] = []
expiration_days: int
in_use: bool
transparency_logging: Optional[bool]
region: str

View File

@@ -10,7 +10,7 @@
"ResourceType": "AwsCloudFormationStack",
"Description": "Find secrets in CloudFormation outputs",
"Risk": "Secrets hardcoded into CloudFormation outputs can be used by malware and bad actors to gain lateral access to other services.",
"RelatedUrl": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html",
"RelatedUrl": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html",
"Remediation": {
"Code": {
"CLI": "https://docs.prowler.com/checks/aws/secrets-policies/bc_aws_secrets_2#cli-command",

View File

@@ -14,7 +14,7 @@ class cloudtrail_bucket_requires_mfa_delete(Check):
trail_bucket_is_in_account = False
trail_bucket = trail.s3_bucket
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -15,7 +15,7 @@ class cloudtrail_cloudwatch_logging_enabled(Check):
for trail in cloudtrail_client.trails.values():
if trail.name:
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -11,7 +11,7 @@ class cloudtrail_insights_exist(Check):
for trail in cloudtrail_client.trails.values():
if trail.is_logging:
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -11,7 +11,7 @@ class cloudtrail_kms_encryption_enabled(Check):
for trail in cloudtrail_client.trails.values():
if trail.name:
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -11,7 +11,7 @@ class cloudtrail_log_file_validation_enabled(Check):
for trail in cloudtrail_client.trails.values():
if trail.name:
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -14,7 +14,7 @@ class cloudtrail_logs_s3_bucket_access_logging_enabled(Check):
trail_bucket_is_in_account = False
trail_bucket = trail.s3_bucket
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -14,7 +14,7 @@ class cloudtrail_logs_s3_bucket_is_not_publicly_accessible(Check):
trail_bucket_is_in_account = False
trail_bucket = trail.s3_bucket
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -8,48 +8,55 @@ class cloudtrail_multi_region_enabled_logging_management_events(Check):
def execute(self):
findings = []
if cloudtrail_client.trails is not None:
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = "No trail found with multi-region enabled and logging management events."
report.region = cloudtrail_client.region
report.resource_id = cloudtrail_client.audited_account
report.resource_arn = cloudtrail_client.trail_arn_template
for trail in cloudtrail_client.trails.values():
if trail.is_logging:
if trail.is_multiregion:
for event in trail.data_events:
# Classic event selectors
if not event.is_advanced:
# Check if trail has IncludeManagementEvents and ReadWriteType is All
if (
event.event_selector["ReadWriteType"] == "All"
and event.event_selector["IncludeManagementEvents"]
):
report.region = trail.region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} is multi-region, is logging and have management events enabled."
# Advanced event selectors
elif event.is_advanced:
if event.event_selector.get(
"Name"
) == "Management events selector" and all(
[
field["Field"] != "readOnly"
for field in event.event_selector[
"FieldSelectors"
for region in cloudtrail_client.regional_clients.keys():
report = Check_Report_AWS(self.metadata())
report.status = "FAIL"
report.status_extended = "No CloudTrail trails enabled and logging management events were found."
report.region = region
report.resource_id = cloudtrail_client.audited_account
report.resource_arn = cloudtrail_client.trail_arn_template
trail_is_logging_management_events = False
for trail in cloudtrail_client.trails.values():
if trail.region == region or trail.is_multiregion:
if trail.is_logging:
for event in trail.data_events:
# Classic event selectors
if not event.is_advanced:
# Check if trail has IncludeManagementEvents and ReadWriteType is All
if (
event.event_selector["ReadWriteType"] == "All"
and event.event_selector[
"IncludeManagementEvents"
]
]
):
report.region = trail.region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} is multi-region, is logging and have management events enabled."
findings.append(report)
):
trail_is_logging_management_events = True
# Advanced event selectors
elif event.is_advanced:
if event.event_selector.get(
"Name"
) == "Management events selector" and all(
[
field["Field"] != "readOnly"
for field in event.event_selector[
"FieldSelectors"
]
]
):
trail_is_logging_management_events = True
if trail_is_logging_management_events:
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
report.status = "PASS"
if trail.is_multiregion:
report.status_extended = f"Trail {trail.name} from home region {trail.home_region} is multi-region, is logging and have management events enabled."
else:
report.status_extended = f"Trail {trail.name} in region {trail.home_region} is logging and have management events enabled."
# Since there is already a logging trail in that region, there is no point in checking the remaining trails
# Store the finding and exit the loop
findings.append(report)
break
if report.status == "FAIL":
findings.append(report)
return findings
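
In short, the check now emits one finding per region, and a region passes as soon as any trail that covers it (regional or multi-region) logs management events; a condensed sketch of that control flow, where logs_management_events is a hypothetical stand-in for the classic/advanced selector checks:

findings = []
for region in cloudtrail_client.regional_clients:
    status = "FAIL"
    for trail in cloudtrail_client.trails.values():
        if (trail.region == region or trail.is_multiregion) and trail.is_logging:
            if logs_management_events(trail):  # hypothetical helper
                status = "PASS"
                break  # one logging trail is enough for this region
    findings.append((region, status))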

View File

@@ -28,7 +28,7 @@ class cloudtrail_s3_dataevents_read_enabled(Check):
in resource["Values"]
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
@@ -45,7 +45,7 @@ class cloudtrail_s3_dataevents_read_enabled(Check):
and field_selector["Equals"][0] == "AWS::S3::Object"
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -28,7 +28,7 @@ class cloudtrail_s3_dataevents_write_enabled(Check):
in resource["Values"]
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags
@@ -45,7 +45,7 @@ class cloudtrail_s3_dataevents_write_enabled(Check):
and field_selector["Equals"][0] == "AWS::S3::Object"
):
report = Check_Report_AWS(self.metadata())
report.region = trail.region
report.region = trail.home_region
report.resource_id = trail.name
report.resource_arn = trail.arn
report.resource_tags = trail.tags

View File

@@ -36,6 +36,10 @@ class Cloudtrail(AWSService):
describe_trails = regional_client.describe_trails()["trailList"]
trails_count = 0
for trail in describe_trails:
# If a multi region trail was already retrieved in another region
if self.trails and trail["TrailARN"] in self.trails.keys():
continue
if not self.audit_resources or (
is_resource_filtered(trail["TrailARN"], self.audit_resources)
):
@@ -208,16 +212,21 @@ class Cloudtrail(AWSService):
logger.info("CloudTrail - List Tags...")
try:
for trail in self.trails.values():
# Check if trails are in this account and region
if (
trail.region == trail.home_region
and self.audited_account in trail.arn
):
regional_client = self.regional_clients[trail.region]
response = regional_client.list_tags(ResourceIdList=[trail.arn])[
"ResourceTagList"
][0]
trail.tags = response.get("TagsList")
try:
# Check if trails are in this account and region
if (
trail.region == trail.home_region
and self.audited_account in trail.arn
):
regional_client = self.regional_clients[trail.region]
response = regional_client.list_tags(
ResourceIdList=[trail.arn]
)["ResourceTagList"][0]
trail.tags = response.get("TagsList")
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -234,6 +243,7 @@ class Trail(BaseModel):
is_multiregion: bool = None
home_region: str = None
arn: str = None
# Region holds the region where the trail is audited
region: str
is_logging: bool = None
log_file_validation_enabled: bool = None

View File

@@ -1,5 +1,6 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.ec2.ec2_service import NetworkInterface
from prowler.providers.aws.services.ec2.lib.security_groups import check_security_group
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
from prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_all_ports import (
@@ -35,11 +36,68 @@ class ec2_securitygroup_allow_ingress_from_internet_to_any_port(Check):
if check_security_group(
ingress_rule, "-1", ports=None, any_address=True
):
report.status = "FAIL"
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has at least one port open to the Internet."
self.check_enis(
report=report,
security_group_name=security_group.name,
security_group_id=security_group.id,
enis=security_group.network_interfaces,
)
if report.status == "FAIL":
break # no need to check other ingress rules because at least one failed already
else:
report.status_extended = f"Security group {security_group.name} ({security_group.id}) has all ports open to the Internet and therefore was not checked against a specific port."
findings.append(report)
return findings
def check_enis(
self,
report,
security_group_name: str,
security_group_id: str,
enis: list[NetworkInterface],
):
report.status_extended = f"Security group {security_group_name} ({security_group_id}) has at least one port open to the Internet but is exclusively not attached to any network interface."
for eni in enis:
if self.is_allowed_eni_type(eni_type=eni.type):
report.status = "PASS"
report.status_extended = f"Security group {security_group_name} ({security_group_id}) has at least one port open to the Internet but is exclusively attached to an allowed network interface type ({eni.type})."
continue
eni_owner = self.get_eni_owner(eni=eni)
if self.is_allowed_eni_owner(eni_owner=eni_owner):
report.status = "PASS"
report.status_extended = f"Security group {security_group_name} ({security_group_id}) has at least one port open to the Internet but is exclusively attached to an allowed network interface instance owner ({eni_owner})."
continue
else:
report.status = "FAIL"
report.status_extended = f"Security group {security_group_name} ({security_group_id}) has at least one port open to the Internet and neither its network interface type ({eni.type}) nor its network interface instance owner ({eni_owner}) are part of the allowed network interfaces."
break # no need to check other network interfaces because at least one failed already
@staticmethod
def is_allowed_eni_type(eni_type: str) -> bool:
return eni_type in ec2_client.audit_config.get(
"ec2_allowed_interface_types", []
)
@staticmethod
def get_eni_owner(eni) -> str:
eni_owner = ""
if (
hasattr(eni, "attachment")
and isinstance(eni.attachment, dict)
and "InstanceOwnerId" in eni.attachment
):
eni_owner = eni.attachment["InstanceOwnerId"]
return eni_owner
@staticmethod
def is_allowed_eni_owner(eni_owner: str) -> bool:
return eni_owner in ec2_client.audit_config.get(
"ec2_allowed_instance_owners", []
)
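
With the two static helpers above, the allow lists are plain config lookups; a small illustrative example of the resulting verdict:

audit_config = {
    "ec2_allowed_interface_types": ["api_gateway_managed", "vpc_endpoint"],
    "ec2_allowed_instance_owners": ["amazon-elb"],
}
eni_type, eni_owner = "vpc_endpoint", ""
allowed = (
    eni_type in audit_config.get("ec2_allowed_interface_types", [])
    or eni_owner in audit_config.get("ec2_allowed_instance_owners", [])
)
print(allowed)  # True: an open SG attached only to a VPC endpoint now passes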

View File

@@ -1,19 +1,27 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
class ec2_securitygroup_default_restrict_traffic(Check):
def execute(self):
findings = []
for security_group in ec2_client.security_groups:
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
# Find default security group
if security_group.name == "default":
# Check if the ignoring flag is set and if the VPC and the default SG are in use
if security_group.name == "default" and (
ec2_client.provider.scan_unused_services
or (
security_group.vpc_id in vpc_client.vpcs
and vpc_client.vpcs[security_group.vpc_id].in_use
and len(security_group.network_interfaces) > 0
)
):
report = Check_Report_AWS(self.metadata())
report.region = security_group.region
report.resource_details = security_group.name
report.resource_id = security_group.id
report.resource_arn = security_group.arn
report.resource_tags = security_group.tags
report.status = "FAIL"
report.status_extended = (
f"Default Security Group ({security_group.id}) rules allow traffic."

View File

@@ -115,7 +115,6 @@ class EC2(AWSService):
is_resource_filtered(arn, self.audit_resources)
):
associated_sgs = []
# check if sg has public access to all ports
for ingress_rule in sg["IpPermissions"]:
# check associated security groups
for sg_group in ingress_rule.get("UserIdGroupPairs", []):

View File

@@ -101,5 +101,5 @@ class Cluster(BaseModel):
arn: str
region: str
cache_subnet_group_id: Optional[str]
subnets: Optional[list]
subnets: list = []
tags: Optional[list]

View File

@@ -1,5 +1,6 @@
from typing import Optional
from botocore.exceptions import ClientError
from pydantic import BaseModel
from prowler.lib.logger import logger
@@ -72,6 +73,18 @@ class Glue(AWSService):
region=regional_client.region,
)
)
except ClientError as error:
# Check if the operation is not supported in the region
if error.response["Error"]["Message"].startswith(
"Operation is not supported"
):
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

View File

@@ -8,11 +8,11 @@
"ServiceName": "iam",
"SubServiceName": "",
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
"Severity": "critical",
"Severity": "high",
"ResourceType": "AwsIamPolicy",
"Description": "Ensure that no custom IAM policies exist which allow permissive role assumption (e.g. sts:AssumeRole on *)",
"Risk": "If not restricted unintended access could happen.",
"RelatedUrl": "",
"RelatedUrl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_permissions-to-switch.html#roles-usingrole-createpolicy",
"Remediation": {
"Code": {
"CLI": "",

View File

@@ -26,6 +26,7 @@ class RDS(AWSService):
self.__threading_call__(self.__describe_db_snapshots__)
self.__threading_call__(self.__describe_db_snapshot_attributes__)
self.__threading_call__(self.__describe_db_clusters__)
self.__threading_call__(self.__describe_db_cluster_parameters__)
self.__threading_call__(self.__describe_db_cluster_snapshots__)
self.__threading_call__(self.__describe_db_cluster_snapshot_attributes__)
self.__threading_call__(self.__describe_db_engine_versions__)
@@ -198,57 +199,102 @@ class RDS(AWSService):
"describe_db_clusters"
)
for page in describe_db_clusters_paginator.paginate():
for cluster in page["DBClusters"]:
db_cluster_arn = f"arn:{self.audited_partition}:rds:{regional_client.region}:{self.audited_account}:cluster:{cluster['DBClusterIdentifier']}"
if not self.audit_resources or (
is_resource_filtered(db_cluster_arn, self.audit_resources)
):
if cluster["Engine"] != "docdb":
describe_db_parameters_paginator = (
regional_client.get_paginator("describe_db_parameters")
)
db_cluster = DBCluster(
id=cluster["DBClusterIdentifier"],
arn=db_cluster_arn,
endpoint=cluster.get("Endpoint"),
engine=cluster["Engine"],
status=cluster["Status"],
public=cluster.get("PubliclyAccessible", False),
encrypted=cluster["StorageEncrypted"],
auto_minor_version_upgrade=cluster.get(
"AutoMinorVersionUpgrade", False
),
backup_retention_period=cluster.get(
"BackupRetentionPeriod"
),
cloudwatch_logs=cluster.get(
"EnabledCloudwatchLogsExports"
),
deletion_protection=cluster["DeletionProtection"],
parameter_group=cluster["DBClusterParameterGroup"],
multi_az=cluster["MultiAZ"],
region=regional_client.region,
tags=cluster.get("TagList", []),
)
for page in describe_db_parameters_paginator.paginate(
DBParameterGroupName=cluster["DBClusterParameterGroup"]
try:
for cluster in page["DBClusters"]:
try:
db_cluster_arn = f"arn:{self.audited_partition}:rds:{regional_client.region}:{self.audited_account}:cluster:{cluster['DBClusterIdentifier']}"
if not self.audit_resources or (
is_resource_filtered(
db_cluster_arn, self.audit_resources
)
):
for parameter in page["Parameters"]:
if cluster["Engine"] != "docdb":
db_cluster = DBCluster(
id=cluster["DBClusterIdentifier"],
arn=db_cluster_arn,
endpoint=cluster.get("Endpoint"),
engine=cluster["Engine"],
status=cluster["Status"],
public=cluster.get("PubliclyAccessible", False),
encrypted=cluster["StorageEncrypted"],
auto_minor_version_upgrade=cluster.get(
"AutoMinorVersionUpgrade", False
),
backup_retention_period=cluster.get(
"BackupRetentionPeriod"
),
cloudwatch_logs=cluster.get(
"EnabledCloudwatchLogsExports"
),
deletion_protection=cluster[
"DeletionProtection"
],
parameter_group=cluster[
"DBClusterParameterGroup"
],
multi_az=cluster["MultiAZ"],
region=regional_client.region,
tags=cluster.get("TagList", []),
)
# Use the cluster ARN as the dict key so every entry is unique
self.db_clusters[db_cluster_arn] = db_cluster
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def __describe_db_cluster_parameters__(self, regional_client):
logger.info("RDS - Describe DB Cluster Parameters...")
try:
for cluster in self.db_clusters.values():
if cluster.region == regional_client.region:
try:
describe_db_cluster_parameters_paginator = (
regional_client.get_paginator(
"describe_db_cluster_parameters"
)
)
for page in describe_db_cluster_parameters_paginator.paginate(
DBClusterParameterGroupName=cluster.parameter_group
):
for parameter in page["Parameters"]:
if (
"ParameterValue" in parameter
and "ParameterName" in parameter
):
if parameter["ParameterName"] == "rds.force_ssl":
db_cluster.force_ssl = parameter[
"ParameterValue"
]
cluster.force_ssl = parameter["ParameterValue"]
if (
parameter["ParameterName"]
== "require_secure_transport"
):
db_cluster.require_secure_transport = parameter[
cluster.require_secure_transport = parameter[
"ParameterValue"
]
# We must use a unique value as the dict key to have unique keys
self.db_clusters[db_cluster_arn] = db_cluster
except ClientError as error:
if (
error.response["Error"]["Code"]
== "DBClusterParameterGroupName"
):
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -392,8 +438,8 @@ class DBCluster(BaseModel):
auto_minor_version_upgrade: bool
multi_az: bool
parameter_group: str
force_ssl: Optional[bool]
require_secure_transport: Optional[str]
force_ssl: str = "0"
require_secure_transport: str = "OFF"
region: str
tags: Optional[list] = []
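
Because force_ssl and require_secure_transport now default to "0" and "OFF", downstream checks can compare strings without None guards; a hypothetical consumer, assuming the usual "1"/"ON" engine parameter conventions:

cluster_force_ssl = "0"                  # default when the parameter group omits rds.force_ssl
cluster_require_secure_transport = "OFF"
encrypted_in_transit = (
    cluster_force_ssl == "1" or cluster_require_secure_transport == "ON"
)
print(encrypted_in_transit)  # False with the defaults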

View File

@@ -84,10 +84,23 @@ class S3(AWSService):
logger.warning(
f"{bucket['Name']} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{bucket['Name']} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{bucket['Name']} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except ClientError as error:
if error.response["Error"]["Code"] == "NotSignedUp":
logger.warning(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

View File

@@ -25,7 +25,7 @@ class TrustedAdvisor(AWSService):
self.client = self.session.client(self.service, region_name=support_region)
self.client.region = support_region
self.__describe_services__()
if self.premium_support.enabled:
if getattr(self.premium_support, "enabled", False):
self.__describe_trusted_advisor_checks__()
self.__describe_trusted_advisor_check_result__()
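The getattr() guard above makes the Trusted Advisor calls safe when __describe_services__() failed and left premium_support unset or None, since getattr() falls back to the default in both cases. A minimal illustration, assuming a bare stand-in class:

class Service:
    premium_support = None  # __describe_services__() may never populate it

assert getattr(Service().premium_support, "enabled", False) is False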

View File

@@ -28,9 +28,9 @@ class VPC(AWSService):
self.__describe_flow_logs__()
self.__describe_peering_route_tables__()
self.__describe_vpc_endpoint_service_permissions__()
self.__describe_network_interfaces__()
self.vpc_subnets = {}
self.__threading_call__(self.__describe_vpc_subnets__)
self.__describe_network_interfaces__()
def __describe_vpcs__(self, regional_client):
logger.info("VPC - Describing VPCs...")
@@ -192,6 +192,19 @@ class VPC(AWSService):
)["NetworkInterfaces"]
if enis:
vpc.in_use = True
for subnet in vpc.subnets:
enis = regional_client.describe_network_interfaces(
Filters=[
{
"Name": "subnet-id",
"Values": [
subnet.id,
],
},
]
)["NetworkInterfaces"]
if enis:
subnet.in_use = True
except Exception as error:
logger.error(
f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -395,6 +408,7 @@ class VpcSubnet(BaseModel):
cidr_block: Optional[str]
availability_zone: str
public: bool
in_use: bool = False
nat_gateway: bool
region: str
mapPublicIpOnLaunch: bool
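The in_use detection added above issues one describe_network_interfaces call per subnet. Since EC2 filter values are OR-ed, a single call per VPC could mark every subnet at once; a sketch of that alternative (hypothetical, pagination omitted for brevity):

def mark_subnets_in_use(regional_client, vpc):
    subnet_ids = [subnet.id for subnet in vpc.subnets]
    if not subnet_ids:
        return
    enis = regional_client.describe_network_interfaces(
        Filters=[{"Name": "subnet-id", "Values": subnet_ids}]
    )["NetworkInterfaces"]
    used_subnets = {eni["SubnetId"] for eni in enis}
    for subnet in vpc.subnets:
        subnet.in_use = subnet.id in used_subnets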

View File

@@ -6,28 +6,29 @@ class vpc_subnet_different_az(Check):
def execute(self):
findings = []
for vpc in vpc_client.vpcs.values():
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_tags = vpc.tags
report.status = "FAIL"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} has no subnets."
)
report.resource_id = vpc.id
report.resource_arn = vpc.arn
if vpc.subnets:
availability_zone = None
for subnet in vpc.subnets:
if (
availability_zone
and subnet.availability_zone != availability_zone
):
report.status = "PASS"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has subnets in more than one availability zone."
break
availability_zone = subnet.availability_zone
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has only subnets in {availability_zone}."
if vpc_client.provider.scan_unused_services or vpc.in_use:
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_tags = vpc.tags
report.status = "FAIL"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} has no subnets."
)
report.resource_id = vpc.id
report.resource_arn = vpc.arn
if vpc.subnets:
availability_zone = None
for subnet in vpc.subnets:
if (
availability_zone
and subnet.availability_zone != availability_zone
):
report.status = "PASS"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has subnets in more than one availability zone."
break
availability_zone = subnet.availability_zone
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has only subnets in {availability_zone}."
findings.append(report)
return findings

View File

@@ -7,17 +7,19 @@ class vpc_subnet_no_public_ip_by_default(Check):
findings = []
for vpc in vpc_client.vpcs.values():
for subnet in vpc.subnets:
report = Check_Report_AWS(self.metadata())
report.region = subnet.region
report.resource_tags = subnet.tags
report.resource_id = subnet.id
report.resource_arn = subnet.arn
if subnet.mapPublicIpOnLaunch:
report.status = "FAIL"
report.status_extended = f"VPC subnet {subnet.name if subnet.name else subnet.id} assigns public IP by default."
else:
report.status = "PASS"
report.status_extended = f"VPC subnet {subnet.name if subnet.name else subnet.id} does NOT assign public IP by default."
findings.append(report)
# Check if ignoring flag is set and if the VPC Subnet is in use
if vpc_client.provider.scan_unused_services or subnet.in_use:
report = Check_Report_AWS(self.metadata())
report.region = subnet.region
report.resource_tags = subnet.tags
report.resource_id = subnet.id
report.resource_arn = subnet.arn
if subnet.mapPublicIpOnLaunch:
report.status = "FAIL"
report.status_extended = f"VPC subnet {subnet.name if subnet.name else subnet.id} assigns public IP by default."
else:
report.status = "PASS"
report.status_extended = f"VPC subnet {subnet.name if subnet.name else subnet.id} does NOT assign public IP by default."
findings.append(report)
return findings

View File

@@ -6,28 +6,29 @@ class vpc_subnet_separate_private_public(Check):
def execute(self):
findings = []
for vpc in vpc_client.vpcs.values():
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_tags = vpc.tags
report.status = "FAIL"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} has no subnets."
)
report.resource_id = vpc.id
report.resource_arn = vpc.arn
if vpc.subnets:
public = False
private = False
for subnet in vpc.subnets:
if subnet.public:
public = True
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has only public subnets."
if not subnet.public:
private = True
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has only private subnets."
if public and private:
report.status = "PASS"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has private and public subnets."
findings.append(report)
if vpc_client.provider.scan_unused_services or vpc.in_use:
report = Check_Report_AWS(self.metadata())
report.region = vpc.region
report.resource_tags = vpc.tags
report.status = "FAIL"
report.status_extended = (
f"VPC {vpc.name if vpc.name else vpc.id} has no subnets."
)
report.resource_id = vpc.id
report.resource_arn = vpc.arn
if vpc.subnets:
public = False
private = False
for subnet in vpc.subnets:
if subnet.public:
public = True
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has only public subnets."
if not subnet.public:
private = True
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has only private subnets."
if public and private:
report.status = "PASS"
report.status_extended = f"VPC {vpc.name if vpc.name else vpc.id} has private and public subnets."
findings.append(report)
return findings

View File

@@ -31,7 +31,9 @@ class Defender(AzureService):
pricings = {}
for subscription_name, client in self.clients.items():
try:
pricings_list = client.pricings.list()
pricings_list = client.pricings.list(
scope_id=f"subscriptions/{self.subscriptions[subscription_name]}"
)
pricings.update({subscription_name: {}})
for pricing in pricings_list.value:
pricings[subscription_name].update(
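Newer azure-mgmt-security releases take an explicit scope for pricings.list, which is why the change above passes one built from the subscription ID. The scope is just the subscription resource path, e.g.:

subscription_id = "00000000-0000-0000-0000-000000000000"  # placeholder value
scope_id = f"subscriptions/{subscription_id}"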

View File

@@ -7,7 +7,10 @@ from typing import Any, Optional
from prowler.config.config import get_default_mute_file_path
from prowler.lib.logger import logger
from prowler.lib.mutelist.mutelist import parse_mutelist_file
from prowler.lib.mutelist.mutelist import (
get_mutelist_file_from_local_file,
validate_mutelist,
)
providers_path = "prowler.providers"
@@ -178,7 +181,8 @@ class Provider(ABC):
if not mutelist_path:
mutelist_path = get_default_mute_file_path(self.type)
if mutelist_path:
mutelist = parse_mutelist_file(mutelist_path)
mutelist = get_mutelist_file_from_local_file(mutelist_path)
mutelist = validate_mutelist(mutelist)
else:
mutelist = {}
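Splitting parse_mutelist_file into a loader plus a validator lets the provider treat a bad mutelist as an empty one instead of crashing. A hedged sketch of the two steps (the real functions live in prowler.lib.mutelist.mutelist and may differ in detail):

import yaml
from schema import Optional, Schema

def get_mutelist_file_from_local_file(mutelist_path: str) -> dict:
    try:
        with open(mutelist_path) as f:
            return yaml.safe_load(f)["Mutelist"]
    except Exception:
        return {}

mutelist_schema = Schema(
    {
        "Accounts": {
            str: {
                "Checks": {
                    str: {
                        "Regions": list,
                        "Resources": list,
                        Optional("Tags"): list,
                        Optional("Exceptions"): dict,
                    }
                }
            }
        }
    }
)

def validate_mutelist(mutelist: dict) -> dict:
    try:
        return mutelist_schema.validate(mutelist)
    except Exception:
        return {}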

View File

@@ -23,7 +23,7 @@ packages = [
{include = "dashboard"}
]
readme = "README.md"
version = "4.2.0"
version = "4.2.3"
[tool.poetry.dependencies]
alive-progress = "3.1.5"
@@ -46,13 +46,13 @@ azure-mgmt-storage = "21.1.0"
azure-mgmt-subscription = "3.1.1"
azure-mgmt-web = "7.2.0"
azure-storage-blob = "12.20.0"
boto3 = "1.34.109"
botocore = "1.34.113"
boto3 = "1.34.113"
botocore = "1.34.118"
colorama = "0.4.6"
dash = "2.17.0"
dash-bootstrap-components = "1.6.0"
detect-secrets = "1.5.0"
google-api-python-client = "2.130.0"
google-api-python-client = "2.131.0"
google-auth-httplib2 = ">=0.1,<0.3"
jsonschema = "4.22.0"
kubernetes = "29.0.0"
@@ -79,12 +79,12 @@ typer = "0.12.3"
[tool.poetry.group.dev.dependencies]
bandit = "1.7.8"
black = "24.4.2"
coverage = "7.5.2"
coverage = "7.5.3"
docker = "7.1.0"
flake8 = "7.0.0"
freezegun = "1.5.1"
mock = "5.1.0"
moto = {extras = ["all"], version = "5.0.8"}
moto = {extras = ["all"], version = "5.0.9"}
openapi-schema-validator = "0.6.2"
openapi-spec-validator = "0.7.1"
pylint = "3.2.2"
@@ -101,7 +101,7 @@ optional = true
[tool.poetry.group.docs.dependencies]
mkdocs = "1.5.3"
mkdocs-git-revision-date-localized-plugin = "1.2.5"
mkdocs-git-revision-date-localized-plugin = "1.2.6"
mkdocs-material = "9.5.18"
mkdocs-material-extensions = "1.3.1"

View File

@@ -1,3 +1,4 @@
import logging
import os
import pathlib
from unittest import mock
@@ -24,10 +25,12 @@ def mock_prowler_get_latest_release(_, **kwargs):
return response
config_aws = {
old_config_aws = {
"shodan_api_key": None,
"max_security_group_rules": 50,
"max_ec2_instance_age_in_days": 180,
"ec2_allowed_interface_types": ["api_gateway_managed", "vpc_endpoint"],
"ec2_allowed_instance_owners": ["amazon-elb"],
"trusted_account_ids": [],
"log_group_retention_days": 365,
"max_idle_disconnect_timeout_in_seconds": 600,
@@ -59,9 +62,231 @@ config_aws = {
"organizations_enabled_regions": [],
"organizations_trusted_delegated_administrators": [],
"check_rds_instance_replicas": False,
"days_to_expire_threshold": 7,
}
config_aws = {
"mute_non_default_regions": False,
"max_unused_access_keys_days": 45,
"max_console_access_days": 45,
"shodan_api_key": None,
"max_security_group_rules": 50,
"max_ec2_instance_age_in_days": 180,
"ec2_allowed_interface_types": ["api_gateway_managed", "vpc_endpoint"],
"ec2_allowed_instance_owners": ["amazon-elb"],
"trusted_account_ids": [],
"log_group_retention_days": 365,
"max_idle_disconnect_timeout_in_seconds": 600,
"max_disconnect_timeout_in_seconds": 300,
"max_session_duration_seconds": 36000,
"obsolete_lambda_runtimes": [
"java8",
"go1.x",
"provided",
"python3.6",
"python2.7",
"python3.7",
"nodejs4.3",
"nodejs4.3-edge",
"nodejs6.10",
"nodejs",
"nodejs8.10",
"nodejs10.x",
"nodejs12.x",
"nodejs14.x",
"dotnet5.0",
"dotnetcore1.0",
"dotnetcore2.0",
"dotnetcore2.1",
"dotnetcore3.1",
"ruby2.5",
"ruby2.7",
],
"organizations_enabled_regions": [],
"organizations_trusted_delegated_administrators": [],
"ecr_repository_vulnerability_minimum_severity": "MEDIUM",
"verify_premium_support_plans": True,
"threat_detection_privilege_escalation_threshold": 0.1,
"threat_detection_privilege_escalation_minutes": 1440,
"threat_detection_privilege_escalation_actions": [
"AddPermission",
"AddRoleToInstanceProfile",
"AddUserToGroup",
"AssociateAccessPolicy",
"AssumeRole",
"AttachGroupPolicy",
"AttachRolePolicy",
"AttachUserPolicy",
"ChangePassword",
"CreateAccessEntry",
"CreateAccessKey",
"CreateDevEndpoint",
"CreateEventSourceMapping",
"CreateFunction",
"CreateGroup",
"CreateJob",
"CreateKeyPair",
"CreateLoginProfile",
"CreatePipeline",
"CreatePolicyVersion",
"CreateRole",
"CreateStack",
"DeleteRolePermissionsBoundary",
"DeleteRolePolicy",
"DeleteUserPermissionsBoundary",
"DeleteUserPolicy",
"DetachRolePolicy",
"DetachUserPolicy",
"GetCredentialsForIdentity",
"GetId",
"GetPolicyVersion",
"GetUserPolicy",
"Invoke",
"ModifyInstanceAttribute",
"PassRole",
"PutGroupPolicy",
"PutPipelineDefinition",
"PutRolePermissionsBoundary",
"PutRolePolicy",
"PutUserPermissionsBoundary",
"PutUserPolicy",
"ReplaceIamInstanceProfileAssociation",
"RunInstances",
"SetDefaultPolicyVersion",
"UpdateAccessKey",
"UpdateAssumeRolePolicy",
"UpdateDevEndpoint",
"UpdateEventSourceMapping",
"UpdateFunctionCode",
"UpdateJob",
"UpdateLoginProfile",
],
"threat_detection_enumeration_threshold": 0.1,
"threat_detection_enumeration_minutes": 1440,
"threat_detection_enumeration_actions": [
"DescribeAccessEntry",
"DescribeAccountAttributes",
"DescribeAvailabilityZones",
"DescribeBundleTasks",
"DescribeCarrierGateways",
"DescribeClientVpnRoutes",
"DescribeCluster",
"DescribeDhcpOptions",
"DescribeFlowLogs",
"DescribeImages",
"DescribeInstanceAttribute",
"DescribeInstanceInformation",
"DescribeInstanceTypes",
"DescribeInstances",
"DescribeInstances",
"DescribeKeyPairs",
"DescribeLogGroups",
"DescribeLogStreams",
"DescribeOrganization",
"DescribeRegions",
"DescribeSecurityGroups",
"DescribeSnapshotAttribute",
"DescribeSnapshotTierStatus",
"DescribeSubscriptionFilters",
"DescribeTransitGatewayMulticastDomains",
"DescribeVolumes",
"DescribeVolumesModifications",
"DescribeVpcEndpointConnectionNotifications",
"DescribeVpcs",
"GetAccount",
"GetAccountAuthorizationDetails",
"GetAccountSendingEnabled",
"GetBucketAcl",
"GetBucketLogging",
"GetBucketPolicy",
"GetBucketReplication",
"GetBucketVersioning",
"GetCallerIdentity",
"GetCertificate",
"GetConsoleScreenshot",
"GetCostAndUsage",
"GetDetector",
"GetEbsDefaultKmsKeyId",
"GetEbsEncryptionByDefault",
"GetFindings",
"GetFlowLogsIntegrationTemplate",
"GetIdentityVerificationAttributes",
"GetInstances",
"GetIntrospectionSchema",
"GetLaunchTemplateData",
"GetLaunchTemplateData",
"GetLogRecord",
"GetParameters",
"GetPolicyVersion",
"GetPublicAccessBlock",
"GetQueryResults",
"GetRegions",
"GetSMSAttributes",
"GetSMSSandboxAccountStatus",
"GetSendQuota",
"GetTransitGatewayRouteTableAssociations",
"GetUserPolicy",
"HeadObject",
"ListAccessKeys",
"ListAccounts",
"ListAllMyBuckets",
"ListAssociatedAccessPolicies",
"ListAttachedUserPolicies",
"ListClusters",
"ListDetectors",
"ListDomains",
"ListFindings",
"ListHostedZones",
"ListIPSets",
"ListIdentities",
"ListInstanceProfiles",
"ListObjects",
"ListOrganizationalUnitsForParent",
"ListOriginationNumbers",
"ListPolicyVersions",
"ListRoles",
"ListRoles",
"ListRules",
"ListServiceQuotas",
"ListSubscriptions",
"ListTargetsByRule",
"ListTopics",
"ListUsers",
"LookupEvents",
"Search",
],
"check_rds_instance_replicas": False,
"days_to_expire_threshold": 7,
}
config_azure = {"shodan_api_key": None}
config_azure = {
"shodan_api_key": None,
"php_latest_version": "8.2",
"python_latest_version": "3.12",
"java_latest_version": "17",
}
config_gcp = {"shodan_api_key": None}
config_kubernetes = {
"audit_log_maxbackup": 10,
"audit_log_maxsize": 100,
"audit_log_maxage": 30,
"apiserver_strong_ciphers": [
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
],
"kubelet_strong_ciphers": [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
],
}
class Test_Config:
@@ -126,7 +351,7 @@ class Test_Config:
path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
config_test_file = f"{path}/fixtures/config.yaml"
provider = "aws"
print(load_and_validate_config_file(provider, config_test_file))
assert load_and_validate_config_file(provider, config_test_file) == config_aws
def test_load_and_validate_config_file_gcp(self):
@@ -134,14 +359,17 @@ class Test_Config:
config_test_file = f"{path}/fixtures/config.yaml"
provider = "gcp"
assert load_and_validate_config_file(provider, config_test_file) is None
assert load_and_validate_config_file(provider, config_test_file) == config_gcp
def test_load_and_validate_config_file_kubernetes(self):
path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
config_test_file = f"{path}/fixtures/config.yaml"
provider = "kubernetes"
assert load_and_validate_config_file(provider, config_test_file) is None
print(load_and_validate_config_file(provider, config_test_file))
assert (
load_and_validate_config_file(provider, config_test_file)
== config_kubernetes
)
def test_load_and_validate_config_file_azure(self):
path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
@@ -153,18 +381,22 @@ class Test_Config:
def test_load_and_validate_config_file_old_format(self):
path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
config_test_file = f"{path}/fixtures/config_old.yaml"
assert load_and_validate_config_file("aws", config_test_file) == config_aws
print(load_and_validate_config_file("aws", config_test_file))
assert load_and_validate_config_file("aws", config_test_file) == old_config_aws
assert load_and_validate_config_file("gcp", config_test_file) == {}
assert load_and_validate_config_file("azure", config_test_file) == {}
assert load_and_validate_config_file("kubernetes", config_test_file) == {}
def test_load_and_validate_config_file_invalid_config_file_path(self):
def test_load_and_validate_config_file_invalid_config_file_path(self, caplog):
provider = "aws"
config_file_path = "invalid/path/to/fixer_config.yaml"
with pytest.raises(SystemExit):
load_and_validate_config_file(provider, config_file_path)
with caplog.at_level(logging.ERROR):
result = load_and_validate_config_file(provider, config_file_path)
assert "FileNotFoundError" in caplog.text
assert result == {}
assert pytest is not None
def test_load_and_validate_fixer_config_aws(self):
path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
@@ -194,9 +426,13 @@ class Test_Config:
assert load_and_validate_fixer_config_file(provider, config_test_file) == {}
def test_load_and_validate_fixer_config_invalid_fixer_config_path(self):
def test_load_and_validate_fixer_config_invalid_fixer_config_path(self, caplog):
provider = "aws"
fixer_config_path = "invalid/path/to/fixer_config.yaml"
with pytest.raises(SystemExit):
load_and_validate_fixer_config_file(provider, fixer_config_path)
with caplog.at_level(logging.ERROR):
result = load_and_validate_fixer_config_file(provider, fixer_config_path)
assert "FileNotFoundError" in caplog.text
assert result == {}
assert pytest is not None

View File

@@ -1,14 +1,46 @@
# TODO: UPDATE YAML
# AWS Configuration
aws:
# AWS Global Configuration
# aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
mute_non_default_regions: False
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
# Mutelist:
# Accounts:
# "*":
# Checks:
# "*":
# Regions:
# - "ap-southeast-1"
# - "ap-southeast-2"
# Resources:
# - "*"
# AWS IAM Configuration
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
max_unused_access_keys_days: 45
# aws.iam_user_console_access_unused --> CIS recommends 45 days
max_console_access_days: 45
# AWS EC2 Configuration
# aws.ec2_elastic_ip_shodan
# TODO: create common config
shodan_api_key: null
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
# allowed network interface types for security groups open to the Internet
ec2_allowed_interface_types:
[
"api_gateway_managed",
"vpc_endpoint",
]
# allowed network interface owners for security groups open to the Internet
ec2_allowed_instance_owners:
[
"amazon-elb"
]
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
@@ -56,27 +88,237 @@ aws:
]
# AWS Organizations
# organizations_scp_check_deny_regions
# organizations_enabled_regions: [
# 'eu-central-1',
# 'eu-west-1',
# aws.organizations_scp_check_deny_regions
# aws.organizations_enabled_regions: [
# "eu-central-1",
# "eu-west-1",
# "us-east-1"
# ]
organizations_enabled_regions: []
organizations_trusted_delegated_administrators: []
# AWS ECR
# aws.ecr_repositories_scan_vulnerabilities_in_latest_image
# CRITICAL
# HIGH
# MEDIUM
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
# AWS Trusted Advisor
# aws.trustedadvisor_premium_support_plan_subscribed
verify_premium_support_plans: True
# AWS CloudTrail Configuration
# aws.cloudtrail_threat_detection_privilege_escalation
threat_detection_privilege_escalation_threshold: 0.1 # Percentage of matched actions needed to flag a privilege escalation attack event; default is 0.1 (10%)
threat_detection_privilege_escalation_minutes: 1440 # How many minutes back from now to search for privilege escalation attacks; default is 1440 minutes (24 hours)
threat_detection_privilege_escalation_actions:
[
"AddPermission",
"AddRoleToInstanceProfile",
"AddUserToGroup",
"AssociateAccessPolicy",
"AssumeRole",
"AttachGroupPolicy",
"AttachRolePolicy",
"AttachUserPolicy",
"ChangePassword",
"CreateAccessEntry",
"CreateAccessKey",
"CreateDevEndpoint",
"CreateEventSourceMapping",
"CreateFunction",
"CreateGroup",
"CreateJob",
"CreateKeyPair",
"CreateLoginProfile",
"CreatePipeline",
"CreatePolicyVersion",
"CreateRole",
"CreateStack",
"DeleteRolePermissionsBoundary",
"DeleteRolePolicy",
"DeleteUserPermissionsBoundary",
"DeleteUserPolicy",
"DetachRolePolicy",
"DetachUserPolicy",
"GetCredentialsForIdentity",
"GetId",
"GetPolicyVersion",
"GetUserPolicy",
"Invoke",
"ModifyInstanceAttribute",
"PassRole",
"PutGroupPolicy",
"PutPipelineDefinition",
"PutRolePermissionsBoundary",
"PutRolePolicy",
"PutUserPermissionsBoundary",
"PutUserPolicy",
"ReplaceIamInstanceProfileAssociation",
"RunInstances",
"SetDefaultPolicyVersion",
"UpdateAccessKey",
"UpdateAssumeRolePolicy",
"UpdateDevEndpoint",
"UpdateEventSourceMapping",
"UpdateFunctionCode",
"UpdateJob",
"UpdateLoginProfile",
]
# aws.cloudtrail_threat_detection_enumeration
threat_detection_enumeration_threshold: 0.1 # Percentage of matched actions needed to flag an enumeration attack event; default is 0.1 (10%)
threat_detection_enumeration_minutes: 1440 # How many minutes back from now to search for enumeration attacks; default is 1440 minutes (24 hours)
threat_detection_enumeration_actions:
[
"DescribeAccessEntry",
"DescribeAccountAttributes",
"DescribeAvailabilityZones",
"DescribeBundleTasks",
"DescribeCarrierGateways",
"DescribeClientVpnRoutes",
"DescribeCluster",
"DescribeDhcpOptions",
"DescribeFlowLogs",
"DescribeImages",
"DescribeInstanceAttribute",
"DescribeInstanceInformation",
"DescribeInstanceTypes",
"DescribeInstances",
"DescribeInstances",
"DescribeKeyPairs",
"DescribeLogGroups",
"DescribeLogStreams",
"DescribeOrganization",
"DescribeRegions",
"DescribeSecurityGroups",
"DescribeSnapshotAttribute",
"DescribeSnapshotTierStatus",
"DescribeSubscriptionFilters",
"DescribeTransitGatewayMulticastDomains",
"DescribeVolumes",
"DescribeVolumesModifications",
"DescribeVpcEndpointConnectionNotifications",
"DescribeVpcs",
"GetAccount",
"GetAccountAuthorizationDetails",
"GetAccountSendingEnabled",
"GetBucketAcl",
"GetBucketLogging",
"GetBucketPolicy",
"GetBucketReplication",
"GetBucketVersioning",
"GetCallerIdentity",
"GetCertificate",
"GetConsoleScreenshot",
"GetCostAndUsage",
"GetDetector",
"GetEbsDefaultKmsKeyId",
"GetEbsEncryptionByDefault",
"GetFindings",
"GetFlowLogsIntegrationTemplate",
"GetIdentityVerificationAttributes",
"GetInstances",
"GetIntrospectionSchema",
"GetLaunchTemplateData",
"GetLaunchTemplateData",
"GetLogRecord",
"GetParameters",
"GetPolicyVersion",
"GetPublicAccessBlock",
"GetQueryResults",
"GetRegions",
"GetSMSAttributes",
"GetSMSSandboxAccountStatus",
"GetSendQuota",
"GetTransitGatewayRouteTableAssociations",
"GetUserPolicy",
"HeadObject",
"ListAccessKeys",
"ListAccounts",
"ListAllMyBuckets",
"ListAssociatedAccessPolicies",
"ListAttachedUserPolicies",
"ListClusters",
"ListDetectors",
"ListDomains",
"ListFindings",
"ListHostedZones",
"ListIPSets",
"ListIdentities",
"ListInstanceProfiles",
"ListObjects",
"ListOrganizationalUnitsForParent",
"ListOriginationNumbers",
"ListPolicyVersions",
"ListRoles",
"ListRoles",
"ListRules",
"ListServiceQuotas",
"ListSubscriptions",
"ListTargetsByRule",
"ListTopics",
"ListUsers",
"LookupEvents",
"Search",
]
# AWS RDS Configuration
# aws.rds_instance_backup_enabled
# Whether to check RDS instance replicas or not
check_rds_instance_replicas: False
# AWS ACM Configuration
# aws.acm_certificates_expiration_check
days_to_expire_threshold: 7
# Azure Configuration
azure:
# Azure Network Configuration
# azure.network_public_ip_shodan
# TODO: create common config
shodan_api_key: null
# Azure App Service
# azure.app_ensure_php_version_is_latest
php_latest_version: "8.2"
# azure.app_ensure_python_version_is_latest
python_latest_version: "3.12"
# azure.app_ensure_java_version_is_latest
java_latest_version: "17"
# GCP Configuration
gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
# Kubernetes Configuration
kubernetes:
# Kubernetes API Server
# kubernetes.apiserver_audit_log_maxbackup_set
audit_log_maxbackup: 10
# kubernetes.apiserver_audit_log_maxsize_set
audit_log_maxsize: 100
# kubernetes.apiserver_audit_log_maxage_set
audit_log_maxage: 30
# kubernetes.apiserver_strong_ciphers_only
apiserver_strong_ciphers:
[
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
]
# Kubelet
# kubernetes.kubelet_strong_ciphers_only
kubelet_strong_ciphers:
[
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
]
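One plausible reading of the threat detection thresholds above, as a worked example (an assumption; the actual scoring lives in the cloudtrail_threat_detection_* checks):

threshold = 0.1        # threat_detection_enumeration_threshold: flag at >= 10%
minutes = 1440         # look-back window over CloudTrail events
found_actions = 120    # hypothetical events seen in the window
matched_actions = 15   # events whose name is in the action list above

ratio = matched_actions / found_actions  # 0.125
if ratio >= threshold:
    print(f"Potential enumeration activity: {ratio:.0%} of actions matched")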

View File

@@ -5,6 +5,18 @@ shodan_api_key: null
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
# allowed network interface types for security groups open to the Internet
ec2_allowed_interface_types:
[
"api_gateway_managed",
"vpc_endpoint",
]
# allowed network interface owners for security groups open to the Internet
ec2_allowed_instance_owners:
[
"amazon-elb"
]
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
@@ -64,3 +76,7 @@ organizations_trusted_delegated_administrators: []
# aws.rds_instance_backup_enabled
# Whether to check RDS instance replicas or not
check_rds_instance_replicas: False
# AWS ACM Configuration
# aws.acm_certificates_expiration_check
days_to_expire_threshold: 7

View File

@@ -1,12 +1,15 @@
import os
import pathlib
import traceback
from argparse import Namespace
from importlib.machinery import FileFinder
from logging import DEBUG, ERROR
from pkgutil import ModuleInfo
from boto3 import client
from colorama import Fore, Style
from fixtures.bulk_checks_metadata import test_bulk_checks_metadata
from mock import patch
from mock import Mock, patch
from moto import mock_aws
from prowler.lib.check.check import (
@@ -21,6 +24,7 @@ from prowler.lib.check.check import (
recover_checks_from_provider,
recover_checks_from_service,
remove_custom_checks_module,
run_check,
update_audit_metadata,
)
from prowler.lib.check.models import load_check_metadata
@@ -449,14 +453,14 @@ class TestCheck:
"path": test_checks_folder,
"provider": "aws",
},
"expected": 3,
"expected": {"check11", "check12", "check7777"},
},
{
"input": {
"path": "s3://test/checks_folder/",
"provider": "aws",
},
"expected": 3,
"expected": {"check11", "check12", "check7777"},
},
]
@@ -786,3 +790,89 @@ class TestCheck:
checks_json
== '{\n "aws": [\n "awslambda_function_invoke_api_operations_cloudtrail_logging_enabled",\n "awslambda_function_no_secrets_in_code",\n "awslambda_function_no_secrets_in_variables",\n "awslambda_function_not_publicly_accessible",\n "awslambda_function_url_cors_policy",\n "awslambda_function_url_public",\n "awslambda_function_using_supported_runtimes"\n ]\n}'
)
def test_run_check(self, caplog):
caplog.set_level(DEBUG)
findings = []
check = Mock()
check.CheckID = "test-check"
check.execute = Mock(return_value=findings)
with patch("prowler.lib.check.check.execute", return_value=findings):
assert run_check(check) == findings
assert caplog.record_tuples == [
(
"root",
DEBUG,
f"Executing check: {check.CheckID}",
)
]
def test_run_check_verbose(self, capsys):
findings = []
check = Mock()
check.CheckID = "test-check"
check.ServiceName = "test-service"
check.Severity = "test-severity"
check.execute = Mock(return_value=findings)
with patch("prowler.lib.check.check.execute", return_value=findings):
assert run_check(check, verbose=True) == findings
assert (
capsys.readouterr().out
== f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}\n"
)
def test_run_check_exception_only_logs(self, caplog):
caplog.set_level(ERROR)
findings = []
check = Mock()
check.CheckID = "test-check"
check.ServiceName = "test-service"
check.Severity = "test-severity"
error = Exception()
check.execute = Mock(side_effect=error)
with patch("prowler.lib.check.check.execute", return_value=findings):
assert run_check(check, only_logs=True) == findings
assert caplog.record_tuples == [
(
"root",
ERROR,
f"{check.CheckID} -- {error.__class__.__name__}[{traceback.extract_tb(error.__traceback__)[-1].lineno}]: {error}",
)
]
def test_run_check_exception(self, caplog, capsys):
caplog.set_level(ERROR)
findings = []
check = Mock()
check.CheckID = "test-check"
check.ServiceName = "test-service"
check.Severity = "test-severity"
error = Exception()
check.execute = Mock(side_effect=error)
with patch("prowler.lib.check.check.execute", return_value=findings):
assert (
run_check(
check,
verbose=False,
)
== findings
)
assert caplog.record_tuples == [
(
"root",
ERROR,
f"{check.CheckID} -- {error.__class__.__name__}[{traceback.extract_tb(error.__traceback__)[-1].lineno}]: {error}",
)
]
assert (
capsys.readouterr().out
== f"Something went wrong in {check.CheckID}, please use --log-level ERROR\n"
)
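Taken together, the new tests pin down run_check's contract: log the check at DEBUG, print a colored title when verbose, and on an exception log it at ERROR and only print the hint when only_logs is off. A minimal implementation consistent with them (a hedged reconstruction, not the actual prowler.lib.check.check code):

from colorama import Fore, Style
from prowler.lib.logger import logger

def run_check(check, verbose=False, only_logs=False):
    findings = []
    if verbose:
        print(
            f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}"
            f"{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}"
        )
    logger.debug(f"Executing check: {check.CheckID}")
    try:
        findings = check.execute()
    except Exception as error:
        if not only_logs:
            print(f"Something went wrong in {check.CheckID}, please use --log-level ERROR")
        logger.error(
            f"{check.CheckID} -- {error.__class__.__name__}"
            f"[{error.__traceback__.tb_lineno}]: {error}"
        )
    return findings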

View File

@@ -1,9 +1,8 @@
import yaml
from boto3 import resource
from mock import MagicMock
from moto import mock_aws
from prowler.lib.mutelist.mutelist import (
get_mutelist_file_from_local_file,
is_excepted,
is_muted,
is_muted_in_check,
@@ -11,7 +10,7 @@ from prowler.lib.mutelist.mutelist import (
is_muted_in_resource,
is_muted_in_tags,
mutelist_findings,
parse_mutelist_file,
validate_mutelist,
)
from tests.providers.aws.utils import (
AWS_ACCOUNT_NUMBER,
@@ -24,118 +23,33 @@ from tests.providers.aws.utils import (
class TestMutelist:
def test_get_mutelist_file_from_local_file(self):
mutelist_path = "tests/lib/mutelist/fixtures/aws_mutelist.yaml"
with open(mutelist_path) as f:
mutelist_fixture = yaml.safe_load(f)["Mutelist"]
with open("tests//lib/mutelist/fixtures/aws_mutelist.yaml") as f:
assert yaml.safe_load(f)["Mutelist"] == parse_mutelist_file(
"s3://test-mutelist/mutelist.yaml",
aws_provider.session.current_session,
aws_provider.identity.account,
)
assert get_mutelist_file_from_local_file(mutelist_path) == mutelist_fixture
def test_get_mutelist_file_from_local_file_non_existent(self):
mutelist_path = "tests/lib/mutelist/fixtures/not_present"
assert get_mutelist_file_from_local_file(mutelist_path) == {}
def test_validate_mutelist(self):
mutelist_path = "tests/lib/mutelist/fixtures/aws_mutelist.yaml"
with open(mutelist_path) as f:
mutelist_fixture = yaml.safe_load(f)["Mutelist"]
assert validate_mutelist(mutelist_fixture) == mutelist_fixture
def test_validate_mutelist_not_valid_key(self):
mutelist_path = "tests/lib/mutelist/fixtures/aws_mutelist.yaml"
with open(mutelist_path) as f:
mutelist_fixture = yaml.safe_load(f)["Mutelist"]
mutelist_fixture["Accounts1"] = mutelist_fixture["Accounts"]
del mutelist_fixture["Accounts"]
assert validate_mutelist(mutelist_fixture) == {}
def test_mutelist_findings_only_wildcard(self):
@@ -1323,3 +1237,8 @@ class TestMutelist:
assert is_muted_in_resource(mutelist_resources, "prowler-test")
assert is_muted_in_resource(mutelist_resources, "test-prowler")
assert not is_muted_in_resource(mutelist_resources, "random")
def test_is_muted_in_resource_starting_by_star(self):
allowlist_resources = ["*.es"]
assert is_muted_in_resource(allowlist_resources, "google.es")

View File

@@ -1,13 +1,7 @@
import sys
from unittest import mock
from prowler.config.config import aws_logo, azure_logo, gcp_logo
from prowler.lib.outputs.slack import (
create_message_blocks,
create_message_identity,
create_title,
send_slack_message,
)
from prowler.lib.outputs.slack.slack import Slack
from tests.providers.aws.utils import AWS_ACCOUNT_NUMBER, set_mocked_aws_provider
from tests.providers.azure.azure_fixtures import (
AZURE_SUBSCRIPTION_ID,
@@ -16,28 +10,25 @@ from tests.providers.azure.azure_fixtures import (
)
from tests.providers.gcp.gcp_fixtures import set_mocked_gcp_provider
SLACK_CHANNEL = "test-channel"
SLACK_TOKEN = "test-token"
class TestSlackIntegration:
def test_create_message_identity_aws(self):
aws_provider = set_mocked_aws_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, aws_provider)
assert slack.__create_message_identity__(aws_provider) == (
f"AWS Account *{aws_provider.identity.account}*",
aws_logo,
)
def test_create_message_identity_azure(self):
azure_provider = set_mocked_azure_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, azure_provider)
assert slack.__create_message_identity__(azure_provider) == (
f"Azure Subscriptions:\n- *{AZURE_SUBSCRIPTION_ID}: {AZURE_SUBSCRIPTION_NAME}*\n",
azure_logo,
)
@@ -46,27 +37,50 @@ class TestSlackIntegration:
gcp_provider = set_mocked_gcp_provider(
project_ids=["test-project1", "test-project2"],
)
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, gcp_provider)
assert slack.__create_message_identity__(gcp_provider) == (
f"GCP Projects *{', '.join(gcp_provider.project_ids)}*",
gcp_logo,
)
def test_create_title(self):
aws_provider = set_mocked_aws_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, aws_provider)
stats = {}
stats["total_pass"] = 12
stats["total_fail"] = 10
stats["resources_count"] = 20
stats["findings_count"] = 22
identity, _ = slack.__create_message_identity__(aws_provider)
assert (
slack.__create_title__(identity, stats)
== f"Hey there 👋 \n I'm *Prowler*, _the handy multi-cloud security tool_ :cloud::key:\n\n I have just finished the security assessment on your {identity} with a total of *{stats['findings_count']}* findings."
)
def test_create_message_blocks_aws(self):
aws_provider = set_mocked_aws_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, aws_provider)
args = "--slack"
stats = {}
stats["total_pass"] = 12
stats["total_fail"] = 10
stats["resources_count"] = 20
stats["findings_count"] = 22
aws_identity = f"AWS Account *{AWS_ACCOUNT_NUMBER}*"
assert slack.__create_message_blocks__(aws_identity, aws_logo, stats, args) == [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": create_title(aws_identity, stats),
"text": slack.__create_title__(aws_identity, stats),
},
"accessory": {
"type": "image",
@@ -102,7 +116,7 @@ class TestSlackIntegration:
"elements": [
{
"type": "mrkdwn",
"text": f"Used parameters: `prowler {' '.join(sys.argv[1:])} `",
"text": f"Used parameters: `prowler {args}`",
}
],
},
@@ -141,12 +155,27 @@ class TestSlackIntegration:
},
},
]
def test_create_message_blocks_azure(self):
aws_provider = set_mocked_aws_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, aws_provider)
args = "--slack"
stats = {}
stats["total_pass"] = 12
stats["total_fail"] = 10
stats["resources_count"] = 20
stats["findings_count"] = 22
azure_identity = "Azure Subscriptions:\n- *subscription 1: qwerty*\n- *subscription 2: asdfg*\n"
assert slack.__create_message_blocks__(
azure_identity, azure_logo, stats, args
) == [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": create_title(azure_identity, stats),
"text": slack.__create_title__(azure_identity, stats),
},
"accessory": {
"type": "image",
@@ -182,7 +211,7 @@ class TestSlackIntegration:
"elements": [
{
"type": "mrkdwn",
"text": f"Used parameters: `prowler {' '.join(sys.argv[1:])} `",
"text": f"Used parameters: `prowler {args}`",
}
],
},
@@ -221,12 +250,25 @@ class TestSlackIntegration:
},
},
]
def test_create_message_blocks_gcp(self):
aws_provider = set_mocked_aws_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, aws_provider)
args = "--slack"
stats = {}
stats["total_pass"] = 12
stats["total_fail"] = 10
stats["resources_count"] = 20
stats["findings_count"] = 22
gcp_identity = "GCP Project *gcp-project*"
assert slack.__create_message_blocks__(gcp_identity, gcp_logo, stats, args) == [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": create_title(gcp_identity, stats),
"text": slack.__create_title__(gcp_identity, stats),
},
"accessory": {
"type": "image",
@@ -262,7 +304,7 @@ class TestSlackIntegration:
"elements": [
{
"type": "mrkdwn",
"text": f"Used parameters: `prowler {' '.join(sys.argv[1:])} `",
"text": f"Used parameters: `prowler {args}`",
}
],
},
@@ -324,14 +366,13 @@ class TestSlackIntegration:
mocked_web_client.chat_postMessage = mock.Mock(
return_value=mocked_slack_response
)
with mock.patch(
"prowler.lib.outputs.slack.create_message_blocks",
new=mock_create_message_blocks,
), mock.patch(
"prowler.lib.outputs.slack.create_message_identity",
new=mock_create_message_identity,
), mock.patch(
"prowler.lib.outputs.slack.WebClient", new=mocked_web_client
"prowler.lib.outputs.slack.slack.WebClient", new=mocked_web_client
):
aws_provider = set_mocked_aws_provider()
slack = Slack(SLACK_TOKEN, SLACK_CHANNEL, aws_provider)
stats = {}
args = "--slack"
response = slack.send(stats, args)
assert response == mocked_slack_response
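The rewritten tests imply the refactored integration's shape: a Slack class that owns the token, channel, and provider, with the block-building helpers as methods. A hedged outline consistent with the tests (the real class lives in prowler.lib.outputs.slack.slack and may differ):

from slack_sdk import WebClient

class Slack:
    def __init__(self, token, channel, provider):
        self._client = WebClient(token=token)
        self._channel = channel
        self._provider = provider

    def __create_message_identity__(self, provider):
        ...  # returns (identity_text, logo_url) for AWS/Azure/GCP

    def __create_title__(self, identity, stats):
        ...  # builds the "Hey there 👋 ..." headline from the stats

    def __create_message_blocks__(self, identity, logo, stats, args):
        ...  # Slack Block Kit payload embedding f"prowler {args}"

    def send(self, stats, args):
        identity, logo = self.__create_message_identity__(self._provider)
        return self._client.chat_postMessage(
            channel=self._channel,
            blocks=self.__create_message_blocks__(identity, logo, stats, args),
        )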

View File

@@ -9,7 +9,7 @@ from os import rmdir
from re import search
import botocore
from boto3 import client, session
from boto3 import client, resource, session
from freezegun import freeze_time
from mock import patch
from moto import mock_aws
@@ -56,7 +56,6 @@ from tests.providers.aws.utils import (
set_mocked_aws_provider,
)
# Mocking GetCallerIdentity for China and GovCloud
make_api_call = botocore.client.BaseClient._make_api_call
@@ -528,38 +527,39 @@ aws:
@mock_aws
def test_aws_provider_mutelist(self):
mutelist = {
    "Mutelist": {
        "Accounts": {
            AWS_ACCOUNT_NUMBER: {
                "Checks": {
                    "test-check": {
                        "Regions": [],
                        "Resources": [],
                        "Tags": [],
                        "Exceptions": {
                            "Accounts": [],
                            "Regions": [],
                            "Resources": [],
                            "Tags": [],
                        },
                    }
                }
            }
        }
    }
}
mutelist_file = tempfile.NamedTemporaryFile(delete=False)
with open(mutelist_file.name, "w") as mutelist_file:
    mutelist_file.write(json.dumps(mutelist, indent=4))
arguments = Namespace()
aws_provider = AwsProvider(arguments)
aws_provider.mutelist = mutelist_file.name
os.remove(mutelist_file.name)
assert aws_provider.mutelist == mutelist["Mutelist"]
@mock_aws
def test_aws_provider_mutelist_none(self):
@@ -567,13 +567,135 @@ aws:
aws_provider = AwsProvider(arguments)
with patch(
"prowler.providers.common.provider.get_default_mute_file_path",
"prowler.providers.aws.aws_provider.get_default_mute_file_path",
return_value=None,
):
aws_provider.mutelist = None
assert aws_provider.mutelist == {}
@mock_aws
def test_aws_provider_mutelist_s3(self):
# Create mutelist temp file
mutelist = {
"Mutelist": {
"Accounts": {
AWS_ACCOUNT_NUMBER: {
"Checks": {
"test-check": {
"Regions": [],
"Resources": [],
"Tags": [],
"Exceptions": {
"Accounts": [],
"Regions": [],
"Resources": [],
"Tags": [],
},
}
}
}
}
}
}
mutelist_file = tempfile.NamedTemporaryFile(delete=False)
with open(mutelist_file.name, "w") as mutelist_file:
mutelist_file.write(json.dumps(mutelist, indent=4))
# Create bucket and upload mutelist yaml
s3_resource = resource("s3", region_name=AWS_REGION_US_EAST_1)
bucket_name = "test-mutelist"
mutelist_file_name = "mutelist.yaml"
mutelist_bucket_object_uri = f"s3://{bucket_name}/{mutelist_file_name}"
s3_resource.create_bucket(Bucket=bucket_name)
s3_resource.Object(bucket_name, "mutelist.yaml").put(
Body=open(
mutelist_file.name,
"rb",
)
)
arguments = Namespace()
aws_provider = AwsProvider(arguments)
aws_provider.mutelist = mutelist_bucket_object_uri
os.remove(mutelist_file.name)
assert aws_provider.mutelist == mutelist["Mutelist"]
@mock_aws
def test_aws_provider_mutelist_lambda(self):
# Create mutelist temp file
mutelist = {
"Mutelist": {
"Accounts": {
AWS_ACCOUNT_NUMBER: {
"Checks": {
"test-check": {
"Regions": [],
"Resources": [],
"Tags": [],
"Exceptions": {
"Accounts": [],
"Regions": [],
"Resources": [],
"Tags": [],
},
}
}
}
}
}
}
arguments = Namespace()
aws_provider = AwsProvider(arguments)
with patch(
"prowler.providers.aws.aws_provider.get_mutelist_file_from_lambda",
return_value=mutelist["Mutelist"],
):
aws_provider.mutelist = f"arn:aws:lambda:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:function:lambda-mutelist"
assert aws_provider.mutelist == mutelist["Mutelist"]
@mock_aws
def test_aws_provider_mutelist_dynamodb(self):
# Create mutelist temp file
mutelist = {
"Mutelist": {
"Accounts": {
AWS_ACCOUNT_NUMBER: {
"Checks": {
"test-check": {
"Regions": [],
"Resources": [],
"Tags": [],
"Exceptions": {
"Accounts": [],
"Regions": [],
"Resources": [],
"Tags": [],
},
}
}
}
}
}
}
arguments = Namespace()
aws_provider = AwsProvider(arguments)
with patch(
"prowler.providers.aws.aws_provider.get_mutelist_file_from_dynamodb",
return_value=mutelist["Mutelist"],
):
aws_provider.mutelist = f"arn:aws:dynamodb:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:table/mutelist-dynamo"
assert aws_provider.mutelist == mutelist["Mutelist"]
@mock_aws
def test_generate_regional_clients_all_enabled_regions(self):
arguments = Namespace()

View File

@@ -0,0 +1,282 @@
import io
from json import dumps
import botocore
import yaml
from boto3 import client, resource
from mock import patch
from moto import mock_aws
from prowler.providers.aws.lib.mutelist.mutelist import (
get_mutelist_file_from_dynamodb,
get_mutelist_file_from_lambda,
get_mutelist_file_from_s3,
)
from tests.providers.aws.services.awslambda.awslambda_service_test import (
create_zip_file,
)
from tests.providers.aws.utils import (
AWS_ACCOUNT_NUMBER,
AWS_REGION_EU_WEST_1,
AWS_REGION_US_EAST_1,
set_mocked_aws_provider,
)
make_api_call = botocore.client.BaseClient._make_api_call
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "Invoke":
return {
"Payload": io.BytesIO(
dumps(
{
"Mutelist": {
"Accounts": {
"*": {
"Checks": {
"*": {
"Regions": ["*"],
"Resources": ["*"],
"Tags": ["key:value"],
},
}
},
}
}
}
).encode("utf-8")
)
}
return make_api_call(self, operation_name, kwarg)
class TestMutelistAWS:
@mock_aws
def test_get_mutelist_file_from_s3(self):
aws_provider = set_mocked_aws_provider()
# Create bucket and upload mutelist yaml
s3_resource = resource("s3", region_name=AWS_REGION_US_EAST_1)
s3_resource.create_bucket(Bucket="test-mutelist")
s3_resource.Object("test-mutelist", "mutelist.yaml").put(
Body=open(
"tests/lib/mutelist/fixtures/aws_mutelist.yaml",
"rb",
)
)
with open("tests/lib/mutelist/fixtures/aws_mutelist.yaml") as f:
fixture_mutelist = yaml.safe_load(f)["Mutelist"]
assert (
get_mutelist_file_from_s3(
"s3://test-mutelist/mutelist.yaml",
aws_provider.session.current_session,
)
== fixture_mutelist
)
@mock_aws
def test_get_mutelist_file_from_s3_not_present(self):
aws_provider = set_mocked_aws_provider()
assert (
get_mutelist_file_from_s3(
"s3://test-mutelist/mutelist.yaml",
aws_provider.session.current_session,
)
== {}
)
@mock_aws
def test_get_mutelist_file_from_dynamodb(self):
aws_provider = set_mocked_aws_provider()
# Create table and put item
dynamodb_resource = resource("dynamodb", region_name=AWS_REGION_US_EAST_1)
table_name = "test-mutelist"
table_arn = f"arn:aws:dynamodb:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:table/{table_name}"
params = {
"TableName": table_name,
"KeySchema": [
{"AttributeName": "Accounts", "KeyType": "HASH"},
{"AttributeName": "Checks", "KeyType": "RANGE"},
],
"AttributeDefinitions": [
{"AttributeName": "Accounts", "AttributeType": "S"},
{"AttributeName": "Checks", "AttributeType": "S"},
],
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10,
},
}
table = dynamodb_resource.create_table(**params)
dynamo_db_mutelist = {
"Accounts": "*",
"Checks": "iam_user_hardware_mfa_enabled",
"Regions": [AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
"Resources": ["keyword"],
"Exceptions": {},
}
mutelist = {
"Accounts": {
"*": {
"Checks": {
"iam_user_hardware_mfa_enabled": {
"Regions": [AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
"Resources": ["keyword"],
"Exceptions": {},
},
}
},
}
}
table.put_item(Item=dynamo_db_mutelist)
assert (
get_mutelist_file_from_dynamodb(
table_arn,
aws_provider.session.current_session,
aws_provider.identity.account,
)
== mutelist
)
@mock_aws
def test_get_mutelist_file_from_dynamodb_with_tags(self):
aws_provider = set_mocked_aws_provider()
# Create table and put item
dynamodb_resource = resource("dynamodb", region_name=AWS_REGION_US_EAST_1)
table_name = "test-mutelist"
table_arn = f"arn:aws:dynamodb:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:table/{table_name}"
params = {
"TableName": table_name,
"KeySchema": [
{"AttributeName": "Accounts", "KeyType": "HASH"},
{"AttributeName": "Checks", "KeyType": "RANGE"},
],
"AttributeDefinitions": [
{"AttributeName": "Accounts", "AttributeType": "S"},
{"AttributeName": "Checks", "AttributeType": "S"},
],
"ProvisionedThroughput": {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10,
},
}
table = dynamodb_resource.create_table(**params)
dynamo_db_mutelist = {
"Accounts": "*",
"Checks": "*",
"Regions": ["*"],
"Resources": ["*"],
"Tags": ["environment=dev"],
}
mutelist = {
"Accounts": {
"*": {
"Checks": {
"*": {
"Regions": ["*"],
"Resources": ["*"],
"Tags": ["environment=dev"],
},
}
},
}
}
table.put_item(Item=dynamo_db_mutelist)
assert (
get_mutelist_file_from_dynamodb(
table_arn,
aws_provider.session.current_session,
aws_provider.identity.account,
)
== mutelist
)
@mock_aws
def test_get_mutelist_file_from_dynamodb_not_present(self):
aws_provider = set_mocked_aws_provider()
table_name = "non-existent"
table_arn = f"arn:aws:dynamodb:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:table/{table_name}"
assert (
get_mutelist_file_from_dynamodb(
table_arn,
aws_provider.session.current_session,
aws_provider.identity.account,
)
== {}
)
@mock_aws(config={"lambda": {"use_docker": False}})
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
def test_get_mutelist_file_from_lambda(self):
aws_provider = set_mocked_aws_provider()
lambda_name = "mutelist"
lambda_role = "lambda_role"
lambda_client = client("lambda", region_name=AWS_REGION_US_EAST_1)
iam_client = client("iam", region_name=AWS_REGION_US_EAST_1)
lambda_role_assume_policy = {
"Version": "2012-10-17",
"Statement": {
"Sid": "test",
"Effect": "Allow",
"Principal": {"AWS": f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root"},
"Action": "sts:AssumeRole",
},
}
lambda_role_arn = iam_client.create_role(
RoleName=lambda_role,
AssumeRolePolicyDocument=dumps(lambda_role_assume_policy),
)["Role"]["Arn"]
lambda_code = """def handler(event, context):
checks = {}
checks["*"] = { "Regions": [ "*" ], "Resources": [ "" ], Optional("Tags"): [ "key:value" ] }
al = { "Mutelist": { "Accounts": { "*": { "Checks": checks } } } }
return al"""
lambda_function = lambda_client.create_function(
FunctionName=lambda_name,
Runtime="3.9",
Role=lambda_role_arn,
Handler="lambda_function.lambda_handler",
Code={"ZipFile": create_zip_file(code=lambda_code).read()},
Description="test lambda function",
)
lambda_function_arn = lambda_function["FunctionArn"]
mutelist = {
"Accounts": {
"*": {
"Checks": {
"*": {
"Regions": ["*"],
"Resources": ["*"],
"Tags": ["key:value"],
},
}
},
}
}
assert (
get_mutelist_file_from_lambda(
lambda_function_arn, aws_provider.session.current_session
)
== mutelist
)
@mock_aws
def test_get_mutelist_file_from_lambda_invalid_arn(self):
aws_provider = set_mocked_aws_provider()
lambda_function_arn = "invalid_arn"
assert (
get_mutelist_file_from_lambda(
lambda_function_arn, aws_provider.session.current_session
)
== {}
)
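With one getter per backing store, the provider can route a mutelist location by its scheme or ARN service. A sketch of that dispatch (hypothetical; the real routing lives in prowler.providers.aws.aws_provider):

from prowler.lib.mutelist.mutelist import get_mutelist_file_from_local_file
from prowler.providers.aws.lib.mutelist.mutelist import (
    get_mutelist_file_from_dynamodb,
    get_mutelist_file_from_lambda,
    get_mutelist_file_from_s3,
)

def load_aws_mutelist(location, session, account_id):
    if location.startswith("s3://"):
        return get_mutelist_file_from_s3(location, session)
    if ":dynamodb:" in location:
        return get_mutelist_file_from_dynamodb(location, session, account_id)
    if ":lambda:" in location:
        return get_mutelist_file_from_lambda(location, session)
    return get_mutelist_file_from_local_file(location)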

View File

@@ -33,6 +33,7 @@ class Test_acm_certificates_expiration_check:
certificate_name = "test-certificate.com"
certificate_type = "AMAZON_ISSUED"
expiration_days = 5
in_use = True
acm_client = mock.MagicMock
acm_client.certificates = [
@@ -42,11 +43,14 @@ class Test_acm_certificates_expiration_check:
name=certificate_name,
type=certificate_type,
expiration_days=expiration_days,
in_use=in_use,
transparency_logging=True,
region=AWS_REGION,
)
]
acm_client.audit_config = {"days_to_expire_threshold": 7}
with mock.patch(
"prowler.providers.aws.services.acm.acm_service.ACM",
new=acm_client,
@@ -76,6 +80,7 @@ class Test_acm_certificates_expiration_check:
certificate_name = "test-certificate.com"
certificate_type = "AMAZON_ISSUED"
expiration_days = -400
in_use = True
acm_client = mock.MagicMock
acm_client.certificates = [
@@ -85,16 +90,18 @@ class Test_acm_certificates_expiration_check:
name=certificate_name,
type=certificate_type,
expiration_days=expiration_days,
in_use=in_use,
transparency_logging=True,
region=AWS_REGION,
)
]
acm_client.audit_config = {"days_to_expire_threshold": 7}
with mock.patch(
"prowler.providers.aws.services.acm.acm_service.ACM",
new=acm_client,
):
# Test Check
from prowler.providers.aws.services.acm.acm_certificates_expiration_check.acm_certificates_expiration_check import (
acm_certificates_expiration_check,
)
@@ -119,6 +126,7 @@ class Test_acm_certificates_expiration_check:
certificate_name = "test-certificate.com"
certificate_type = "AMAZON_ISSUED"
expiration_days = 365
in_use = True
acm_client = mock.MagicMock
acm_client.certificates = [
@@ -128,16 +136,18 @@ class Test_acm_certificates_expiration_check:
name=certificate_name,
type=certificate_type,
expiration_days=expiration_days,
in_use=in_use,
transparency_logging=True,
region=AWS_REGION,
)
]
acm_client.audit_config = {"days_to_expire_threshold": 7}
with mock.patch(
"prowler.providers.aws.services.acm.acm_service.ACM",
new=acm_client,
):
# Test Check
from prowler.providers.aws.services.acm.acm_certificates_expiration_check.acm_certificates_expiration_check import (
acm_certificates_expiration_check,
)
@@ -155,3 +165,90 @@ class Test_acm_certificates_expiration_check:
assert result[0].resource_arn == certificate_arn
assert result[0].region == AWS_REGION
assert result[0].resource_tags == []
def test_acm_certificate_not_in_use(self):
certificate_id = str(uuid.uuid4())
certificate_arn = f"arn:aws:acm:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:certificate/{certificate_id}"
certificate_name = "test-certificate.com"
certificate_type = "AMAZON_ISSUED"
expiration_days = 365
in_use = False
acm_client = mock.MagicMock
acm_client.certificates = [
Certificate(
arn=certificate_arn,
id=certificate_id,
name=certificate_name,
type=certificate_type,
expiration_days=expiration_days,
in_use=in_use,
transparency_logging=True,
region=AWS_REGION,
)
]
acm_client.audit_config = {"days_to_expire_threshold": 7}
acm_client.provider = mock.MagicMock(scan_unused_services=False)
with mock.patch(
"prowler.providers.aws.services.acm.acm_service.ACM",
new=acm_client,
):
from prowler.providers.aws.services.acm.acm_certificates_expiration_check.acm_certificates_expiration_check import (
acm_certificates_expiration_check,
)
check = acm_certificates_expiration_check()
result = check.execute()
assert len(result) == 0
def test_acm_certificate_not_in_use_expired_scan_unused_services(self):
certificate_id = str(uuid.uuid4())
certificate_arn = f"arn:aws:acm:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:certificate/{certificate_id}"
certificate_name = "test-certificate.com"
certificate_type = "AMAZON_ISSUED"
expiration_days = -400
in_use = False
acm_client = mock.MagicMock
acm_client.certificates = [
Certificate(
arn=certificate_arn,
id=certificate_id,
name=certificate_name,
type=certificate_type,
expiration_days=expiration_days,
in_use=in_use,
transparency_logging=True,
region=AWS_REGION,
)
]
acm_client.audit_config = {"days_to_expire_threshold": 7}
acm_client.provider = mock.MagicMock(scan_unused_services=True)
with mock.patch(
"prowler.providers.aws.services.acm.acm_service.ACM",
new=acm_client,
):
from prowler.providers.aws.services.acm.acm_certificates_expiration_check.acm_certificates_expiration_check import (
acm_certificates_expiration_check,
)
check = acm_certificates_expiration_check()
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"ACM Certificate {certificate_id} for {certificate_name} has expired ({abs(expiration_days)} days ago)."
)
assert result[0].resource_id == certificate_id
assert result[0].resource_arn == certificate_arn
assert result[0].region == AWS_REGION
assert result[0].resource_tags == []
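The two new tests above pin down how unused certificates interact with scan_unused_services. Below is a minimal sketch of that gating, assuming a simplified certificate model; Cert, evaluate, and the exact message strings are illustrative, not Prowler's actual classes.
from dataclasses import dataclass

@dataclass
class Cert:
    id: str
    name: str
    expiration_days: int
    in_use: bool

def evaluate(certs, days_to_expire_threshold=7, scan_unused_services=False):
    findings = []
    for cert in certs:
        # Certificates not attached to any resource are skipped unless the
        # scan is explicitly asked to cover unused services.
        if not cert.in_use and not scan_unused_services:
            continue
        if cert.expiration_days < 0:
            findings.append(
                ("FAIL",
                 f"ACM Certificate {cert.id} for {cert.name} has expired "
                 f"({abs(cert.expiration_days)} days ago).")
            )
        elif cert.expiration_days <= days_to_expire_threshold:
            findings.append(
                ("FAIL",
                 f"ACM Certificate {cert.id} for {cert.name} is about to expire "
                 f"in {cert.expiration_days} days.")
            )
        else:
            findings.append(
                ("PASS",
                 f"ACM Certificate {cert.id} for {cert.name} expires "
                 f"in {cert.expiration_days} days.")
            )
    return findings

# Unused and expired: no finding by default, one FAIL when unused services are scanned.
assert evaluate([Cert("c1", "test-certificate.com", -400, in_use=False)]) == []
assert evaluate(
    [Cert("c1", "test-certificate.com", -400, in_use=False)],
    scan_unused_services=True,
)[0][0] == "FAIL"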

View File

@@ -41,6 +41,7 @@ class Test_acm_certificates_transparency_logs_enabled:
type=certificate_type,
expiration_days=365,
transparency_logging=True,
in_use=True,
region=AWS_REGION,
)
]
@@ -83,6 +84,7 @@ class Test_acm_certificates_transparency_logs_enabled:
type=certificate_type,
expiration_days=365,
transparency_logging=False,
in_use=True,
region=AWS_REGION,
)
]

View File

@@ -205,6 +205,7 @@ class Test_cloudtrail_cloudwatch_logging_enabled:
report.status_extended,
f"Multiregion trail {trail_name_us} has been logging the last 24h.",
)
assert report.region == AWS_REGION_US_EAST_1
assert report.resource_tags == []
if (
report.resource_id == trail_name_eu
@@ -217,6 +218,7 @@ class Test_cloudtrail_cloudwatch_logging_enabled:
report.status_extended,
f"Single region trail {trail_name_eu} is not logging in the last 24h.",
)
assert report.region == AWS_REGION_EU_WEST_1
assert report.resource_tags == []
@mock_aws
@@ -293,6 +295,7 @@ class Test_cloudtrail_cloudwatch_logging_enabled:
report.status_extended
== f"Single region trail {trail_name_us} has been logging the last 24h."
)
assert report.region == AWS_REGION_US_EAST_1
assert report.resource_tags == []
if report.resource_id == trail_name_eu:
assert report.resource_id == trail_name_eu
@@ -302,6 +305,7 @@ class Test_cloudtrail_cloudwatch_logging_enabled:
report.status_extended
== f"Single region trail {trail_name_eu} is not logging in the last 24h or not configured to deliver logs."
)
assert report.region == AWS_REGION_EU_WEST_1
assert report.resource_tags == []
@mock_aws

View File

@@ -229,7 +229,6 @@ class Test_cloudtrail_logs_s3_bucket_access_logging_enabled:
@mock_aws
def test_access_denied(self):
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import (
Cloudtrail,
)

View File

@@ -6,6 +6,7 @@ from moto import mock_aws
from tests.providers.aws.utils import (
AWS_ACCOUNT_NUMBER,
AWS_REGION_US_EAST_1,
AWS_REGION_US_EAST_2,
set_mocked_aws_provider,
)
@@ -44,7 +45,7 @@ class Test_cloudtrail_multi_region_enabled_logging_management_events:
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "No trail found with multi-region enabled and logging management events."
== "No CloudTrail trails enabled and logging management events were found."
)
@mock_aws
@@ -159,7 +160,7 @@ class Test_cloudtrail_multi_region_enabled_logging_management_events:
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "No trail found with multi-region enabled and logging management events."
== "No CloudTrail trails enabled and logging management events were found."
)
@mock_aws
@@ -271,7 +272,7 @@ class Test_cloudtrail_multi_region_enabled_logging_management_events:
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== "No trail found with multi-region enabled and logging management events."
== "No CloudTrail trails enabled and logging management events were found."
)
@mock_aws
@@ -299,3 +300,36 @@ class Test_cloudtrail_multi_region_enabled_logging_management_events:
check = cloudtrail_multi_region_enabled_logging_management_events()
result = check.execute()
assert len(result) == 0
def test_no_trails_two_regions(self):
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import (
Cloudtrail,
)
aws_provider = set_mocked_aws_provider(
[AWS_REGION_US_EAST_1, AWS_REGION_US_EAST_2]
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
):
with mock.patch(
"prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled_logging_management_events.cloudtrail_multi_region_enabled_logging_management_events.cloudtrail_client",
new=Cloudtrail(aws_provider),
):
# Test Check
from prowler.providers.aws.services.cloudtrail.cloudtrail_multi_region_enabled_logging_management_events.cloudtrail_multi_region_enabled_logging_management_events import (
cloudtrail_multi_region_enabled_logging_management_events,
)
check = cloudtrail_multi_region_enabled_logging_management_events()
result = check.execute()
assert len(result) == 2
for r in result:
assert r.resource_id == AWS_ACCOUNT_NUMBER
assert r.status == "FAIL"
assert (
r.status_extended
== "No CloudTrail trails enabled and logging management events were found."
)
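For the no-trail cases asserted above, the check reports once per audited region against the account itself, which is why two regions yield two FAIL findings. A rough sketch of that fan-out, using a hypothetical helper and simplified report dicts:
def no_trail_findings(audited_regions, account_id):
    # One account-level FAIL per audited region when no enabled trail is
    # logging management events.
    return [
        {
            "region": region,
            "resource_id": account_id,
            "status": "FAIL",
            "status_extended": "No CloudTrail trails enabled and logging management events were found.",
        }
        for region in audited_regions
    ]

findings = no_trail_findings(["us-east-1", "us-east-2"], "123456789012")
assert len(findings) == 2 and all(f["status"] == "FAIL" for f in findings)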

View File

@@ -4,6 +4,7 @@ from moto import mock_aws
from prowler.providers.aws.services.cloudtrail.cloudtrail_service import Cloudtrail
from tests.providers.aws.utils import (
AWS_ACCOUNT_NUMBER,
AWS_REGION_EU_SOUTH_2,
AWS_REGION_EU_WEST_1,
AWS_REGION_US_EAST_1,
set_mocked_aws_provider,
@@ -50,23 +51,14 @@ class Test_Cloudtrail_Service:
@mock_aws
def test_describe_trails(self):
# USA
cloudtrail_client_us_east_1 = client(
"cloudtrail", region_name=AWS_REGION_US_EAST_1
)
s3_client_us_east_1 = client("s3", region_name=AWS_REGION_US_EAST_1)
cloudtrail_client_eu_west_1 = client(
"cloudtrail", region_name=AWS_REGION_EU_WEST_1
)
s3_client_eu_west_1 = client("s3", region_name=AWS_REGION_EU_WEST_1)
trail_name_us = "trail_test_us"
bucket_name_us = "bucket_test_us"
trail_name_eu = "trail_test_eu"
bucket_name_eu = "bucket_test_eu"
s3_client_us_east_1.create_bucket(Bucket=bucket_name_us)
s3_client_eu_west_1.create_bucket(
Bucket=bucket_name_eu,
CreateBucketConfiguration={"LocationConstraint": AWS_REGION_EU_WEST_1},
)
cloudtrail_client_us_east_1.create_trail(
Name=trail_name_us,
S3BucketName=bucket_name_us,
@@ -75,6 +67,18 @@ class Test_Cloudtrail_Service:
{"Key": "test", "Value": "test"},
],
)
# IRELAND
cloudtrail_client_eu_west_1 = client(
"cloudtrail", region_name=AWS_REGION_EU_WEST_1
)
s3_client_eu_west_1 = client("s3", region_name=AWS_REGION_EU_WEST_1)
trail_name_eu = "trail_test_eu"
bucket_name_eu = "bucket_test_eu"
s3_client_eu_west_1.create_bucket(
Bucket=bucket_name_eu,
CreateBucketConfiguration={"LocationConstraint": AWS_REGION_EU_WEST_1},
)
cloudtrail_client_eu_west_1.create_trail(
Name=trail_name_eu,
S3BucketName=bucket_name_eu,
@@ -83,19 +87,60 @@ class Test_Cloudtrail_Service:
{"Key": "test", "Value": "test"},
],
)
# SPAIN
cloudtrail_client_eu_south_2 = client(
"cloudtrail", region_name=AWS_REGION_EU_SOUTH_2
)
s3_client_eu_south_2 = client("s3", region_name=AWS_REGION_EU_SOUTH_2)
trail_name_sp = "trail_test_sp"
bucket_name_sp = "bucket_test_sp"
s3_client_eu_south_2.create_bucket(
Bucket=bucket_name_sp,
CreateBucketConfiguration={"LocationConstraint": AWS_REGION_EU_SOUTH_2},
)
cloudtrail_client_eu_south_2.create_trail(
Name=trail_name_sp,
S3BucketName=bucket_name_sp,
IsMultiRegionTrail=True,
TagsList=[
{"Key": "test", "Value": "test"},
],
)
# AWS_REGION_EU_SOUTH_2 is not included in the audited regions, but since
# this trail is multi-region it will still appear through the audited endpoints
aws_provider = set_mocked_aws_provider(
[AWS_REGION_US_EAST_1, AWS_REGION_EU_WEST_1]
)
cloudtrail = Cloudtrail(aws_provider)
assert len(cloudtrail.trails) == 2
assert len(cloudtrail.trails) == 3
for trail in cloudtrail.trails.values():
if trail.name:
assert trail.name == trail_name_us or trail.name == trail_name_eu
if trail.name == trail_name_us:
assert not trail.is_multiregion
assert (
trail.home_region == AWS_REGION_US_EAST_1
or trail.home_region == AWS_REGION_EU_WEST_1
)
assert trail.home_region == AWS_REGION_US_EAST_1
assert trail.region == AWS_REGION_US_EAST_1
assert not trail.is_logging
assert not trail.log_file_validation_enabled
assert not trail.latest_cloudwatch_delivery_time
assert trail.s3_bucket == bucket_name_us
assert trail.tags == [
{"Key": "test", "Value": "test"},
]
if trail.name == trail_name_eu:
assert not trail.is_multiregion
assert trail.home_region == AWS_REGION_EU_WEST_1
assert trail.region == AWS_REGION_EU_WEST_1
assert not trail.is_logging
assert not trail.log_file_validation_enabled
assert not trail.latest_cloudwatch_delivery_time
assert trail.s3_bucket == bucket_name_eu
assert trail.tags == [
{"Key": "test", "Value": "test"},
]
if trail.name == trail_name_sp:
assert trail.is_multiregion
assert trail.home_region == AWS_REGION_EU_SOUTH_2
# The region falls back to an audited region since the trail's home region is not audited
assert (
trail.region == AWS_REGION_US_EAST_1
or trail.region == AWS_REGION_EU_WEST_1
@@ -103,13 +148,9 @@ class Test_Cloudtrail_Service:
assert not trail.is_logging
assert not trail.log_file_validation_enabled
assert not trail.latest_cloudwatch_delivery_time
assert (
trail.s3_bucket == bucket_name_eu
or trail.s3_bucket == bucket_name_us
)
assert trail.tags == [
{"Key": "test", "Value": "test"},
]
assert trail.s3_bucket == bucket_name_sp
# No tags, since the trail's home region is not audited and tags are only retrieved from its regional endpoint
assert trail.tags == []
@mock_aws
def test_status_trails(self):
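The assertions above rely on how a trail gets its reporting region when its home region is outside the audited set: a multi-region trail is returned by every regional endpoint, so it surfaces under an audited region, while its tags (fetched from the home-region endpoint) stay empty. A hedged sketch of that fallback, with simplified inputs rather than the actual service code:
def trail_report_region(home_region, audited_regions):
    # An audited home region wins; otherwise the multi-region trail is
    # reported under whichever audited regional endpoint returned it
    # (deterministically the first one in this simplified sketch).
    if home_region in audited_regions:
        return home_region
    return audited_regions[0]

assert trail_report_region("eu-south-2", ["us-east-1", "eu-west-1"]) == "us-east-1"
assert trail_report_region("eu-west-1", ["us-east-1", "eu-west-1"]) == "eu-west-1"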

View File

@@ -1,8 +1,10 @@
from unittest import mock
from boto3 import client, resource
from moto import mock_aws
from prowler.providers.aws.services.vpc.vpc_service import VPC
from tests.providers.aws.utils import (
AWS_REGION_EU_WEST_1,
@@ -53,12 +55,23 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_any_port:
def test_ec2_non_compliant_default_sg(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
vpc_response = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
vpc_id = vpc_response["Vpc"]["VpcId"]
# Create Subnet
subnet_response = ec2_client.create_subnet(
VpcId=vpc_id, CidrBlock="10.0.1.0/24"
)
subnet_id = subnet_response["Subnet"]["SubnetId"]
default_sg = ec2_client.describe_security_groups(GroupNames=["default"])[
"SecurityGroups"
][0]
default_sg_id = default_sg["GroupId"]
default_sg_name = default_sg["GroupName"]
# Authorize ingress rule
ec2_client.authorize_security_group_ingress(
GroupId=default_sg_id,
IpPermissions=[
@@ -69,12 +82,31 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_any_port:
],
)
# Create Network Interface
network_interface_response = ec2_client.create_network_interface(
SubnetId=subnet_id,
Groups=[
default_sg_id
], # Associating the network interface with the default security group
Description="Test Network Interface",
)
self.verify_check_fail(
default_sg_id, default_sg_name, network_interface_response
)
def verify_check_fail(
self, default_sg_id, default_sg_name, network_interface_response
):
eni = network_interface_response.get("NetworkInterface", {})
att = eni.get("Attachment", {})
eni_type = eni.get("InterfaceType", "")
eni_owner = att.get("InstanceOwnerId", "")
from prowler.providers.aws.services.ec2.ec2_service import EC2
aws_provider = set_mocked_aws_provider(
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
@@ -102,7 +134,266 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_any_port:
assert sg.region == AWS_REGION_US_EAST_1
assert (
sg.status_extended
== f"Security group {default_sg_name} ({default_sg_id}) has at least one port open to the Internet."
== f"Security group {default_sg_name} ({default_sg_id}) has at least one port open to the Internet and neither its network interface type ({eni_type}) nor its network interface instance owner ({eni_owner}) are part of the allowed network interfaces."
)
assert (
sg.resource_arn
== f"arn:{aws_provider.identity.partition}:ec2:{AWS_REGION_US_EAST_1}:{aws_provider.identity.account}:security-group/{default_sg_id}"
)
assert sg.resource_details == default_sg_name
assert sg.resource_tags == []
@mock_aws
def test_check_enis(self):
aws_provider = set_mocked_aws_provider(
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
audit_config={
"ec2_allowed_interface_types": ["api_gateway_managed", "vpc_endpoint"],
"ec2_allowed_instance_owners": ["amazon-elb"],
},
)
from prowler.providers.aws.services.ec2.ec2_service import EC2
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_client",
new=EC2(aws_provider),
):
from prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port import (
ec2_securitygroup_allow_ingress_from_internet_to_any_port,
)
from unittest.mock import Mock
from prowler.providers.aws.services.ec2.ec2_service import NetworkInterface
tests = [
{
"eni_interface_type": "vpc_endpoint",
"eni_instance_owner": "NOT_ALLOWED",
"report": {
"status": "PASS",
"status_extended": "Security group SG_name (SG_id) has at least one port open to the Internet but is exclusively attached to an allowed network interface type (vpc_endpoint).",
},
},
{
"eni_interface_type": "NOT_ALLOWED",
"eni_instance_owner": "amazon-elb",
"report": {
"status": "PASS",
"status_extended": "Security group SG_name (SG_id) has at least one port open to the Internet but is exclusively attached to an allowed network interface instance owner (amazon-elb).",
},
},
{
"eni_interface_type": "NOT_ALLOWED_ENI_TYPE",
"eni_instance_owner": "NOT_ALLOWED_INSTANCE_OWNER",
"report": {
"status": "FAIL",
"status_extended": "Security group SG_name (SG_id) has at least one port open to the Internet and neither its network interface type (NOT_ALLOWED_ENI_TYPE) nor its network interface instance owner (NOT_ALLOWED_INSTANCE_OWNER) are part of the allowed network interfaces.",
},
},
]
check = ec2_securitygroup_allow_ingress_from_internet_to_any_port()
for test in tests:
eni = NetworkInterface(
id="1",
association={},
attachment={"InstanceOwnerId": test["eni_instance_owner"]},
private_ip="1",
type=test["eni_interface_type"],
subnet_id="1",
vpc_id="1",
region="1",
)
report = Mock()
check.check_enis(
report=report,
security_group_name="SG_name",
security_group_id="SG_id",
enis=[eni],
)
assert report.status == test["report"]["status"]
assert report.status_extended == test["report"]["status_extended"]
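check_enis effectively classifies each attached interface against the two allow-lists from audit_config, and the security group only passes when every attached ENI qualifies. A per-ENI sketch of that decision follows; it is a standalone function for illustration, whereas the real method mutates the report in place:
def classify_eni(eni_type, instance_owner, allowed_types, allowed_owners):
    # An allowed interface type or an allowed instance owner flips the
    # finding to PASS; otherwise the open security group stays a FAIL.
    if eni_type in allowed_types:
        return "PASS", f"allowed network interface type ({eni_type})"
    if instance_owner in allowed_owners:
        return "PASS", f"allowed network interface instance owner ({instance_owner})"
    return "FAIL", (
        f"neither its network interface type ({eni_type}) nor its network "
        f"interface instance owner ({instance_owner}) are part of the allowed "
        "network interfaces"
    )

status, _ = classify_eni(
    "vpc_endpoint",
    "NOT_ALLOWED",
    ["api_gateway_managed", "vpc_endpoint"],
    ["amazon-elb"],
)
assert status == "PASS"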
@mock_aws
def test_ec2_open_sg_attached_to_allowed_eni_type(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
# Create VPC
vpc_response = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
vpc_id = vpc_response["Vpc"]["VpcId"]
# Create Subnet
subnet_response = ec2_client.create_subnet(
VpcId=vpc_id, CidrBlock="10.0.1.0/24"
)
subnet_id = subnet_response["Subnet"]["SubnetId"]
# Get default security group
default_sg = ec2_client.describe_security_groups(
Filters=[
{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "group-name", "Values": ["default"]},
]
)["SecurityGroups"][0]
default_sg_id = default_sg["GroupId"]
default_sg_name = default_sg["GroupName"]
# Authorize ingress rule
ec2_client.authorize_security_group_ingress(
GroupId=default_sg_id,
IpPermissions=[
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": "0.0.0.0/0"}],
}
],
)
# Create Network Interface
network_interface_response = ec2_client.create_network_interface(
SubnetId=subnet_id,
Groups=[
default_sg_id
], # Associating the network interface with the default security group
Description="Test Network Interface",
)
eni_type = network_interface_response["NetworkInterface"]["InterfaceType"]
from prowler.providers.aws.services.ec2.ec2_service import EC2
aws_provider = set_mocked_aws_provider(
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
audit_config={"ec2_allowed_interface_types": [eni_type]},
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_client",
new=EC2(aws_provider),
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.vpc_client",
new=VPC(aws_provider),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port import (
ec2_securitygroup_allow_ingress_from_internet_to_any_port,
)
check = ec2_securitygroup_allow_ingress_from_internet_to_any_port()
result = check.execute()
# One default SG per audited region plus the default SG of the VPC created above
assert len(result) == 3
# Search changed sg
for sg in result:
if sg.resource_id == default_sg_id:
assert sg.status == "PASS"
assert sg.region == AWS_REGION_US_EAST_1
assert (
sg.status_extended
== f"Security group {default_sg_name} ({default_sg_id}) has at least one port open to the Internet but is exclusively attached to an allowed network interface type ({eni_type})."
)
assert (
sg.resource_arn
== f"arn:{aws_provider.identity.partition}:ec2:{AWS_REGION_US_EAST_1}:{aws_provider.identity.account}:security-group/{default_sg_id}"
)
assert sg.resource_details == default_sg_name
assert sg.resource_tags == []
@mock_aws
def test_ec2_open_sg_attached_to_allowed_eni_owner(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
# Create VPC
vpc_response = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
vpc_id = vpc_response["Vpc"]["VpcId"]
# Create Subnet
subnet_response = ec2_client.create_subnet(
VpcId=vpc_id, CidrBlock="10.0.1.0/24"
)
subnet_id = subnet_response["Subnet"]["SubnetId"]
# Get default security group
default_sg = ec2_client.describe_security_groups(
Filters=[
{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "group-name", "Values": ["default"]},
]
)["SecurityGroups"][0]
default_sg_id = default_sg["GroupId"]
default_sg_name = default_sg["GroupName"]
# Authorize ingress rule
ec2_client.authorize_security_group_ingress(
GroupId=default_sg_id,
IpPermissions=[
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": "0.0.0.0/0"}],
}
],
)
# Create Network Interface
network_interface_response = ec2_client.create_network_interface(
SubnetId=subnet_id,
Groups=[
default_sg_id
], # Associating the network interface with the default security group
Description="Test Network Interface",
)
eni = network_interface_response.get("NetworkInterface", {})
att = eni.get("Attachment", {})
eni_owner = att.get("InstanceOwnerId", "")
from prowler.providers.aws.services.ec2.ec2_service import EC2
aws_provider = set_mocked_aws_provider(
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
audit_config={"ec2_allowed_instance_owners": [eni_owner]},
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_client",
new=EC2(aws_provider),
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.vpc_client",
new=VPC(aws_provider),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port import (
ec2_securitygroup_allow_ingress_from_internet_to_any_port,
)
check = ec2_securitygroup_allow_ingress_from_internet_to_any_port()
result = check.execute()
# One default SG per audited region plus the default SG of the VPC created above
assert len(result) == 3
# Search changed sg
for sg in result:
if sg.resource_id == default_sg_id:
assert sg.status == "PASS"
assert sg.region == AWS_REGION_US_EAST_1
assert (
sg.status_extended
== f"Security group {default_sg_name} ({default_sg_id}) has at least one port open to the Internet but is exclusively attached to an allowed network interface instance owner ({eni_owner})."
)
assert (
sg.resource_arn
@@ -174,15 +465,27 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_any_port:
assert sg.resource_tags == []
@mock_aws
def test_ec2_compliant_default_sg_only_open_to_one_port(self):
def test_ec2_non_compliant_default_sg_open_to_one_port(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
# Create VPC
vpc_response = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
vpc_id = vpc_response["Vpc"]["VpcId"]
# Create Subnet
subnet_response = ec2_client.create_subnet(
VpcId=vpc_id, CidrBlock="10.0.1.0/24"
)
subnet_id = subnet_response["Subnet"]["SubnetId"]
default_sg = ec2_client.describe_security_groups(GroupNames=["default"])[
"SecurityGroups"
][0]
default_sg_id = default_sg["GroupId"]
default_sg_name = default_sg["GroupName"]
# Authorize ingress rule
ec2_client.authorize_security_group_ingress(
GroupId=default_sg_id,
IpPermissions=[
@@ -198,47 +501,18 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_any_port:
],
)
from prowler.providers.aws.services.ec2.ec2_service import EC2
aws_provider = set_mocked_aws_provider(
[AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1],
# Create Network Interface
network_interface_response = ec2_client.create_network_interface(
SubnetId=subnet_id,
Groups=[
default_sg_id
], # Associating the network interface with the default security group
Description="Test Network Interface",
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_client",
new=EC2(aws_provider),
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port.vpc_client",
new=VPC(aws_provider),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_allow_ingress_from_internet_to_any_port.ec2_securitygroup_allow_ingress_from_internet_to_any_port import (
ec2_securitygroup_allow_ingress_from_internet_to_any_port,
)
check = ec2_securitygroup_allow_ingress_from_internet_to_any_port()
result = check.execute()
# One default sg per region
assert len(result) == 3
# Search changed sg
for sg in result:
if sg.resource_id == default_sg_id:
assert sg.status == "FAIL"
assert sg.region == AWS_REGION_US_EAST_1
assert (
sg.status_extended
== f"Security group {default_sg_name} ({default_sg_id}) has at least one port open to the Internet."
)
assert (
sg.resource_arn
== f"arn:{aws_provider.identity.partition}:ec2:{AWS_REGION_US_EAST_1}:{aws_provider.identity.account}:security-group/{default_sg_id}"
)
assert sg.resource_details == default_sg_name
assert sg.resource_tags == []
self.verify_check_fail(
default_sg_id, default_sg_name, network_interface_response
)
@mock_aws
def test_ec2_default_sgs_ignoring(self):
@@ -316,7 +590,7 @@ class Test_ec2_securitygroup_allow_ingress_from_internet_to_any_port:
assert result[0].region == AWS_REGION_US_EAST_1
@mock_aws
def test_ec2_default_sgs_with_all_ports_check(self):
def test_ec2_default_sgs_with_any_ports_check(self):
# Create EC2 Mocked Resources
ec2 = resource("ec2", region_name=AWS_REGION_US_EAST_1)
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")

View File

@@ -127,6 +127,58 @@ class Test_ec2_securitygroup_default_restrict_traffic:
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_id == default_sg_id
@mock_aws
def test_ec2_non_compliant_sg_ingress_rule_but_unused(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
default_sg = ec2_client.describe_security_groups(GroupNames=["default"])[
"SecurityGroups"
][0]
default_sg_id = default_sg["GroupId"]
default_sg["GroupName"]
ec2_client.authorize_security_group_ingress(
GroupId=default_sg_id,
IpPermissions=[
{"IpProtocol": "-1", "IpRanges": [{"CidrIp": "10.0.0.16/0"}]}
],
)
ec2_client.revoke_security_group_egress(
GroupId=default_sg_id,
IpPermissions=[
{
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": "0.0.0.0/0"}],
"Ipv6Ranges": [],
"PrefixListIds": [],
"UserIdGroupPairs": [],
}
],
)
from prowler.providers.aws.services.ec2.ec2_service import EC2
aws_provider = set_mocked_aws_provider(
audited_regions=[AWS_REGION_US_EAST_1], scan_unused_services=False
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
), mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_default_restrict_traffic.ec2_securitygroup_default_restrict_traffic.ec2_client",
new=EC2(aws_provider),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_default_restrict_traffic.ec2_securitygroup_default_restrict_traffic import (
ec2_securitygroup_default_restrict_traffic,
)
check = ec2_securitygroup_default_restrict_traffic()
result = check.execute()
# No findings: the default SG has no attached ENIs and unused services are not scanned
assert len(result) == 0
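The zero-result assertion above hinges on usage-based filtering: a security group with no attached network interfaces is treated as unused and dropped when scan_unused_services is off. A minimal sketch of that filter, using an illustrative dict shape rather than the service model:
def reportable_security_groups(security_groups, scan_unused_services):
    # Unused groups (no attached ENIs) are only audited on demand.
    return [
        sg for sg in security_groups
        if scan_unused_services or sg["network_interfaces"]
    ]

assert reportable_security_groups(
    [{"id": "sg-default", "network_interfaces": []}], scan_unused_services=False
) == []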
@mock_aws
def test_ec2_non_compliant_sg_egress_rule(self):
# Create EC2 Mocked Resources

View File

@@ -58,6 +58,54 @@ class Test_elasticache_cluster_uses_public_subnet:
result = check.execute()
assert len(result) == 0
def test_elasticache_no_subnets(self):
# Mock ElastiCache Service
elasticache_service = MagicMock()
elasticache_service.clusters = {}
elasticache_service.clusters[ELASTICACHE_CLUSTER_ARN] = Cluster(
arn=ELASTICACHE_CLUSTER_ARN,
name=ELASTICACHE_CLUSTER_NAME,
id=ELASTICACHE_CLUSTER_NAME,
region=AWS_REGION_US_EAST_1,
cache_subnet_group_id=SUBNET_GROUP_NAME,
tags=ELASTICACHE_CLUSTER_TAGS,
)
# Mock VPC Service
vpc_client = MagicMock()
vpc_client.vpc_subnets = {}
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=set_mocked_aws_provider([AWS_REGION_US_EAST_1]),
), mock.patch(
"prowler.providers.aws.services.elasticache.elasticache_service.ElastiCache",
new=elasticache_service,
), mock.patch(
"prowler.providers.aws.services.vpc.vpc_service.VPC",
new=vpc_client,
), mock.patch(
"prowler.providers.aws.services.vpc.vpc_client.vpc_client",
new=vpc_client,
):
from prowler.providers.aws.services.elasticache.elasticache_cluster_uses_public_subnet.elasticache_cluster_uses_public_subnet import (
elasticache_cluster_uses_public_subnet,
)
check = elasticache_cluster_uses_public_subnet()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"Cluster {ELASTICACHE_CLUSTER_NAME} is not using public subnets."
)
assert result[0].region == AWS_REGION_US_EAST_1
assert result[0].resource_id == ELASTICACHE_CLUSTER_NAME
assert result[0].resource_arn == ELASTICACHE_CLUSTER_ARN
assert result[0].resource_tags == ELASTICACHE_CLUSTER_TAGS
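The PASS above follows from the subnet lookup: with vpc_client.vpc_subnets empty, none of the cluster's subnets can resolve to a public one. A simplified sketch of that lookup, assuming a hypothetical public flag on the collected subnet objects:
def cluster_uses_public_subnet(cluster_subnet_ids, vpc_subnets):
    # Subnets missing from the collected VPC metadata are treated as
    # non-public, so an empty mapping always yields False (PASS).
    return any(
        getattr(vpc_subnets.get(subnet_id), "public", False)
        for subnet_id in cluster_subnet_ids
    )

assert cluster_uses_public_subnet(["subnet-1"], {}) is False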
def test_elasticache_clusters_using_private_subnets(self):
# Mock ElastiCache Service
elasticache_service = MagicMock()

View File

@@ -11,6 +11,9 @@ from tests.providers.aws.utils import (
)
make_api_call = botocore.client.BaseClient._make_api_call
cluster_arn = (
f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:cluster:db-cluster-1"
)
def mock_make_api_call(self, operation_name, kwarg):
@@ -160,10 +163,7 @@ class Test_rds_instance_transport_encrypted:
)
assert result[0].resource_id == "db-cluster-1"
assert result[0].region == AWS_REGION_US_EAST_1
assert (
result[0].resource_arn
== f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:cluster:db-cluster-1"
)
assert result[0].resource_arn == cluster_arn
assert result[0].resource_tags == []
@mock_aws
@@ -433,16 +433,6 @@ class Test_rds_instance_transport_encrypted:
MasterUserPassword="password",
Tags=[],
)
conn.modify_db_parameter_group(
DBParameterGroupName="test",
Parameters=[
{
"ParameterName": "rds.force_ssl",
"ParameterValue": "1",
"ApplyMethod": "immediate",
},
],
)
from prowler.providers.aws.services.rds.rds_service import RDS
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
@@ -454,12 +444,14 @@ class Test_rds_instance_transport_encrypted:
with mock.patch(
"prowler.providers.aws.services.rds.rds_instance_transport_encrypted.rds_instance_transport_encrypted.rds_client",
new=RDS(aws_provider),
):
) as rds_client:
# Test Check
from prowler.providers.aws.services.rds.rds_instance_transport_encrypted.rds_instance_transport_encrypted import (
rds_instance_transport_encrypted,
)
# Enable SSL enforcement directly on the service object since Moto does not support DB cluster parameter group values
rds_client.db_clusters[cluster_arn].require_secure_transport = "ON"
check = rds_instance_transport_encrypted()
result = check.execute()
@@ -471,10 +463,7 @@ class Test_rds_instance_transport_encrypted:
)
assert result[0].resource_id == "db-cluster-1"
assert result[0].region == AWS_REGION_US_EAST_1
assert (
result[0].resource_arn
== f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:cluster:db-cluster-1"
)
assert result[0].resource_arn == cluster_arn
assert result[0].resource_tags == []
@mock_aws
@@ -517,12 +506,14 @@ class Test_rds_instance_transport_encrypted:
with mock.patch(
"prowler.providers.aws.services.rds.rds_instance_transport_encrypted.rds_instance_transport_encrypted.rds_client",
new=RDS(aws_provider),
):
) as rds_client:
# Test Check
from prowler.providers.aws.services.rds.rds_instance_transport_encrypted.rds_instance_transport_encrypted import (
rds_instance_transport_encrypted,
)
# Enable SSL enforcement directly on the service object since Moto does not support DB cluster parameter group values
rds_client.db_clusters[cluster_arn].require_secure_transport = "ON"
check = rds_instance_transport_encrypted()
result = check.execute()
@@ -534,8 +525,5 @@ class Test_rds_instance_transport_encrypted:
)
assert result[0].resource_id == "db-cluster-1"
assert result[0].region == AWS_REGION_US_EAST_1
assert (
result[0].resource_arn
== f"arn:aws:rds:{AWS_REGION_US_EAST_1}:{AWS_ACCOUNT_NUMBER}:cluster:db-cluster-1"
)
assert result[0].resource_arn == cluster_arn
assert result[0].resource_tags == []
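Both patched blocks above flip require_secure_transport on the mocked service object, which is all the check needs to see. A hedged sketch of the cluster-side evaluation, simplified to the MySQL-style parameter; PostgreSQL's rds.force_ssl would be handled analogously:
def cluster_transport_encrypted(require_secure_transport):
    # "ON" means in-transit encryption is enforced for client connections.
    return "PASS" if require_secure_transport == "ON" else "FAIL"

assert cluster_transport_encrypted("ON") == "PASS"
assert cluster_transport_encrypted("OFF") == "FAIL"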

View File

@@ -211,8 +211,8 @@ class Test_RDS_Service:
def test__describe_db_clusters__(self):
conn = client("rds", region_name=AWS_REGION_US_EAST_1)
cluster_id = "db-master-1"
conn.create_db_parameter_group(
DBParameterGroupName="test",
conn.create_db_cluster_parameter_group(
DBClusterParameterGroupName="test",
DBParameterGroupFamily="default.postgres9.3",
Description="test parameter group",
)
@@ -260,6 +260,8 @@ class Test_RDS_Service:
{"Key": "test", "Value": "test"},
]
assert rds.db_clusters[db_cluster_arn].parameter_group == "test"
assert rds.db_clusters[db_cluster_arn].force_ssl == "0"
assert rds.db_clusters[db_cluster_arn].require_secure_transport == "OFF"
# Test RDS Describe DB Cluster Snapshots
@mock_aws

View File

@@ -166,3 +166,31 @@ class Test_vpc_subnet_different_az:
assert result.region == AWS_REGION_US_EAST_1
assert found
@mock_aws
def test_vpc_no_subnets_but_unused(self):
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
ec2_client.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default")
from prowler.providers.aws.services.vpc.vpc_service import VPC
aws_provider = set_mocked_aws_provider(
audited_regions=[AWS_REGION_US_EAST_1], scan_unused_services=False
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
):
with mock.patch(
"prowler.providers.aws.services.vpc.vpc_subnet_different_az.vpc_subnet_different_az.vpc_client",
new=VPC(aws_provider),
):
from prowler.providers.aws.services.vpc.vpc_subnet_different_az.vpc_subnet_different_az import (
vpc_subnet_different_az,
)
check = vpc_subnet_different_az()
results = check.execute()
assert len(results) == 0

View File

@@ -102,3 +102,51 @@ class Test_vpc_subnet_no_public_ip_by_default:
result.status_extended
== f"VPC subnet {subnet_private['Subnet']['SubnetId']} does NOT assign public IP by default."
)
@mock_aws
def test_vpc_with_map_ip_on_launch_but_unused(self):
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
vpc = ec2_client.create_vpc(
CidrBlock="172.28.7.0/24", InstanceTenancy="default"
)
subnet_private = ec2_client.create_subnet(
VpcId=vpc["Vpc"]["VpcId"],
CidrBlock="172.28.7.192/26",
AvailabilityZone=f"{AWS_REGION_US_EAST_1}a",
TagSpecifications=[
{
"ResourceType": "subnet",
"Tags": [
{"Key": "Name", "Value": "subnet_name"},
],
},
],
)
ec2_client.modify_subnet_attribute(
SubnetId=subnet_private["Subnet"]["SubnetId"],
MapPublicIpOnLaunch={"Value": True},
)
from prowler.providers.aws.services.vpc.vpc_service import VPC
aws_provider = set_mocked_aws_provider(
audited_regions=[AWS_REGION_US_EAST_1], scan_unused_services=False
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
):
with mock.patch(
"prowler.providers.aws.services.vpc.vpc_subnet_no_public_ip_by_default.vpc_subnet_no_public_ip_by_default.vpc_client",
new=VPC(aws_provider),
):
from prowler.providers.aws.services.vpc.vpc_subnet_no_public_ip_by_default.vpc_subnet_no_public_ip_by_default import (
vpc_subnet_no_public_ip_by_default,
)
check = vpc_subnet_no_public_ip_by_default()
results = check.execute()
assert len(results) == 0

View File

@@ -129,6 +129,48 @@ class Test_vpc_subnet_separate_private_public:
assert found
@mock_aws
def test_vpc_subnet_only_public_but_unused(self):
# Create EC2 Mocked Resources
ec2 = resource("ec2", region_name=AWS_REGION_US_EAST_1)
vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/18")
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)
# Create IGW and attach to VPC
igw = ec2.create_internet_gateway()
vpc.attach_internet_gateway(InternetGatewayId=igw.id)
# Set IGW as default route for public subnet
route_table = ec2.create_route_table(VpcId=vpc.id)
route_table.associate_with_subnet(SubnetId=subnet.id)
ec2_client.create_route(
RouteTableId=route_table.id,
DestinationCidrBlock="0.0.0.0/0",
GatewayId=igw.id,
)
from prowler.providers.aws.services.vpc.vpc_service import VPC
aws_provider = set_mocked_aws_provider(
audited_regions=[AWS_REGION_US_EAST_1], scan_unused_services=False
)
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
return_value=aws_provider,
):
with mock.patch(
"prowler.providers.aws.services.vpc.vpc_subnet_separate_private_public.vpc_subnet_separate_private_public.vpc_client",
new=VPC(aws_provider),
):
from prowler.providers.aws.services.vpc.vpc_subnet_separate_private_public.vpc_subnet_separate_private_public import (
vpc_subnet_separate_private_public,
)
check = vpc_subnet_separate_private_public()
results = check.execute()
assert len(results) == 0
@mock_aws
def test_vpc_subnet_private_and_public(self):
ec2_client = client("ec2", region_name=AWS_REGION_US_EAST_1)