Mirror of https://github.com/prowler-cloud/prowler.git
Synced 2026-01-25 02:08:11 +00:00

Compare commits: update-api...3.12.0 (201 commits)
.github/workflows/codeql.yml (4 changes, vendored)

@@ -13,10 +13,10 @@ name: "CodeQL"
 on:
   push:
-    branches: [ "master", prowler-2, prowler-3.0-dev ]
+    branches: [ "master", "prowler-4.0-dev" ]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ "master" ]
+    branches: [ "master", "prowler-4.0-dev" ]
   schedule:
     - cron: '00 12 * * *'
.github/workflows/pull-request.yml (5 changes, vendored)

@@ -4,9 +4,11 @@ on:
   push:
     branches:
       - "master"
+      - "prowler-4.0-dev"
   pull_request:
     branches:
       - "master"
+      - "prowler-4.0-dev"

 jobs:
   build:
     runs-on: ubuntu-latest
@@ -18,7 +20,7 @@ jobs:
       - uses: actions/checkout@v3
       - name: Test if changes are in not ignored paths
         id: are-non-ignored-files-changed
-        uses: tj-actions/changed-files@v39
+        uses: tj-actions/changed-files@v41
         with:
           files: ./**
           files_ignore: |
@@ -26,6 +28,7 @@ jobs:
            README.md
            docs/**
            permissions/**
+           mkdocs.yml
       - name: Install poetry
         if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
         run: |
@@ -178,11 +178,7 @@ Prowler will follow the same credentials search as [Google authentication librar
 2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
 3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)

-Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
-
-- Viewer
-- Security Reviewer
-- Stackdriver Account Viewer
+Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the `Viewer` role to the member associated with the credentials.

 > By default, `prowler` will scan all accessible GCP Projects, use flag `--project-ids` to specify the projects to be scanned.
@@ -97,10 +97,6 @@ Prowler will follow the same credentials search as [Google authentication librar
 2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
 3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)

-Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
-
-- Viewer
-- Security Reviewer
-- Stackdriver Account Viewer
+Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the `Viewer` role to the member associated with the credentials.

 > By default, `prowler` will scan all accessible GCP Projects, use flag `--project-ids` to specify the projects to be scanned.
@@ -136,26 +136,16 @@ Prowler is available as a project in [PyPI](https://pypi.org/project/prowler-clo
 === "AWS CloudShell"

-    Prowler can be easily executed in AWS CloudShell but it has some prerequisites to be able to do so. AWS CloudShell is a container running with `Amazon Linux release 2 (Karoo)` that comes with Python 3.7; since Prowler requires Python >= 3.9, we need to first install a newer version of Python. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:
+    After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:

     _Requirements_:

-    * First install all dependencies and then Python; in this case we need to compile it because there is no package available at the time this document is written:
-    ```
-    sudo yum -y install gcc openssl-devel bzip2-devel libffi-devel
-    wget https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tgz
-    tar zxf Python-3.9.16.tgz
-    cd Python-3.9.16/
-    ./configure --enable-optimizations
-    sudo make altinstall
-    python3.9 --version
-    cd
-    ```
     * Open AWS CloudShell `bash`.

     _Commands_:

-    * Once Python 3.9 is available we can install Prowler from pip:
+    * Install Prowler from pip:
     ```
-    pip3.9 install prowler
+    pip install prowler
+    prowler -v
     ```
@@ -32,3 +32,14 @@ Prowler's AWS Provider uses the Boto3 [Standard](https://boto3.amazonaws.com/v1/
 - Retry attempts on nondescriptive, transient error codes. Specifically, these HTTP status codes: 500, 502, 503, 504.

 - Any retry attempt will include an exponential backoff by a base factor of 2 for a maximum backoff time of 20 seconds.
+
+## Notes for validating retry attempts
+
+If you are making changes to Prowler and want to validate whether requests are being retried or given up on, you can take the following approach:
+
+* Run Prowler with `--log-level DEBUG` and `--log-file debuglogs.txt`
+* Search for retry attempts using `grep -i 'Retry needed' debuglogs.txt`
+
+This is based on the [AWS documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#checking-retry-attempts-in-your-client-logs), which states that if a retry is performed, you will see a message starting with "Retry needed".
+
+You can determine the total number of calls made using `grep -i 'Sending http request' debuglogs.txt | wc -l`
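To make the retry behavior described above concrete, here is a minimal boto3 sketch using the Standard retry mode; the `max_attempts` value is illustrative rather than Prowler's actual default wiring:

```python
import boto3
from botocore.config import Config

# Standard retry mode retries throttling errors and transient 5xx
# responses with exponential backoff, as described above.
retry_config = Config(retries={"mode": "standard", "max_attempts": 5})

s3 = boto3.client("s3", config=retry_config)
s3.list_buckets()  # each call now retries according to the config above
```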
@@ -1,26 +1,26 @@
 # AWS CloudShell

-Prowler can be easily executed in AWS CloudShell but it has some prerequisites to be able to do so. AWS CloudShell is a container running with `Amazon Linux release 2 (Karoo)` that comes with Python 3.7; since Prowler requires Python >= 3.9, we need to first install a newer version of Python. Follow the steps below to successfully execute Prowler v3 in AWS CloudShell:
-
-- First install all dependencies and then Python; in this case we need to compile it because there is no package available at the time this document is written:
-```
-sudo yum -y install gcc openssl-devel bzip2-devel libffi-devel
-wget https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tgz
-tar zxf Python-3.9.16.tgz
-cd Python-3.9.16/
-./configure --enable-optimizations
-sudo make altinstall
-python3.9 --version
-cd
-```
-- Once Python 3.9 is available we can install Prowler from pip:
-```
-pip3.9 install prowler
-```
-- Now enjoy Prowler:
-```
+## Installation
+After the migration of AWS CloudShell from Amazon Linux 2 to Amazon Linux 2023 [[1]](https://aws.amazon.com/about-aws/whats-new/2023/12/aws-cloudshell-migrated-al2023/) [[2]](https://docs.aws.amazon.com/cloudshell/latest/userguide/cloudshell-AL2023-migration.html), there is no longer a need to manually compile Python 3.9 as it's already included in AL2023. Prowler can thus be easily installed following the Generic method of installation via pip:
+```shell
+pip install prowler
 prowler -v
-prowler
 ```

-- To download the results from AWS CloudShell, select Actions -> Download File and add the full path of each file. For the CSV file it will be something like `/home/cloudshell-user/output/prowler-output-123456789012-20221220191331.csv`
+## Download Files
+
+To download the results from AWS CloudShell, select Actions -> Download File and add the full path of each file. For the CSV file it will be something like `/home/cloudshell-user/output/prowler-output-123456789012-20221220191331.csv`
+
+## Clone Prowler from Github
+
+The limited storage that AWS CloudShell provides for the user's home directory causes issues when installing the poetry dependencies to run Prowler from GitHub. Here is a workaround:
+```shell
+git clone https://github.com/prowler-cloud/prowler.git
+cd prowler
+pip install poetry
+mkdir /tmp/pypoetry
+poetry config cache-dir /tmp/pypoetry
+poetry shell
+poetry install
+python prowler.py -v
+```
@@ -23,6 +23,15 @@ prowler aws -R arn:aws:iam::<account_id>:role/<role_name>
 prowler aws -T/--session-duration <seconds> -I/--external-id <external_id> -R arn:aws:iam::<account_id>:role/<role_name>
 ```

+## Custom Role Session Name
+
+Prowler can use your custom Role Session name with:
+```console
+prowler aws --role-session-name <role_session_name>
+```
+
+> It defaults to `ProwlerAssessmentSession`
+
 ## STS Endpoint Region

 If you are using Prowler in AWS regions that are not enabled by default, you need to use the argument `--sts-endpoint-region` to point the AWS STS API calls `assume-role` and `get-caller-identity` to the non-default region, e.g.: `prowler aws --sts-endpoint-region eu-south-2`.
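Underneath, these flags map onto the STS `assume-role` call; as a rough boto3 sketch, with an illustrative account ID and role name:

```python
import boto3

# Regional STS endpoint for a non-default region
# (what --sts-endpoint-region controls).
sts = boto3.client(
    "sts",
    region_name="eu-south-2",
    endpoint_url="https://sts.eu-south-2.amazonaws.com",
)

# RoleArn is illustrative; RoleSessionName is what --role-session-name
# controls (default: ProwlerAssessmentSession).
credentials = sts.assume_role(
    RoleArn="arn:aws:iam::123456789012:role/ProwlerExecRole",
    RoleSessionName="ProwlerAssessmentSession",
)["Credentials"]
print(credentials["Expiration"])
```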
docs/tutorials/azure/use-non-default-cloud.md (16 lines, new file)

@@ -0,0 +1,16 @@
# Use non-default Azure regions

Microsoft provides clouds for compliance with regional laws, which are available for your use.
By default, Prowler uses the `AzureCloud` cloud, which is the commercial one (you can list all the available clouds with `az cloud list --output table`).

At the time of writing this documentation, the available Azure clouds from different regions are the following:
- AzureCloud
- AzureChinaCloud
- AzureUSGovernment
- AzureGermanCloud

If you want to change the default one you must include the flag `--azure-region`, i.e.:

```console
prowler azure --az-cli-auth --azure-region AzureChinaCloud
```
docs/tutorials/custom-checks-metadata.md (43 lines, new file)

@@ -0,0 +1,43 @@
# Custom Checks Metadata

In certain organizations, the severity of specific checks might differ from the default values defined in the check's metadata. For instance, while `s3_bucket_level_public_access_block` could be deemed `critical` for some organizations, others might assign a different severity level.

The custom metadata option offers a means to override default metadata set by Prowler.

You can utilize `--custom-checks-metadata-file` followed by the path to your custom checks metadata YAML file.

## Available Fields

The list of supported check metadata fields that can be overridden is as follows:

- Severity

## File Syntax

This feature is available for all the providers supported in Prowler since the metadata format is common between all the providers. The following is the YAML format for the custom checks metadata file:
```yaml title="custom_checks_metadata.yaml"
CustomChecksMetadata:
  aws:
    Checks:
      s3_bucket_level_public_access_block:
        Severity: high
      s3_bucket_no_mfa_delete:
        Severity: high
  azure:
    Checks:
      storage_infrastructure_encryption_is_enabled:
        Severity: medium
  gcp:
    Checks:
      compute_instance_public_ip:
        Severity: critical
```

## Usage

Executing the following command will assess all checks and generate a report while overriding the metadata for those checks:
```sh
prowler <provider> --custom-checks-metadata-file <path/to/custom/metadata>
```

This customization feature enables organizations to tailor the severity of specific checks based on their unique requirements, providing greater flexibility in security assessment and reporting.
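To make the override concrete, here is a minimal, self-contained Python sketch of what applying such a YAML file to check metadata amounts to; the `CheckMetadata` class is a hypothetical stand-in for Prowler's real metadata model (the actual implementation lives in `prowler/lib/check/custom_checks_metadata.py`, shown later in this diff):

```python
import yaml

class CheckMetadata:
    """Hypothetical stand-in for a check's metadata object."""
    def __init__(self, check_id, severity):
        self.CheckID = check_id
        self.Severity = severity

def apply_custom_severities(checks, yaml_path, provider):
    """Override each check's Severity from a CustomChecksMetadata YAML file."""
    with open(yaml_path) as f:
        overrides = yaml.safe_load(f)["CustomChecksMetadata"][provider]["Checks"]
    for check in checks:
        if check.CheckID in overrides:
            check.Severity = overrides[check.CheckID]["Severity"]
    return checks

checks = [CheckMetadata("s3_bucket_level_public_access_block", "critical")]
checks = apply_custom_severities(checks, "custom_checks_metadata.yaml", "aws")
print(checks[0].Severity)  # -> "high", given the example file above
```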
@@ -22,8 +22,4 @@ Prowler will follow the same credentials search as [Google authentication librar
 2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
 3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)

-Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
-
-- Viewer
-- Security Reviewer
-- Stackdriver Account Viewer
+Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the `Viewer` role to the member associated with the credentials.
docs/tutorials/parallel-execution.md (187 lines, new file)

@@ -0,0 +1,187 @@
# Parallel Execution

The strategy used here will be to execute Prowler once per service. You can modify this approach as per your requirements.

This can help for really large accounts, but please be aware of AWS API rate limits:

1. **Service-Specific Limits**: Each AWS service has its own rate limits. For instance, Amazon EC2 might have different rate limits for launching instances versus making API calls to describe instances.
2. **API Rate Limits**: Most of the rate limits in AWS are applied at the API level. Each API call to an AWS service counts towards the rate limit for that service.
3. **Throttling Responses**: When you exceed the rate limit for a service, AWS responds with a throttling error. In AWS SDKs, these are typically represented as `ThrottlingException` or `RateLimitExceeded` errors.

For information on Prowler's retrier configuration please refer to this [page](https://docs.prowler.cloud/en/latest/tutorials/aws/boto3-configuration/).

> Note: You might need to increase the `--aws-retries-max-attempts` parameter from the default value of 3. The retrier follows an exponential backoff strategy.

## Linux

Generate a list of services that Prowler supports, and populate this info into a file:

```bash
prowler aws --list-services | awk -F"- " '{print $2}' | sed '/^$/d' > services
```

Make any modifications for services you would like to skip scanning by modifying this file.

Then create a new Bash script file `parallel-prowler.sh` and add the following contents. Update the `profile` variable to the AWS CLI profile you want to run Prowler with.

```bash
#!/bin/bash

# Change these variables as needed
profile="your_profile"
account_id=$(aws sts get-caller-identity --profile "${profile}" --query 'Account' --output text)

echo "Executing in account: ${account_id}"

# Maximum number of concurrent processes
MAX_PROCESSES=5

# Loop through the services
while read service; do
  echo "$(date '+%Y-%m-%d %H:%M:%S'): Starting job for service: ${service}"

  # Run the command in the background
  (prowler -p "$profile" -s "$service" -F "${account_id}-${service}" --ignore-unused-services --only-logs; echo "$(date '+%Y-%m-%d %H:%M:%S') - ${service} has completed") &

  # Check if we have reached the maximum number of processes
  while [ $(jobs -r | wc -l) -ge ${MAX_PROCESSES} ]; do
    # Wait for a second before checking again
    sleep 1
  done
done < ./services

# Wait for all background processes to finish
wait
echo "All jobs completed"
```

Output will be stored in the `output/` folder that is in the same directory from which you executed the script.

## Windows

Generate a list of services that Prowler supports, and populate this info into a file:

```powershell
prowler aws --list-services | ForEach-Object {
    # Capture lines that are likely service names
    if ($_ -match '^\- \w+$') {
        $_.Trim().Substring(2)
    }
} | Where-Object {
    # Filter out empty or null lines
    $_ -ne $null -and $_ -ne ''
} | Set-Content -Path "services"
```

Make any modifications for services you would like to skip scanning by modifying this file.

Then create a new PowerShell script file `parallel-prowler.ps1` and add the following contents. Update the `$profile` variable to the AWS CLI profile you want to run Prowler with.

Change any parameters you would like when calling Prowler in the `Start-Job -ScriptBlock` section. Note that you need to keep the `--only-logs` parameter, else an encoding issue occurs when trying to render the progress bar and Prowler won't successfully execute.

```powershell
$profile = "your_profile"
$account_id = Invoke-Expression -Command "aws sts get-caller-identity --profile $profile --query 'Account' --output text"

Write-Host "Executing Prowler in $account_id"

# Maximum number of concurrent jobs
$MAX_PROCESSES = 5

# Read services from a file
$services = Get-Content -Path "services"

# Array to keep track of started jobs
$jobs = @()

foreach ($service in $services) {
    # Start the command as a job
    $job = Start-Job -ScriptBlock {
        prowler -p ${using:profile} -s ${using:service} -F "${using:account_id}-${using:service}" --ignore-unused-services --only-logs
        $endTimestamp = Get-Date -Format "yyyy-MM-dd HH:mm:ss"
        Write-Output "${endTimestamp} - $using:service has completed"
    }
    $jobs += $job
    Write-Host "$(Get-Date -Format 'yyyy-MM-dd HH:mm:ss') - Starting job for service: $service"

    # Check if we have reached the maximum number of jobs
    while (($jobs | Where-Object { $_.State -eq 'Running' }).Count -ge $MAX_PROCESSES) {
        Start-Sleep -Seconds 1
        # Check for any completed jobs and receive their output
        $completedJobs = $jobs | Where-Object { $_.State -eq 'Completed' }
        foreach ($completedJob in $completedJobs) {
            Receive-Job -Job $completedJob -Keep | ForEach-Object { Write-Host $_ }
            $jobs = $jobs | Where-Object { $_.Id -ne $completedJob.Id }
            Remove-Job -Job $completedJob
        }
    }
}

# Check for any remaining completed jobs
$remainingCompletedJobs = $jobs | Where-Object { $_.State -eq 'Completed' }
foreach ($remainingJob in $remainingCompletedJobs) {
    Receive-Job -Job $remainingJob -Keep | ForEach-Object { Write-Host $_ }
    Remove-Job -Job $remainingJob
}

Write-Host "$(Get-Date -Format 'yyyy-MM-dd HH:mm:ss') - All jobs completed"
```

Output will be stored in `C:\Users\YOUR-USER\Documents\output\`

## Combining the output files

Guidance is provided for the CSV file format. From the output directory, execute either the following Bash or PowerShell script. The script will collect the output from the CSV files, only include the header from the first file, and then output the result as CombinedCSV.csv in the current working directory.

There is no logic implemented in terms of which CSV files it will combine. If you have additional CSV files from other actions, such as running a quick inventory, you will need to move them out of the current (or any nested) directory, or move the output you want to combine into its own folder and run the script from there.

```bash
#!/bin/bash

# Initialize a variable to indicate the first file
firstFile=true

# Find all CSV files and loop through them
find . -name "*.csv" -print0 | while IFS= read -r -d '' file; do
  if [ "$firstFile" = true ]; then
    # For the first file, keep the header
    cat "$file" > CombinedCSV.csv
    firstFile=false
  else
    # For subsequent files, skip the header
    tail -n +2 "$file" >> CombinedCSV.csv
  fi
done
```

```powershell
# Get all CSV files from current directory and its subdirectories
$csvFiles = Get-ChildItem -Recurse -Filter "*.csv"

# Initialize a variable to track if it's the first file
$firstFile = $true

# Loop through each CSV file
foreach ($file in $csvFiles) {
    if ($firstFile) {
        # For the first file, keep the header and change the flag
        $combinedCsv = Import-Csv -Path $file.FullName
        $firstFile = $false
    } else {
        # For subsequent files, skip the header
        $tempCsv = Import-Csv -Path $file.FullName
        $combinedCsv += $tempCsv | Select-Object * -Skip 1
    }
}

# Export the combined data to a new CSV file
$combinedCsv | Export-Csv -Path "CombinedCSV.csv" -NoTypeInformation
```

## TODO: Additional Improvements

Some services need to instantiate another service to perform a check. For instance, `cloudwatch` will instantiate Prowler's `iam` service to perform the `cloudwatch_cross_account_sharing_disabled` check. When the `iam` service is instantiated, it will run its `__init__` function and pull all the information required for that service. This provides an opportunity for an improvement to the above scripts: group related services together so that the `iam` service (or any other cross-service reference) isn't repeatedly instantiated — a sketch of this grouping follows the list below. A complete mapping between these services still needs further investigation, but these are the cross-references that have been noted:

* inspector2 needs lambda and ec2
* cloudwatch needs iam
* dlm needs ec2
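One way to act on the grouping idea in the TODO above is to merge each dependent service with its dependencies into a single Prowler invocation, so a shared service's `__init__` runs once per group. A minimal Python sketch, assuming the dependency map only contains the three cross-references noted above (anything beyond them is hypothetical):

```python
# Cross-service dependencies noted above; incomplete by the TODO's own admission.
DEPENDS_ON = {
    "inspector2": {"lambda", "ec2"},
    "cloudwatch": {"iam"},
    "dlm": {"ec2"},
}

def group_services(services):
    """Group each dependent service with its dependencies so one
    `prowler -s <group>` call instantiates the shared service once."""
    groups, seen = [], set()
    for service in services:
        if service in seen:
            continue
        group = {service} | DEPENDS_ON.get(service, set())
        seen |= group
        groups.append(sorted(group))
    return groups

services = ["inspector2", "cloudwatch", "dlm", "s3"]
for group in group_services(services):
    print("prowler aws -s", " ".join(group))
```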
@@ -43,46 +43,71 @@ Hereunder is the structure for each of the supported report formats by Prowler:
 

 ### CSV

-The following are the columns present in the CSV format:
+CSV format has a set of common columns for all the providers, and then provider-specific columns.
+The common columns are the following:
+
+- ASSESSMENT_START_TIME
+- FINDING_UNIQUE_ID
+- PROVIDER
+- CHECK_ID
+- CHECK_TITLE
+- CHECK_TYPE
+- STATUS
+- STATUS_EXTENDED
+- SERVICE_NAME
+- SUBSERVICE_NAME
+- SEVERITY
+- RESOURCE_TYPE
+- RESOURCE_DETAILS
+- RESOURCE_TAGS
+- DESCRIPTION
+- RISK
+- RELATED_URL
+- REMEDIATION_RECOMMENDATION_TEXT
+- REMEDIATION_RECOMMENDATION_URL
+- REMEDIATION_RECOMMENDATION_CODE_NATIVEIAC
+- REMEDIATION_RECOMMENDATION_CODE_TERRAFORM
+- REMEDIATION_RECOMMENDATION_CODE_CLI
+- REMEDIATION_RECOMMENDATION_CODE_OTHER
+- COMPLIANCE
+- CATEGORIES
+- DEPENDS_ON
+- RELATED_TO
+- NOTES
+
+And then the provider-specific columns:
+
+#### AWS

 - PROFILE
 - ACCOUNT_ID
-- CHECK_ID
-- CHECK_TITLE
-- CHECK_TYPE
-- STATUS
-- STATUS_EXTENDED
-- SERVICE_NAME
-- SUBSERVICE_NAME
-- SEVERITY
-- RESOURCE_ID
-- RESOURCE_ARN
-- RESOURCE_TYPE
-- RESOURCE_DETAILS
-- RESOURCE_TAGS
-- DESCRIPTION
-- COMPLIANCE
-- RISK
-- RELATED_URL
-- REMEDIATION_RECOMMENDATION_TEXT
-- REMEDIATION_RECOMMENDATION_URL
-- REMEDIATION_RECOMMENDATION_CODE_NATIVEIAC
-- REMEDIATION_RECOMMENDATION_CODE_TERRAFORM
-- REMEDIATION_RECOMMENDATION_CODE_CLI
-- REMEDIATION_RECOMMENDATION_CODE_OTHER
-- CATEGORIES
-- DEPENDS_ON
-- RELATED_TO
-- NOTES
 - ACCOUNT_NAME
 - ACCOUNT_EMAIL
 - ACCOUNT_ARN
 - ACCOUNT_ORG
 - ACCOUNT_TAGS
 - REGION
+- RESOURCE_ID
+- RESOURCE_ARN
+
+#### AZURE
+
+- TENANT_DOMAIN
+- SUBSCRIPTION
+- RESOURCE_ID
+- RESOURCE_NAME
+
+#### GCP
+
+- PROJECT_ID
+- LOCATION
+- RESOURCE_ID
+- RESOURCE_NAME

 > Since Prowler v3 the CSV column delimiter is the semicolon (`;`)

 ### JSON
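Given the semicolon delimiter noted for CSV above, a short sketch of post-processing a Prowler CSV with Python's standard library; the file name is illustrative:

```python
import csv
from collections import Counter

# Path is illustrative; point this at your own prowler-output-*.csv file.
with open("prowler-output-123456789012-20221220191331.csv", newline="") as f:
    findings = list(csv.DictReader(f, delimiter=";"))

# Tally failed findings by severity using the common columns listed above.
failed = Counter(row["SEVERITY"] for row in findings if row["STATUS"] == "FAIL")
print(failed)
```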
@@ -38,8 +38,10 @@ nav:
     - Logging: tutorials/logging.md
     - Allowlist: tutorials/allowlist.md
     - Check Aliases: tutorials/check-aliases.md
+    - Custom Metadata: tutorials/custom-checks-metadata.md
     - Ignore Unused Services: tutorials/ignore-unused-services.md
     - Pentesting: tutorials/pentesting.md
+    - Parallel Execution: tutorials/parallel-execution.md
     - Developer Guide: developer-guide/introduction.md
     - AWS:
       - Authentication: tutorials/aws/authentication.md
@@ -56,6 +58,7 @@ nav:
       - Boto3 Configuration: tutorials/aws/boto3-configuration.md
     - Azure:
       - Authentication: tutorials/azure/authentication.md
+      - Non default clouds: tutorials/azure/use-non-default-cloud.md
       - Subscriptions: tutorials/azure/subscriptions.md
     - Google Cloud:
       - Authentication: tutorials/gcp/authentication.md
poetry.lock (735 changes, generated)

File diff suppressed because it is too large.
@@ -26,6 +26,10 @@ from prowler.lib.check.check import (
 )
 from prowler.lib.check.checks_loader import load_checks_to_execute
 from prowler.lib.check.compliance import update_checks_metadata_with_compliance
+from prowler.lib.check.custom_checks_metadata import (
+    parse_custom_checks_metadata_file,
+    update_checks_metadata,
+)
 from prowler.lib.cli.parser import ProwlerArgumentParser
 from prowler.lib.logger import logger, set_logging_config
 from prowler.lib.outputs.compliance import display_compliance_table
@@ -67,6 +71,7 @@ def prowler():
     checks_folder = args.checks_folder
     severities = args.severity
     compliance_framework = args.compliance
+    custom_checks_metadata_file = args.custom_checks_metadata_file

     if not args.no_banner:
         print_banner(args)
@@ -96,9 +101,19 @@ def prowler():

     bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider)
     # Complete checks metadata with the compliance framework specification
-    update_checks_metadata_with_compliance(
+    bulk_checks_metadata = update_checks_metadata_with_compliance(
         bulk_compliance_frameworks, bulk_checks_metadata
     )
+    # Update checks metadata if the --custom-checks-metadata-file is present
+    custom_checks_metadata = None
+    if custom_checks_metadata_file:
+        custom_checks_metadata = parse_custom_checks_metadata_file(
+            provider, custom_checks_metadata_file
+        )
+        bulk_checks_metadata = update_checks_metadata(
+            bulk_checks_metadata, custom_checks_metadata
+        )

     if args.list_compliance:
         print_compliance_frameworks(bulk_compliance_frameworks)
         sys.exit()
@@ -174,7 +189,11 @@ def prowler():
     findings = []
     if len(checks_to_execute):
         findings = execute_checks(
-            checks_to_execute, provider, audit_info, audit_output_options
+            checks_to_execute,
+            provider,
+            audit_info,
+            audit_output_options,
+            custom_checks_metadata,
         )
     else:
         logger.error(
@@ -246,7 +265,10 @@ def prowler():
         for region in security_hub_regions:
             # Save the regions where AWS Security Hub is enabled
             if verify_security_hub_integration_enabled_per_region(
-                region, audit_info.audit_session
+                audit_info.audited_partition,
+                region,
+                audit_info.audit_session,
+                audit_info.audited_account,
             ):
                 aws_security_enabled_regions.append(region)
@@ -211,6 +211,31 @@
                 "iam_avoid_root_usage"
             ]
         },
+        {
+            "Id": "op.acc.4.aws.iam.8",
+            "Description": "Proceso de gestión de derechos de acceso",
+            "Attributes": [
+                {
+                    "IdGrupoControl": "op.acc.4",
+                    "Marco": "operacional",
+                    "Categoria": "control de acceso",
+                    "DescripcionControl": "Se restringirá todo acceso a las acciones especificadas para el usuario root de una cuenta.",
+                    "Nivel": "alto",
+                    "Tipo": "requisito",
+                    "Dimensiones": [
+                        "confidencialidad",
+                        "integridad",
+                        "trazabilidad",
+                        "autenticidad"
+                    ],
+                    "ModoEjecucion": "automático"
+                }
+            ],
+            "Checks": [
+                "organizations_account_part_of_organizations",
+                "organizations_scp_check_deny_regions"
+            ]
+        },
         {
             "Id": "op.acc.4.aws.iam.9",
             "Description": "Proceso de gestión de derechos de acceso",
@@ -1121,6 +1146,30 @@
                 "cloudtrail_insights_exist"
             ]
         },
+        {
+            "Id": "op.exp.8.r1.aws.ct.3",
+            "Description": "Revisión de los registros",
+            "Attributes": [
+                {
+                    "IdGrupoControl": "op.exp.8.r1",
+                    "Marco": "operacional",
+                    "Categoria": "explotación",
+                    "DescripcionControl": "Registrar los eventos de lectura y escritura de datos.",
+                    "Nivel": "alto",
+                    "Tipo": "refuerzo",
+                    "Dimensiones": [
+                        "trazabilidad"
+                    ],
+                    "ModoEjecucion": "automático"
+                }
+            ],
+            "Checks": [
+                "cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled",
+                "cloudtrail_s3_dataevents_write_enabled",
+                "cloudtrail_s3_dataevents_read_enabled",
+                "cloudtrail_insights_exist"
+            ]
+        },
         {
             "Id": "op.exp.8.r1.aws.ct.4",
             "Description": "Revisión de los registros",
@@ -1233,6 +1282,33 @@
                 "iam_role_cross_service_confused_deputy_prevention"
             ]
         },
+        {
+            "Id": "op.exp.8.r4.aws.ct.1",
+            "Description": "Control de acceso",
+            "Attributes": [
+                {
+                    "IdGrupoControl": "op.exp.8.r4",
+                    "Marco": "operacional",
+                    "Categoria": "explotación",
+                    "DescripcionControl": "Asignar correctamente las políticas AWS IAM para el acceso y borrado de los registros y sus copias de seguridad haciendo uso del principio de mínimo privilegio.",
+                    "Nivel": "alto",
+                    "Tipo": "refuerzo",
+                    "Dimensiones": [
+                        "trazabilidad"
+                    ],
+                    "ModoEjecucion": "automático"
+                }
+            ],
+            "Checks": [
+                "iam_policy_allows_privilege_escalation",
+                "iam_customer_attached_policy_no_administrative_privileges",
+                "iam_customer_unattached_policy_no_administrative_privilege",
+                "iam_no_custom_policy_permissive_role_assumption",
+                "iam_policy_attached_only_to_group_or_roles",
+                "iam_role_cross_service_confused_deputy_prevention",
+                "iam_policy_no_full_access_to_cloudtrail"
+            ]
+        },
         {
             "Id": "op.exp.8.r4.aws.ct.2",
             "Description": "Control de acceso",
@@ -2110,7 +2186,7 @@
             }
         ],
         "Checks": [
-            "networkfirewall_in_all_vpc"
+            "fms_policy_compliant"
         ]
     },
     {
@@ -2251,6 +2327,31 @@
                 "cloudfront_distributions_https_enabled"
             ]
         },
+        {
+            "Id": "mp.com.4.aws.ws.1",
+            "Description": "Separación de flujos de información en la red",
+            "Attributes": [
+                {
+                    "IdGrupoControl": "mp.com.4",
+                    "Marco": "medidas de protección",
+                    "Categoria": "segregación de redes",
+                    "DescripcionControl": "Se deberán abrir solo los puertos necesarios para el uso del servicio AWS WorkSpaces.",
+                    "Nivel": "alto",
+                    "Tipo": "requisito",
+                    "Dimensiones": [
+                        "confidencialidad",
+                        "integridad",
+                        "trazabilidad",
+                        "autenticidad",
+                        "disponibilidad"
+                    ],
+                    "ModoEjecucion": "automático"
+                }
+            ],
+            "Checks": [
+                "workspaces_vpc_2private_1public_subnets_nat"
+            ]
+        },
         {
             "Id": "mp.com.4.aws.vpc.1",
             "Description": "Separación de flujos de información en la red",
@@ -2323,7 +2424,8 @@
             }
         ],
         "Checks": [
-            "vpc_subnet_separate_private_public"
+            "vpc_subnet_separate_private_public",
+            "vpc_different_regions"
         ]
     },
     {
@@ -2370,7 +2472,8 @@
             }
         ],
         "Checks": [
-            "vpc_subnet_different_az"
+            "vpc_subnet_different_az",
+            "vpc_different_regions"
         ]
     },
     {
@@ -11,7 +11,7 @@ from prowler.lib.logger import logger

 timestamp = datetime.today()
 timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
-prowler_version = "3.11.0"
+prowler_version = "3.12.0"
 html_logo_url = "https://github.com/prowler-cloud/prowler/"
 html_logo_img = "https://user-images.githubusercontent.com/3985464/113734260-7ba06900-96fb-11eb-82bc-d4f68a1e2710.png"
 square_logo_img = "https://user-images.githubusercontent.com/38561120/235905862-9ece5bd7-9aa3-4e48-807a-3a9035eb8bfb.png"
@@ -22,6 +22,9 @@ gcp_logo = "https://user-images.githubusercontent.com/38561120/235928332-eb4accd
 orange_color = "\033[38;5;208m"
 banner_color = "\033[1;92m"

+# Severities
+valid_severities = ["critical", "high", "medium", "low", "informational"]
+
 # Compliance
 actual_directory = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))

@@ -70,7 +73,9 @@ def check_current_version():
         if latest_version != prowler_version:
             return f"{prowler_version_string} (latest is {latest_version}, upgrade for the latest features)"
         else:
-            return f"{prowler_version_string} (it is the latest version, yay!)"
+            return (
+                f"{prowler_version_string} (You are running the latest version, yay!)"
+            )
     except requests.RequestException:
         return f"{prowler_version_string}"
     except Exception:
@@ -2,7 +2,7 @@
 aws:

   # AWS Global Configuration
-  # aws.allowlist_non_default_regions --> Set to True to allowlist failed findings in non-default regions for GuardDuty, SecurityHub, DRS and Config
+  # aws.allowlist_non_default_regions --> Set to True to allowlist failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
   allowlist_non_default_regions: False
   # If you want to allowlist/mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w allowlist.yaml`:
   # Allowlist:
@@ -69,8 +69,8 @@ aws:
   # AWS Organizations
   # organizations_scp_check_deny_regions
   # organizations_enabled_regions: [
-  #   'eu-central-1',
-  #   'eu-west-1',
+  #   "eu-central-1",
+  #   "eu-west-1",
   #   "us-east-1"
   # ]
   organizations_enabled_regions: []
prowler/config/custom_checks_metadata_example.yaml (15 lines, new file)

@@ -0,0 +1,15 @@
CustomChecksMetadata:
  aws:
    Checks:
      s3_bucket_level_public_access_block:
        Severity: high
      s3_bucket_no_mfa_delete:
        Severity: high
  azure:
    Checks:
      storage_infrastructure_encryption_is_enabled:
        Severity: medium
  gcp:
    Checks:
      compute_instance_public_ip:
        Severity: critical
@@ -16,6 +16,7 @@ from colorama import Fore, Style
 import prowler
 from prowler.config.config import orange_color
 from prowler.lib.check.compliance_models import load_compliance_framework
+from prowler.lib.check.custom_checks_metadata import update_check_metadata
 from prowler.lib.check.models import Check, load_check_metadata
 from prowler.lib.logger import logger
 from prowler.lib.outputs.outputs import report
@@ -106,14 +107,20 @@ def exclude_services_to_run(

 # Load checks from checklist.json
 def parse_checks_from_file(input_file: str, provider: str) -> set:
-    checks_to_execute = set()
-    with open_file(input_file) as f:
-        json_file = parse_json_file(f)
+    """parse_checks_from_file returns a set of checks read from the given file"""
+    try:
+        checks_to_execute = set()
+        with open_file(input_file) as f:
+            json_file = parse_json_file(f)

-    for check_name in json_file[provider]:
-        checks_to_execute.add(check_name)
+        for check_name in json_file[provider]:
+            checks_to_execute.add(check_name)

-    return checks_to_execute
+        return checks_to_execute
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
+        )


 # Load checks from custom folder
@@ -309,7 +316,7 @@ def print_checks(
 def parse_checks_from_compliance_framework(
     compliance_frameworks: list, bulk_compliance_frameworks: dict
 ) -> list:
-    """Parse checks from compliance frameworks specification"""
+    """parse_checks_from_compliance_framework returns a set of checks from the given compliance_frameworks"""
     checks_to_execute = set()
     try:
         for framework in compliance_frameworks:
@@ -416,6 +423,7 @@ def execute_checks(
     provider: str,
     audit_info: Any,
     audit_output_options: Provider_Output_Options,
+    custom_checks_metadata: Any,
 ) -> list:
     # List to store all the check's findings
     all_findings = []
@@ -461,6 +469,7 @@ def execute_checks(
                 audit_info,
                 services_executed,
                 checks_executed,
+                custom_checks_metadata,
             )
             all_findings.extend(check_findings)

@@ -506,6 +515,7 @@ def execute_checks(
                     audit_info,
                     services_executed,
                     checks_executed,
+                    custom_checks_metadata,
                 )
                 all_findings.extend(check_findings)

@@ -531,6 +541,7 @@ def execute(
     audit_info: Any,
     services_executed: set,
     checks_executed: set,
+    custom_checks_metadata: Any,
 ):
     # Import check module
     check_module_path = (
@@ -541,6 +552,10 @@ def execute(
     check_to_execute = getattr(lib, check_name)
     c = check_to_execute()

+    # Update check metadata to reflect that in the outputs
+    if custom_checks_metadata and custom_checks_metadata["Checks"].get(c.CheckID):
+        c = update_check_metadata(c, custom_checks_metadata["Checks"][c.CheckID])
+
     # Run check
     check_findings = run_check(c, audit_output_options)

@@ -598,22 +613,32 @@ def update_audit_metadata(
     )


-def recover_checks_from_service(service_list: list, provider: str) -> list:
-    checks = set()
-    service_list = [
-        "awslambda" if service == "lambda" else service for service in service_list
-    ]
-    for service in service_list:
-        modules = recover_checks_from_provider(provider, service)
-        if not modules:
-            logger.error(f"Service '{service}' does not have checks.")
-        else:
-            for check_module in modules:
-                # Recover check name and module name from import path
-                # Format: "providers.{provider}.services.{service}.{check_name}.{check_name}"
-                check_name = check_module[0].split(".")[-1]
-                # If the service is present in the group list passed as parameters
-                # if service_name in group_list: checks_from_arn.add(check_name)
-                checks.add(check_name)
-    return checks
+def recover_checks_from_service(service_list: list, provider: str) -> set:
+    """
+    Recover all checks from the selected provider and service
+
+    Returns a set of checks from the given services
+    """
+    try:
+        checks = set()
+        service_list = [
+            "awslambda" if service == "lambda" else service for service in service_list
+        ]
+        for service in service_list:
+            service_checks = recover_checks_from_provider(provider, service)
+            if not service_checks:
+                logger.error(f"Service '{service}' does not have checks.")
+
+            else:
+                for check in service_checks:
+                    # Recover check name and module name from import path
+                    # Format: "providers.{provider}.services.{service}.{check_name}.{check_name}"
+                    check_name = check[0].split(".")[-1]
+                    # If the service is present in the group list passed as parameters
+                    # if service_name in group_list: checks_from_arn.add(check_name)
+                    checks.add(check_name)
+        return checks
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+        )
@@ -1,5 +1,6 @@
 from colorama import Fore, Style

+from prowler.config.config import valid_severities
 from prowler.lib.check.check import (
     parse_checks_from_compliance_framework,
     parse_checks_from_file,
@@ -10,7 +11,6 @@ from prowler.lib.logger import logger


 # Generate the list of checks to execute
-# PENDING Test for this function
 def load_checks_to_execute(
     bulk_checks_metadata: dict,
     bulk_compliance_frameworks: dict,
@@ -22,69 +22,93 @@ def load_checks_to_execute(
     categories: set,
     provider: str,
 ) -> set:
-    """Generate the list of checks to execute based on the cloud provider and input arguments specified"""
-    checks_to_execute = set()
+    """Generate the list of checks to execute based on the cloud provider and the input arguments given"""
+    try:
+        # Local subsets
+        checks_to_execute = set()
+        check_aliases = {}
+        check_severities = {key: [] for key in valid_severities}
+        check_categories = {}

-    # Handle if there are checks passed using -c/--checks
-    if check_list:
-        for check_name in check_list:
-            checks_to_execute.add(check_name)
+        # First, loop over the bulk_checks_metadata to extract the needed subsets
+        for check, metadata in bulk_checks_metadata.items():
+            # Aliases
+            for alias in metadata.CheckAliases:
+                check_aliases[alias] = check

-    # Handle if there are some severities passed using --severity
-    elif severities:
-        for check in bulk_checks_metadata:
-            # Check check's severity
-            if bulk_checks_metadata[check].Severity in severities:
-                checks_to_execute.add(check)
+            # Severities
+            if metadata.Severity:
+                check_severities[metadata.Severity].append(check)

-    # Handle if there are checks passed using -C/--checks-file
-    elif checks_file:
-        try:
+            # Categories
+            for category in metadata.Categories:
+                if category not in check_categories:
+                    check_categories[category] = []
+                check_categories[category].append(check)
+
+        # Handle if there are checks passed using -c/--checks
+        if check_list:
+            for check_name in check_list:
+                checks_to_execute.add(check_name)
+
+        # Handle if there are some severities passed using --severity
+        elif severities:
+            for severity in severities:
+                checks_to_execute.update(check_severities[severity])
+
+            if service_list:
+                checks_to_execute = (
+                    recover_checks_from_service(service_list, provider)
+                    & checks_to_execute
+                )
+
+        # Handle if there are checks passed using -C/--checks-file
+        elif checks_file:
             checks_to_execute = parse_checks_from_file(checks_file, provider)
-        except Exception as e:
-            logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}")

-    # Handle if there are services passed using -s/--services
-    elif service_list:
-        checks_to_execute = recover_checks_from_service(service_list, provider)
+        # Handle if there are services passed using -s/--services
+        elif service_list:
+            checks_to_execute = recover_checks_from_service(service_list, provider)

-    # Handle if there are compliance frameworks passed using --compliance
-    elif compliance_frameworks:
-        try:
+        # Handle if there are compliance frameworks passed using --compliance
+        elif compliance_frameworks:
             checks_to_execute = parse_checks_from_compliance_framework(
                 compliance_frameworks, bulk_compliance_frameworks
             )
-        except Exception as e:
-            logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}")

-    # Handle if there are categories passed using --categories
-    elif categories:
-        for cat in categories:
-            for check in bulk_checks_metadata:
-                # Check check's categories
-                if cat in bulk_checks_metadata[check].Categories:
-                    checks_to_execute.add(check)
+        # Handle if there are categories passed using --categories
+        elif categories:
+            for category in categories:
+                checks_to_execute.update(check_categories[category])

-    # If there are no checks passed as argument
-    else:
-        try:
+        # If there are no checks passed as argument
+        else:
             # Get all check modules to run with the specific provider
             checks = recover_checks_from_provider(provider)
-        except Exception as e:
-            logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}")
-        else:
+
             for check_info in checks:
                 # Recover check name from import path (last part)
                 # Format: "providers.{provider}.services.{service}.{check_name}.{check_name}"
                 check_name = check_info[0]
                 checks_to_execute.add(check_name)

-        # Get Check Aliases mapping
-        check_aliases = {}
-        for check, metadata in bulk_checks_metadata.items():
-            for alias in metadata.CheckAliases:
-                check_aliases[alias] = check
-
         # Check Aliases
         checks_to_execute = update_checks_to_execute_with_aliases(
             checks_to_execute, check_aliases
         )

+        return checks_to_execute
+
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
+        )
+
+
+def update_checks_to_execute_with_aliases(
+    checks_to_execute: set, check_aliases: dict
+) -> set:
+    """update_checks_to_execute_with_aliases returns the checks_to_execute updated using the check aliases."""
     # Verify if any input check is an alias of another check
     for input_check in checks_to_execute:
         if (
@@ -97,5 +121,4 @@ def load_checks_to_execute(
             print(
                 f"\nUsing alias {Fore.YELLOW}{input_check}{Style.RESET_ALL} for check {Fore.YELLOW}{check_aliases[input_check]}{Style.RESET_ALL}...\n"
             )
-
     return checks_to_execute
prowler/lib/check/custom_checks_metadata.py (77 lines, new file)

@@ -0,0 +1,77 @@
import sys

import yaml
from jsonschema import validate

from prowler.config.config import valid_severities
from prowler.lib.logger import logger

custom_checks_metadata_schema = {
    "type": "object",
    "properties": {
        "Checks": {
            "type": "object",
            "patternProperties": {
                ".*": {
                    "type": "object",
                    "properties": {
                        "Severity": {
                            "type": "string",
                            "enum": valid_severities,
                        }
                    },
                    "required": ["Severity"],
                    "additionalProperties": False,
                }
            },
            "additionalProperties": False,
        }
    },
    "required": ["Checks"],
    "additionalProperties": False,
}


def parse_custom_checks_metadata_file(provider: str, custom_checks_metadata_file):
    """parse_custom_checks_metadata_file returns the custom_checks_metadata object if it is valid, otherwise it aborts the execution logging the ValidationError."""
    try:
        with open(custom_checks_metadata_file) as f:
            custom_checks_metadata = yaml.safe_load(f)["CustomChecksMetadata"][provider]
        validate(custom_checks_metadata, schema=custom_checks_metadata_schema)
        return custom_checks_metadata
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
        )
        sys.exit(1)


def update_checks_metadata(bulk_checks_metadata, custom_checks_metadata):
    """update_checks_metadata returns the bulk_checks_metadata with the check's metadata updated based on the custom_checks_metadata provided."""
    try:
        # Update checks metadata from the CustomChecksMetadata file
        for check, custom_metadata in custom_checks_metadata["Checks"].items():
            check_metadata = bulk_checks_metadata.get(check)
            if check_metadata:
                bulk_checks_metadata[check] = update_check_metadata(
                    check_metadata, custom_metadata
                )
        return bulk_checks_metadata
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
        )
        sys.exit(1)


def update_check_metadata(check_metadata, custom_metadata):
    """update_check_metadata updates the check_metadata fields present in the custom_metadata and returns the updated version of the check_metadata. If some field is not present or valid, the check_metadata is returned with its original fields."""
    try:
        if custom_metadata:
            for attribute in custom_metadata:
                try:
                    setattr(check_metadata, attribute, custom_metadata[attribute])
                except ValueError:
                    pass
    finally:
        return check_metadata
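Tying the new module together, a sketch of how the two entry points above are meant to be called (mirroring the `__main__.py` wiring earlier in this diff); the bulk metadata dict here is a simplified stand-in for what Prowler's real metadata loader returns:

```python
from types import SimpleNamespace

from prowler.lib.check.custom_checks_metadata import (
    parse_custom_checks_metadata_file,
    update_checks_metadata,
)

# Validate and load the per-provider overrides (aborts on schema errors).
custom = parse_custom_checks_metadata_file(
    "aws", "prowler/config/custom_checks_metadata_example.yaml"
)

# Stand-in for the real bulk checks metadata; any object with a settable
# Severity attribute works for this sketch.
bulk = {"s3_bucket_no_mfa_delete": SimpleNamespace(Severity="medium")}

bulk = update_checks_metadata(bulk, custom)
print(bulk["s3_bucket_no_mfa_delete"].Severity)  # -> "high"
```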
@@ -7,6 +7,7 @@ from prowler.config.config import (
     check_current_version,
     default_config_file_path,
     default_output_directory,
+    valid_severities,
 )
 from prowler.providers.common.arguments import (
     init_providers_parser,
@@ -49,6 +50,7 @@ Detailed documentation at https://docs.prowler.cloud
         self.__init_exclude_checks_parser__()
         self.__init_list_checks_parser__()
         self.__init_config_parser__()
+        self.__init_custom_checks_metadata_parser__()

         # Init Providers Arguments
         init_providers_parser(self)
@@ -220,11 +222,11 @@ Detailed documentation at https://docs.prowler.cloud
         group.add_argument(
             "-s", "--services", nargs="+", help="List of services to be executed."
         )
-        group.add_argument(
+        common_checks_parser.add_argument(
             "--severity",
             nargs="+",
-            help="List of severities to be executed [informational, low, medium, high, critical]",
-            choices=["informational", "low", "medium", "high", "critical"],
+            help=f"List of severities to be executed {valid_severities}",
+            choices=valid_severities,
         )
         group.add_argument(
             "--compliance",
@@ -286,3 +288,15 @@ Detailed documentation at https://docs.prowler.cloud
             default=default_config_file_path,
             help="Set configuration file path",
         )
+
+    def __init_custom_checks_metadata_parser__(self):
+        # CustomChecksMetadata
+        custom_checks_metadata_subparser = (
+            self.common_providers_parser.add_argument_group("Custom Checks Metadata")
+        )
+        custom_checks_metadata_subparser.add_argument(
+            "--custom-checks-metadata-file",
+            nargs="?",
+            default=None,
+            help="Path for the custom checks metadata YAML file. See example prowler/config/custom_checks_metadata_example.yaml for reference and format. See more in https://docs.prowler.cloud/en/latest/tutorials/custom-checks-metadata/",
+        )
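A minimal self-contained sketch of the same argparse argument-group pattern (the parser object and file path here are illustrative, not Prowler's real parser):

import argparse

parser = argparse.ArgumentParser(prog="prowler")
metadata_group = parser.add_argument_group("Custom Checks Metadata")
metadata_group.add_argument(
    "--custom-checks-metadata-file",
    nargs="?",
    default=None,
    help="Path to the custom checks metadata YAML file",
)
args = parser.parse_args(["--custom-checks-metadata-file", "./metadata.yaml"])
print(args.custom_checks_metadata_file)  # -> ./metadata.yaml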
@@ -401,7 +401,8 @@ def display_compliance_table(
                     "Bajo": 0,
                 }
             if finding.status == "FAIL":
-                fail_count += 1
+                if attribute.Tipo != "recomendacion":
+                    fail_count += 1
                 marcos[marco_categoria][
                     "Estado"
                 ] = f"{Fore.RED}NO CUMPLE{Style.RESET_ALL}"
@@ -12,8 +12,6 @@ from prowler.config.config import (
 from prowler.lib.logger import logger
 from prowler.lib.outputs.html import add_html_header
 from prowler.lib.outputs.models import (
-    Aws_Check_Output_CSV,
-    Azure_Check_Output_CSV,
     Check_Output_CSV_AWS_CIS,
     Check_Output_CSV_AWS_ISO27001_2013,
     Check_Output_CSV_AWS_Well_Architected,
@@ -21,19 +19,18 @@ from prowler.lib.outputs.models import (
     Check_Output_CSV_GCP_CIS,
     Check_Output_CSV_Generic_Compliance,
     Check_Output_MITRE_ATTACK,
-    Gcp_Check_Output_CSV,
     generate_csv_fields,
 )
 from prowler.lib.utils.utils import file_exists, open_file
 from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
 from prowler.providers.azure.lib.audit_info.models import Azure_Audit_Info
+from prowler.providers.common.outputs import get_provider_output_model
 from prowler.providers.gcp.lib.audit_info.models import GCP_Audit_Info


 def initialize_file_descriptor(
     filename: str,
     output_mode: str,
-    audit_info: AWS_Audit_Info,
+    audit_info: Any,
     format: Any = None,
 ) -> TextIOWrapper:
     """Open/Create the output file. If needed include headers or the required format"""
@@ -75,27 +72,15 @@ def fill_file_descriptors(output_modes, output_directory, output_filename, audit
     for output_mode in output_modes:
         if output_mode == "csv":
             filename = f"{output_directory}/{output_filename}{csv_file_suffix}"
-            if isinstance(audit_info, AWS_Audit_Info):
-                file_descriptor = initialize_file_descriptor(
-                    filename,
-                    output_mode,
-                    audit_info,
-                    Aws_Check_Output_CSV,
-                )
-            if isinstance(audit_info, Azure_Audit_Info):
-                file_descriptor = initialize_file_descriptor(
-                    filename,
-                    output_mode,
-                    audit_info,
-                    Azure_Check_Output_CSV,
-                )
-            if isinstance(audit_info, GCP_Audit_Info):
-                file_descriptor = initialize_file_descriptor(
-                    filename,
-                    output_mode,
-                    audit_info,
-                    Gcp_Check_Output_CSV,
-                )
+            output_model = get_provider_output_model(
+                audit_info.__class__.__name__
+            )
+            file_descriptor = initialize_file_descriptor(
+                filename,
+                output_mode,
+                audit_info,
+                output_model,
+            )
             file_descriptors.update({output_mode: file_descriptor})

         elif output_mode == "json":
@@ -338,8 +338,9 @@ def add_html_footer(output_filename, output_directory):
 def get_aws_html_assessment_summary(audit_info):
     try:
         if isinstance(audit_info, AWS_Audit_Info):
-            if not audit_info.profile:
-                audit_info.profile = "ENV"
+            profile = (
+                audit_info.profile if audit_info.profile is not None else "default"
+            )
             if isinstance(audit_info.audited_regions, list):
                 audited_regions = " ".join(audit_info.audited_regions)
             elif not audit_info.audited_regions:
@@ -361,7 +362,7 @@ def get_aws_html_assessment_summary(audit_info):
                 </li>
                 <li class="list-group-item">
                     <b>AWS-CLI Profile:</b> """
-                + audit_info.profile
+                + profile
                 + """
                 </li>
                 <li class="list-group-item">
@@ -406,7 +407,7 @@ def get_azure_html_assessment_summary(audit_info):
         if isinstance(audit_info, Azure_Audit_Info):
             printed_subscriptions = []
             for key, value in audit_info.identity.subscriptions.items():
-                intermediate = key + " : " + value
+                intermediate = f"{key} : {value}"
                 printed_subscriptions.append(intermediate)

             # check if identity is str (coming from SP) or dict (coming from browser or)
@@ -31,6 +31,7 @@ from prowler.lib.outputs.models import (
     unroll_dict_to_list,
 )
 from prowler.lib.utils.utils import hash_sha512, open_file, outputs_unix_timestamp
+from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info


 def fill_json_asff(finding_output, audit_info, finding, output_options):
@@ -155,6 +156,11 @@ def fill_json_ocsf(audit_info, finding, output_options) -> Check_Output_JSON_OCS
     aws_org_uid = ""
     account = None
     org = None
+    profile = ""
+    if isinstance(audit_info, AWS_Audit_Info):
+        profile = (
+            audit_info.profile if audit_info.profile is not None else "default"
+        )
     if (
         hasattr(audit_info, "organizations_metadata")
         and audit_info.organizations_metadata
@@ -249,9 +255,7 @@ def fill_json_ocsf(audit_info, finding, output_options) -> Check_Output_JSON_OCS
             original_time=outputs_unix_timestamp(
                 output_options.unix_timestamp, timestamp
             ),
-            profiles=[audit_info.profile]
-            if hasattr(audit_info, "organizations_metadata")
-            else [],
+            profiles=[profile],
         )
         compliance = Compliance_OCSF(
             status=generate_json_ocsf_status(finding.status),
@@ -13,7 +13,7 @@ def send_slack_message(token, channel, stats, provider, audit_info):
     response = client.chat_postMessage(
         username="Prowler",
         icon_url=square_logo_img,
-        channel="#" + channel,
+        channel=f"#{channel}",
         blocks=create_message_blocks(identity, logo, stats),
     )
     return response
@@ -35,7 +35,7 @@ def create_message_identity(provider, audit_info):
     elif provider == "azure":
         printed_subscriptions = []
         for key, value in audit_info.identity.subscriptions.items():
-            intermediate = "- *" + key + ": " + value + "*\n"
+            intermediate = f"- *{key}: {value}*\n"
             printed_subscriptions.append(intermediate)
         identity = f"Azure Subscriptions:\n{''.join(printed_subscriptions)}"
         logo = azure_logo
@@ -10,7 +10,10 @@ from prowler.config.config import aws_services_json_file
 from prowler.lib.check.check import list_modules, recover_checks_from_service
 from prowler.lib.logger import logger
 from prowler.lib.utils.utils import open_file, parse_json_file
-from prowler.providers.aws.config import AWS_STS_GLOBAL_ENDPOINT_REGION
+from prowler.providers.aws.config import (
+    AWS_STS_GLOBAL_ENDPOINT_REGION,
+    ROLE_SESSION_NAME,
+)
 from prowler.providers.aws.lib.audit_info.models import AWS_Assume_Role, AWS_Audit_Info
 from prowler.providers.aws.lib.credentials.credentials import create_sts_session
@@ -113,9 +116,15 @@ def assume_role(
     sts_endpoint_region: str = None,
 ) -> dict:
     try:
+        role_session_name = (
+            assumed_role_info.role_session_name
+            if assumed_role_info.role_session_name
+            else ROLE_SESSION_NAME
+        )
+
         assume_role_arguments = {
             "RoleArn": assumed_role_info.role_arn,
-            "RoleSessionName": "ProwlerAsessmentSession",
+            "RoleSessionName": role_session_name,
             "DurationSeconds": assumed_role_info.session_duration,
         }
@@ -152,23 +161,31 @@ def input_role_mfa_token_and_code() -> tuple[str]:


 def generate_regional_clients(
-    service: str, audit_info: AWS_Audit_Info, global_service: bool = False
+    service: str,
+    audit_info: AWS_Audit_Info,
 ) -> dict:
     """generate_regional_clients returns a dict with the following format for the given service:

     Example:
         {"eu-west-1": boto3_service_client}
     """
     try:
         regional_clients = {}
         service_regions = get_available_aws_service_regions(service, audit_info)
-        # Check if it is a global service to gather only one region
-        if global_service:
-            if service_regions:
-                if audit_info.profile_region in service_regions:
-                    service_regions = [audit_info.profile_region]
-                service_regions = service_regions[:1]
-        for region in service_regions:
+
+        # Get the regions enabled for the account and get the intersection with the service available regions
+        if audit_info.enabled_regions:
+            enabled_regions = service_regions.intersection(audit_info.enabled_regions)
+        else:
+            enabled_regions = service_regions
+
+        for region in enabled_regions:
             regional_client = audit_info.audit_session.client(
                 service, region_name=region, config=audit_info.session_config
             )
             regional_client.region = region
             regional_clients[region] = regional_client

         return regional_clients
     except Exception as error:
         logger.error(
@@ -176,6 +193,26 @@ def generate_regional_clients(
         )


+def get_aws_enabled_regions(audit_info: AWS_Audit_Info) -> set:
+    """get_aws_enabled_regions returns a set of enabled AWS regions"""
+
+    # EC2 client to check enabled regions
+    service = "ec2"
+    default_region = get_default_region(service, audit_info)
+    ec2_client = audit_info.audit_session.client(service, region_name=default_region)
+
+    enabled_regions = set()
+    try:
+        # With AllRegions=False we only get the enabled regions for the account
+        for region in ec2_client.describe_regions(AllRegions=False).get("Regions", []):
+            enabled_regions.add(region.get("RegionName"))
+    except Exception as error:
+        logger.warning(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+        )
+    return enabled_regions
+
+
 def get_aws_available_regions():
     try:
         actual_directory = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
@@ -216,6 +253,8 @@ def get_checks_from_input_arn(audit_resources: list, provider: str) -> set:
             service = "efs"
         elif service == "logs":
             service = "cloudwatch"
+        elif service == "cognito":
+            service = "cognito-idp"
         # Check if Prowler has checks in service
         try:
             list_modules(provider, service)
@@ -267,17 +306,18 @@ def get_regions_from_audit_resources(audit_resources: list) -> set:
     return audited_regions


-def get_available_aws_service_regions(service: str, audit_info: AWS_Audit_Info) -> list:
+def get_available_aws_service_regions(service: str, audit_info: AWS_Audit_Info) -> set:
     # Get json locally
     actual_directory = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
     with open_file(f"{actual_directory}/{aws_services_json_file}") as f:
         data = parse_json_file(f)
     # Check if it is a subservice
-    json_regions = data["services"][service]["regions"][audit_info.audited_partition]
-    if audit_info.audited_regions:  # Check for input aws audit_info.audited_regions
-        regions = list(
-            set(json_regions).intersection(audit_info.audited_regions)
-        )  # Get common regions between input and json
+    json_regions = set(
+        data["services"][service]["regions"][audit_info.audited_partition]
+    )
+    # Check for input aws audit_info.audited_regions
+    if audit_info.audited_regions:
+        # Get common regions between input and json
+        regions = json_regions.intersection(audit_info.audited_regions)
     else:  # Get all regions from json of the service and partition
         regions = json_regions
     return regions
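A toy illustration of the set-intersection semantics introduced above (the region values are examples only):

service_regions = {"us-east-1", "eu-west-1", "ap-south-1"}
enabled_regions = {"us-east-1", "eu-west-1"}  # e.g. opt-in regions disabled for the account

# Same logic as generate_regional_clients: only build clients where both sets agree
regions_to_scan = (
    service_regions.intersection(enabled_regions) if enabled_regions else service_regions
)
print(sorted(regions_to_scan))  # -> ['eu-west-1', 'us-east-1']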
@@ -1,2 +1,3 @@
 AWS_STS_GLOBAL_ENDPOINT_REGION = "us-east-1"
 BOTO3_USER_AGENT_EXTRA = "APN_1826889"
+ROLE_SESSION_NAME = "ProwlerAssessmentSession"
@@ -135,32 +135,31 @@ def allowlist_findings(


 def is_allowlisted(
-    allowlist: dict, audited_account: str, check: str, region: str, resource: str, tags
+    allowlist: dict,
+    audited_account: str,
+    check: str,
+    finding_region: str,
+    finding_resource: str,
+    finding_tags,
 ):
     try:
-        allowlisted_checks = {}
         # By default is not allowlisted
         is_finding_allowlisted = False
-        # First set account key from allowlist dict
-        if audited_account in allowlist["Accounts"]:
-            allowlisted_checks = allowlist["Accounts"][audited_account]["Checks"]
-        # If there is a *, it affects to all accounts
-        # This cannot be elif since in the case of * and single accounts we
-        # want to merge allowlisted checks from * to the other accounts check list
-        if "*" in allowlist["Accounts"]:
-            checks_multi_account = allowlist["Accounts"]["*"]["Checks"]
-            allowlisted_checks.update(checks_multi_account)
-        # Test if it is allowlisted
-        if is_allowlisted_in_check(
-            allowlisted_checks,
-            audited_account,
-            audited_account,
-            check,
-            region,
-            resource,
-            tags,
-        ):
-            is_finding_allowlisted = True
+
+        # We always check all the accounts present in the allowlist:
+        # if one allowlists the finding we set the finding as allowlisted
+        for account in allowlist["Accounts"]:
+            if account == audited_account or account == "*":
+                if is_allowlisted_in_check(
+                    allowlist["Accounts"][account]["Checks"],
+                    audited_account,
+                    check,
+                    finding_region,
+                    finding_resource,
+                    finding_tags,
+                ):
+                    is_finding_allowlisted = True
+                    break

         return is_finding_allowlisted
     except Exception as error:
@@ -171,23 +170,29 @@ def is_allowlisted(


 def is_allowlisted_in_check(
-    allowlisted_checks, audited_account, account, check, region, resource, tags
+    allowlisted_checks,
+    audited_account,
+    check,
+    finding_region,
+    finding_resource,
+    finding_tags,
 ):
     try:
         # Default value is not allowlisted
         is_check_allowlisted = False

         for allowlisted_check, allowlisted_check_info in allowlisted_checks.items():
             # map lambda to awslambda
             allowlisted_check = re.sub("^lambda", "awslambda", allowlisted_check)
-            # extract the exceptions
+
+            # Check if the finding is excepted
             exceptions = allowlisted_check_info.get("Exceptions")
-            # Check if there are exceptions
             if is_excepted(
                 exceptions,
                 audited_account,
-                region,
-                resource,
-                tags,
+                finding_region,
+                finding_resource,
+                finding_tags,
             ):
                 # Break loop and return default value since is excepted
                 break
@@ -201,13 +206,27 @@ def is_allowlisted_in_check(
                 or check == allowlisted_check
                 or re.search(allowlisted_check, check)
             ):
-                if is_allowlisted_in_region(
-                    allowlisted_regions,
-                    allowlisted_resources,
-                    allowlisted_tags,
-                    region,
-                    resource,
-                    tags,
-                ):
-                    is_check_allowlisted = True
+                allowlisted_in_check = True
+                allowlisted_in_region = is_allowlisted_in_region(
+                    allowlisted_regions, finding_region
+                )
+                allowlisted_in_resource = is_allowlisted_in_resource(
+                    allowlisted_resources, finding_resource
+                )
+                allowlisted_in_tags = is_allowlisted_in_tags(
+                    allowlisted_tags, finding_tags
+                )
+
+                # For a finding to be allowlisted requires the following set to True:
+                # - allowlisted_in_check -> True
+                # - allowlisted_in_region -> True
+                # - allowlisted_in_tags -> True or allowlisted_in_resource -> True
+                # - excepted -> False
+
+                if (
+                    allowlisted_in_check
+                    and allowlisted_in_region
+                    and (allowlisted_in_tags or allowlisted_in_resource)
+                ):
+                    is_check_allowlisted = True
@@ -220,25 +239,11 @@ def is_allowlisted_in_check(


 def is_allowlisted_in_region(
-    allowlist_regions, allowlist_resources, allowlisted_tags, region, resource, tags
+    allowlisted_regions,
+    finding_region,
 ):
     try:
-        # By default is not allowlisted
-        is_region_allowlisted = False
-        # If there is a *, it affects to all regions
-        if "*" in allowlist_regions or region in allowlist_regions:
-            for elem in allowlist_resources:
-                if is_allowlisted_in_tags(
-                    allowlisted_tags,
-                    elem,
-                    resource,
-                    tags,
-                ):
-                    is_region_allowlisted = True
-                    # if we find the element there is no point in continuing with the loop
-                    break
-
-        return is_region_allowlisted
+        return __is_item_matched__(allowlisted_regions, finding_region)
     except Exception as error:
         logger.critical(
             f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
@@ -246,25 +251,9 @@ def is_allowlisted_in_region(
         sys.exit(1)


-def is_allowlisted_in_tags(allowlisted_tags, elem, resource, tags):
+def is_allowlisted_in_tags(allowlisted_tags, finding_tags):
     try:
-        # By default is not allowlisted
-        is_tag_allowlisted = False
-        # Check if it is an *
-        if elem == "*":
-            elem = ".*"
-        # Check if there are allowlisted tags
-        if allowlisted_tags:
-            for allowlisted_tag in allowlisted_tags:
-                if re.search(allowlisted_tag, tags):
-                    is_tag_allowlisted = True
-                    break
-
-        else:
-            if re.search(elem, resource):
-                is_tag_allowlisted = True
-
-        return is_tag_allowlisted
+        return __is_item_matched__(allowlisted_tags, finding_tags)
     except Exception as error:
         logger.critical(
             f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
@@ -272,7 +261,25 @@ def is_allowlisted_in_tags(allowlisted_tags, elem, resource, tags):
         sys.exit(1)


-def is_excepted(exceptions, audited_account, region, resource, tags):
+def is_allowlisted_in_resource(allowlisted_resources, finding_resource):
+    try:
+        return __is_item_matched__(allowlisted_resources, finding_resource)
+
+    except Exception as error:
+        logger.critical(
+            f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
+        )
+        sys.exit(1)
+
+
+def is_excepted(
+    exceptions,
+    audited_account,
+    finding_region,
+    finding_resource,
+    finding_tags,
+):
+    """is_excepted returns True if the account, region, resource and tags are excepted"""
     try:
         excepted = False
         is_account_excepted = False
@@ -281,39 +288,50 @@ def is_excepted(
         is_tag_excepted = False
         if exceptions:
             excepted_accounts = exceptions.get("Accounts", [])
+            is_account_excepted = __is_item_matched__(
+                excepted_accounts, audited_account
+            )
+
             excepted_regions = exceptions.get("Regions", [])
+            is_region_excepted = __is_item_matched__(excepted_regions, finding_region)
+
             excepted_resources = exceptions.get("Resources", [])
+            is_resource_excepted = __is_item_matched__(
+                excepted_resources, finding_resource
+            )
+
             excepted_tags = exceptions.get("Tags", [])
-            if exceptions:
-                if audited_account in excepted_accounts:
-                    is_account_excepted = True
-                if region in excepted_regions:
-                    is_region_excepted = True
-                for excepted_resource in excepted_resources:
-                    if re.search(excepted_resource, resource):
-                        is_resource_excepted = True
-                for tag in excepted_tags:
-                    if tag in tags:
-                        is_tag_excepted = True
-                if (
-                    (
-                        (excepted_accounts and is_account_excepted)
-                        or not excepted_accounts
-                    )
-                    and (
-                        (excepted_regions and is_region_excepted)
-                        or not excepted_regions
-                    )
-                    and (
-                        (excepted_resources and is_resource_excepted)
-                        or not excepted_resources
-                    )
-                    and ((excepted_tags and is_tag_excepted) or not excepted_tags)
-                ):
-                    excepted = True
+            is_tag_excepted = __is_item_matched__(excepted_tags, finding_tags)
+
+            if (
+                (is_account_excepted or not excepted_accounts)
+                and (is_region_excepted or not excepted_regions)
+                and (is_resource_excepted or not excepted_resources)
+                and (is_tag_excepted or not excepted_tags)
+            ):
+                excepted = True
         return excepted
     except Exception as error:
         logger.critical(
             f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
         )
         sys.exit(1)
+
+
+def __is_item_matched__(matched_items, finding_items):
+    """__is_item_matched__ returns True if any of the matched_items is present in the finding_items, otherwise returns False."""
+    try:
+        is_item_matched = False
+        if matched_items and (finding_items or finding_items == ""):
+            for item in matched_items:
+                if item == "*":
+                    item = ".*"
+                if re.search(item, finding_items):
+                    is_item_matched = True
+                    break
+        return is_item_matched
+    except Exception as error:
+        logger.critical(
+            f"{error.__class__.__name__} -- {error}[{error.__traceback__.tb_lineno}]"
+        )
+        sys.exit(1)
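A small illustration of the matching semantics __is_item_matched__ implements ("*" is rewritten to the ".*" regex, and every entry is treated as a regular expression):

import re

def is_item_matched(matched_items, finding_items):
    # Same logic as __is_item_matched__ above, without the logging wrapper
    if matched_items and (finding_items or finding_items == ""):
        for item in matched_items:
            if item == "*":
                item = ".*"
            if re.search(item, finding_items):
                return True
    return False

print(is_item_matched(["*"], "eu-west-1"))      # True: wildcard matches any region
print(is_item_matched(["us-.*"], "us-east-1"))  # True: regex match
print(is_item_matched(["us-.*"], "eu-west-1"))  # False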
@@ -1,6 +1,8 @@
 from argparse import ArgumentTypeError, Namespace
+from re import fullmatch, search

 from prowler.providers.aws.aws_provider import get_aws_available_regions
+from prowler.providers.aws.config import ROLE_SESSION_NAME
 from prowler.providers.aws.lib.arn.arn import arn_type


@@ -26,6 +28,13 @@ def init_parser(self):
         help="ARN of the role to be assumed",
         # Pending ARN validation
     )
+    aws_auth_subparser.add_argument(
+        "--role-session-name",
+        nargs="?",
+        default=ROLE_SESSION_NAME,
+        help="An identifier for the assumed role session. Defaults to ProwlerAssessmentSession",
+        type=validate_role_session_name,
+    )
     aws_auth_subparser.add_argument(
         "--sts-endpoint-region",
         nargs="?",
@@ -84,6 +93,11 @@ def init_parser(self):
         action="store_true",
         help="Skip updating previous findings of Prowler in Security Hub",
     )
+    aws_security_hub_subparser.add_argument(
+        "--send-sh-only-fails",
+        action="store_true",
+        help="Send only Prowler failed findings to SecurityHub",
+    )
     # AWS Quick Inventory
     aws_quick_inventory_subparser = aws_parser.add_argument_group("Quick Inventory")
     aws_quick_inventory_subparser.add_argument(
@@ -99,6 +113,7 @@ def init_parser(self):
         "-B",
         "--output-bucket",
         nargs="?",
+        type=validate_bucket,
         default=None,
         help="Custom output bucket, requires -M <mode> and it can work also with -o flag.",
     )
@@ -106,6 +121,7 @@ def init_parser(self):
         "-D",
         "--output-bucket-no-assume",
         nargs="?",
+        type=validate_bucket,
         default=None,
         help="Same as -B but do not use the assumed role credentials to put objects to the bucket, instead uses the initial credentials.",
     )
@@ -126,6 +142,7 @@ def init_parser(self):
         default=None,
         help="Path for allowlist yaml file. See example prowler/config/aws_allowlist.yaml for reference and format. It also accepts AWS DynamoDB Table or Lambda ARNs or S3 URIs, see more in https://docs.prowler.cloud/en/latest/tutorials/allowlist/",
     )
+
     # Based Scans
     aws_based_scans_subparser = aws_parser.add_argument_group("AWS Based Scans")
     aws_based_scans_parser = aws_based_scans_subparser.add_mutually_exclusive_group()
@@ -178,9 +195,37 @@ def validate_arguments(arguments: Namespace) -> tuple[bool, str]:

     # Handle if session_duration is not the default value or external_id is set
     if (
-        arguments.session_duration and arguments.session_duration != 3600
-    ) or arguments.external_id:
+        (arguments.session_duration and arguments.session_duration != 3600)
+        or arguments.external_id
+        or arguments.role_session_name != ROLE_SESSION_NAME
+    ):
         if not arguments.role:
-            return (False, "To use -I/-T options -R option is needed")
+            return (
+                False,
+                "To use -I/--external-id, -T/--session-duration or --role-session-name options -R/--role option is needed",
+            )

     return (True, "")


+def validate_bucket(bucket_name):
+    """validate_bucket validates that the input bucket_name is valid"""
+    if search("(?!(^xn--|.+-s3alias$))^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$", bucket_name):
+        return bucket_name
+    else:
+        raise ArgumentTypeError(
+            "Bucket name must be valid (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)"
+        )
+
+
+def validate_role_session_name(session_name):
+    """
+    validates that the role session name is valid
+    Documentation: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+    """
+    if fullmatch(r"[\w+=,.@-]{2,64}", session_name):
+        return session_name
+    else:
+        raise ArgumentTypeError(
+            "Role Session Name must be 2-64 characters long and consist only of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@-"
+        )
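A quick illustration of the session-name rule above (the regex allows 2-64 word characters plus =,.@- and no spaces):

from re import fullmatch

for candidate in ["ProwlerAssessmentSession", "bad name", "x"]:
    ok = bool(fullmatch(r"[\w+=,.@-]{2,64}", candidate))
    print(f"{candidate!r}: {'valid' if ok else 'invalid'}")
# 'ProwlerAssessmentSession': valid; 'bad name': invalid (space); 'x': invalid (too short)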
@@ -30,6 +30,7 @@ current_audit_info = AWS_Audit_Info(
         session_duration=None,
         external_id=None,
         mfa_enabled=None,
+        role_session_name=None,
     ),
     mfa_enabled=None,
     audit_resources=None,
@@ -38,4 +39,5 @@ current_audit_info = AWS_Audit_Info(
     audit_metadata=None,
     audit_config=None,
     ignore_unused_services=False,
+    enabled_regions=set(),
 )
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from datetime import datetime
 from typing import Any, Optional

@@ -20,6 +20,7 @@ class AWS_Assume_Role:
     session_duration: int
     external_id: str
     mfa_enabled: bool
+    role_session_name: str


 @dataclass
@@ -53,3 +54,4 @@ class AWS_Audit_Info:
     audit_metadata: Optional[Any] = None
     audit_config: Optional[dict] = None
     ignore_unused_services: bool = False
+    enabled_regions: set = field(default_factory=set)
@@ -1,8 +1,11 @@
-def is_account_only_allowed_in_condition(
-    condition_statement: dict, source_account: str
+def is_condition_block_restrictive(
+    condition_statement: dict, source_account: str, is_cross_account_allowed=False
 ):
     """
-    is_account_only_allowed_in_condition parses the IAM Condition policy block and returns True if the source_account passed as argument is within, False if not.
+    is_condition_block_restrictive parses the IAM Condition policy block and, by default, returns True if the source_account passed as argument is within, False if not.
+
+    If the argument is_cross_account_allowed is True it tests whether the Condition block includes any of the allowlisted operators, returning True if it does, False if not.

     @param condition_statement: dict with an IAM Condition block, e.g.:
         {
@@ -54,23 +57,32 @@ def is_condition_block_restrictive(
                     condition_statement[condition_operator][value],
                     list,
                 ):
-                    # if there is an arn/account without the source account -> we do not consider it safe
-                    # here by default we assume is true and look for false entries
-                    is_condition_valid = True
-                    for item in condition_statement[condition_operator][value]:
-                        if source_account not in item:
-                            is_condition_valid = False
-                            break
+                    is_condition_key_restrictive = True
+                    # if cross account is not allowed, check each condition block
+                    # looking for accounts different than the default one
+                    if not is_cross_account_allowed:
+                        # if there is an arn/account without the source account -> we do not consider it safe
+                        # here by default we assume it is true and look for false entries
+                        for item in condition_statement[condition_operator][value]:
+                            if source_account not in item:
+                                is_condition_key_restrictive = False
+                                break
+
+                    if is_condition_key_restrictive:
+                        is_condition_valid = True

                 # value is a string
                 elif isinstance(
                     condition_statement[condition_operator][value],
                     str,
                 ):
-                    if (
-                        source_account
-                        in condition_statement[condition_operator][value]
-                    ):
+                    if is_cross_account_allowed:
                         is_condition_valid = True
+                    else:
+                        if (
+                            source_account
+                            in condition_statement[condition_operator][value]
+                        ):
+                            is_condition_valid = True

         return is_condition_valid
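To make the intent concrete, a sketch of the kind of Condition block this helper evaluates (the account IDs are placeholders):

condition_statement = {
    "StringEquals": {
        "aws:SourceAccount": ["111122223333"]
    }
}
# With source_account="111122223333" and is_cross_account_allowed=False the block
# is considered restrictive: every listed entry contains the audited account.
# Adding a second account (e.g. "444455556666") to the list would make it
# non-restrictive, because cross-account access would then be granted.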
@@ -1,5 +1,3 @@
-import sys
-
 from prowler.config.config import (
     csv_file_suffix,
     html_file_suffix,
@@ -41,10 +39,9 @@ def send_to_s3_bucket(
         s3_client.upload_file(file_name, output_bucket_name, object_name)

     except Exception as error:
-        logger.critical(
+        logger.error(
             f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
         )
-        sys.exit(1)


 def get_s3_object_path(output_directory: str) -> str:
@@ -14,9 +14,11 @@ def prepare_security_hub_findings(
     findings: [], audit_info: AWS_Audit_Info, output_options, enabled_regions: []
 ) -> dict:
     security_hub_findings_per_region = {}
-    # Create a key per region
-    for region in audit_info.audited_regions:
+
+    # Create a key per audited region
+    for region in enabled_regions:
         security_hub_findings_per_region[region] = []

     for finding in findings:
         # We don't send the INFO findings to AWS Security Hub
         if finding.status == "INFO":
@@ -27,7 +29,9 @@ def prepare_security_hub_findings(
             continue

         # Handle quiet mode
-        if output_options.is_quiet and finding.status != "FAIL":
+        if (
+            output_options.is_quiet or output_options.send_sh_only_fails
+        ) and finding.status != "FAIL":
             continue

         # Get the finding region
@@ -47,8 +51,10 @@ def prepare_security_hub_findings(


 def verify_security_hub_integration_enabled_per_region(
+    partition: str,
     region: str,
     session: session.Session,
+    aws_account_number: str,
 ) -> bool:
     f"""verify_security_hub_integration_enabled returns True if the {SECURITY_HUB_INTEGRATION_NAME} is enabled for the given region. Otherwise returns false."""
     prowler_integration_enabled = False
@@ -62,7 +68,8 @@ def verify_security_hub_integration_enabled_per_region(
         security_hub_client.describe_hub()

         # Check if Prowler integration is enabled in Security Hub
-        if "prowler/prowler" not in str(
+        security_hub_prowler_integration_arn = f"arn:{partition}:securityhub:{region}:{aws_account_number}:product-subscription/{SECURITY_HUB_INTEGRATION_NAME}"
+        if security_hub_prowler_integration_arn not in str(
            security_hub_client.list_enabled_products_for_import()
         ):
             logger.error(
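For reference, a toy rendering of the product-subscription ARN the updated check builds (all values here are placeholders; the integration name "prowler/prowler" is taken from the string the old code matched against):

partition = "aws"
region = "eu-west-1"
aws_account_number = "123456789012"
SECURITY_HUB_INTEGRATION_NAME = "prowler/prowler"

arn = f"arn:{partition}:securityhub:{region}:{aws_account_number}:product-subscription/{SECURITY_HUB_INTEGRATION_NAME}"
print(arn)
# arn:aws:securityhub:eu-west-1:123456789012:product-subscription/prowler/prowler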
@@ -1,17 +1,21 @@
 import threading
+from concurrent.futures import ThreadPoolExecutor, as_completed

+from prowler.lib.logger import logger
 from prowler.providers.aws.aws_provider import (
     generate_regional_clients,
     get_default_region,
 )
 from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info

+MAX_WORKERS = 10
+

 class AWSService:
     """The AWSService class offers a parent class for each AWS Service to generate:
     - AWS Regional Clients
     - Shared information like the account ID and ARN, the AWS partition and the checks audited
     - AWS Session
+    - Thread pool for the __threading_call__
     - Also handles if the AWS Service is Global
     """

@@ -34,9 +38,7 @@ class AWSService:

         # Generate Regional Clients
         if not global_service:
-            self.regional_clients = generate_regional_clients(
-                self.service, audit_info, global_service
-            )
+            self.regional_clients = generate_regional_clients(self.service, audit_info)

         # Get a single region and client if the service needs it (e.g. AWS Global Service)
         # We cannot include this within an else because some services need both the regional_clients
@@ -44,14 +46,40 @@ class AWSService:
         self.region = get_default_region(self.service, audit_info)
         self.client = self.session.client(self.service, self.region)

+        # Thread pool for __threading_call__
+        self.thread_pool = ThreadPoolExecutor(max_workers=MAX_WORKERS)
+
     def __get_session__(self):
         return self.session

-    def __threading_call__(self, call):
-        threads = []
-        for regional_client in self.regional_clients.values():
-            threads.append(threading.Thread(target=call, args=(regional_client,)))
-        for t in threads:
-            t.start()
-        for t in threads:
-            t.join()
+    def __threading_call__(self, call, iterator=None):
+        # Use the provided iterator, or default to self.regional_clients
+        items = iterator if iterator is not None else self.regional_clients.values()
+        # Determine the total count for logging
+        item_count = len(items)
+
+        # Trim leading and trailing underscores from the call's name
+        call_name = call.__name__.strip("_")
+        # Add capitalization
+        call_name = " ".join([x.capitalize() for x in call_name.split("_")])
+
+        # Print a message based on the call's name, and whether it is regional or processing a list of items
+        if iterator is None:
+            logger.info(
+                f"{self.service.upper()} - Starting threads for '{call_name}' function across {item_count} regions..."
+            )
+        else:
+            logger.info(
+                f"{self.service.upper()} - Starting threads for '{call_name}' function to process {item_count} items..."
+            )
+
+        # Submit tasks to the thread pool
+        futures = [self.thread_pool.submit(call, item) for item in items]
+
+        # Wait for all tasks to complete
+        for future in as_completed(futures):
+            try:
+                future.result()  # Raises exceptions from the thread, if any
+            except Exception:
+                # Handle exceptions if necessary
+                pass  # Replace 'pass' with any additional exception handling logic. Currently handled within the called function
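A minimal standalone sketch of the same fan-out pattern (the worker function and item list are illustrative):

from concurrent.futures import ThreadPoolExecutor, as_completed

MAX_WORKERS = 10

def describe_region(region):  # stand-in for a per-region boto3 call
    return f"scanned {region}"

with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
    futures = [pool.submit(describe_region, r) for r in ["us-east-1", "eu-west-1"]]
    for future in as_completed(futures):
        # future.result() surfaces worker exceptions here, like __threading_call__
        print(future.result())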
@@ -19,17 +19,23 @@ class accessanalyzer_enabled(Check):
                     f"IAM Access Analyzer {analyzer.name} is enabled."
                 )

-            elif analyzer.status == "NOT_AVAILABLE":
-                report.status = "FAIL"
-                report.status_extended = (
-                    f"IAM Access Analyzer in account {analyzer.name} is not enabled."
-                )
-
             else:
-                report.status = "FAIL"
-                report.status_extended = (
-                    f"IAM Access Analyzer {analyzer.name} is not active."
-                )
+                if analyzer.status == "NOT_AVAILABLE":
+                    report.status = "FAIL"
+                    report.status_extended = f"IAM Access Analyzer in account {analyzer.name} is not enabled."
+
+                else:
+                    report.status = "FAIL"
+                    report.status_extended = (
+                        f"IAM Access Analyzer {analyzer.name} is not active."
+                    )
+                if (
+                    accessanalyzer_client.audit_config.get(
+                        "allowlist_non_default_regions", False
+                    )
+                    and not analyzer.region == accessanalyzer_client.region
+                ):
+                    report.status = "WARNING"

             findings.append(report)
@@ -85,21 +85,36 @@ class AccessAnalyzer(AWSService):
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

+    # TODO: We need to include ListFindingsV2
+    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/accessanalyzer/client/list_findings_v2.html
     def __list_findings__(self):
         logger.info("AccessAnalyzer - Listing Findings per Analyzer...")
         try:
             for analyzer in self.analyzers:
-                if analyzer.status == "ACTIVE":
-                    regional_client = self.regional_clients[analyzer.region]
-                    list_findings_paginator = regional_client.get_paginator(
-                        "list_findings"
-                    )
-                    for page in list_findings_paginator.paginate(
-                        analyzerArn=analyzer.arn
-                    ):
-                        for finding in page["findings"]:
-                            analyzer.findings.append(Finding(id=finding["id"]))
-
+                try:
+                    if analyzer.status == "ACTIVE":
+                        regional_client = self.regional_clients[analyzer.region]
+                        list_findings_paginator = regional_client.get_paginator(
+                            "list_findings"
+                        )
+                        for page in list_findings_paginator.paginate(
+                            analyzerArn=analyzer.arn
+                        ):
+                            for finding in page["findings"]:
+                                analyzer.findings.append(Finding(id=finding["id"]))
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "ValidationException":
+                        logger.warning(
+                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    else:
+                        logger.error(
+                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
         except Exception as error:
             logger.error(
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -1,7 +1,7 @@
 {
   "Provider": "aws",
   "CheckID": "apigateway_restapi_authorizers_enabled",
-  "CheckTitle": "Check if API Gateway has configured authorizers.",
+  "CheckTitle": "Check if API Gateway has configured authorizers at API or method level.",
   "CheckAliases": [
     "apigateway_authorizers_enabled"
   ],
@@ -13,7 +13,7 @@
   "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
   "Severity": "medium",
   "ResourceType": "AwsApiGatewayRestApi",
-  "Description": "Check if API Gateway has configured authorizers.",
+  "Description": "Check if API Gateway has configured authorizers at API or method level.",
   "Risk": "If no authorizer is enabled anyone can use the service.",
   "RelatedUrl": "",
   "Remediation": {
@@ -13,12 +13,41 @@ class apigateway_restapi_authorizers_enabled(Check):
             report.resource_id = rest_api.name
             report.resource_arn = rest_api.arn
             report.resource_tags = rest_api.tags
+            # if there are no authorizers at the API level and only resources without methods (default case) -> FAIL
+            report.status = "FAIL"
+            report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} does not have an authorizer configured at api level."
             if rest_api.authorizer:
                 report.status = "PASS"
-                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} has an authorizer configured."
+                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} has an authorizer configured at api level"
             else:
-                report.status = "FAIL"
-                report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} does not have an authorizer configured."
+                # we want to know whether the API has no authorizers and all of its resources lack configured methods
+                resources_have_methods = False
+                all_methods_authorized = True
+                resource_paths_with_unauthorized_methods = []
+                for resource in rest_api.resources:
+                    # if the resource has methods, test whether they all have an authorizer configured
+                    if resource.resource_methods:
+                        resources_have_methods = True
+                        for (
+                            http_method,
+                            authorization_method,
+                        ) in resource.resource_methods.items():
+                            if authorization_method == "NONE":
+                                all_methods_authorized = False
+                                unauthorized_method = (
+                                    f"{resource.path} -> {http_method}"
+                                )
+                                resource_paths_with_unauthorized_methods.append(
+                                    unauthorized_method
+                                )
+                # if there are methods in at least one resource and all of them are authorized
+                if all_methods_authorized and resources_have_methods:
+                    report.status = "PASS"
+                    report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} has all methods authorized"
+                # if there are methods in at least one resource but some of them are not authorized -> list them
+                elif not all_methods_authorized:
+                    report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} does not have authorizers at api level and the following paths and methods are unauthorized: {'; '.join(resource_paths_with_unauthorized_methods)}."

             findings.append(report)

         return findings
@@ -17,6 +17,7 @@ class APIGateway(AWSService):
         self.__get_authorizers__()
         self.__get_rest_api__()
         self.__get_stages__()
+        self.__get_resources__()

     def __get_rest_apis__(self, regional_client):
         logger.info("APIGateway - Getting Rest APIs...")
@@ -53,7 +54,9 @@ class APIGateway(AWSService):
                 if authorizers:
                     rest_api.authorizer = True
         except Exception as error:
-            logger.error(f"{error.__class__.__name__}: {error}")
+            logger.error(
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )

     def __get_rest_api__(self):
         logger.info("APIGateway - Describing Rest API...")
@@ -64,7 +67,9 @@ class APIGateway(AWSService):
                 if rest_api_info["endpointConfiguration"]["types"] == ["PRIVATE"]:
                     rest_api.public_endpoint = False
         except Exception as error:
-            logger.error(f"{error.__class__.__name__}: {error}")
+            logger.error(
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )

     def __get_stages__(self):
         logger.info("APIGateway - Getting stages for Rest APIs...")
@@ -95,7 +100,46 @@ class APIGateway(AWSService):
                     )
                 )
         except Exception as error:
-            logger.error(f"{error.__class__.__name__}: {error}")
+            logger.error(
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )
+
+    def __get_resources__(self):
+        logger.info("APIGateway - Getting API resources...")
+        try:
+            for rest_api in self.rest_apis:
+                regional_client = self.regional_clients[rest_api.region]
+                get_resources_paginator = regional_client.get_paginator("get_resources")
+                for page in get_resources_paginator.paginate(restApiId=rest_api.id):
+                    for resource in page["items"]:
+                        id = resource["id"]
+                        resource_methods = []
+                        methods_auth = {}
+                        for resource_method in resource.get(
+                            "resourceMethods", {}
+                        ).keys():
+                            resource_methods.append(resource_method)
+
+                        for resource_method in resource_methods:
+                            if resource_method != "OPTIONS":
+                                method_config = regional_client.get_method(
+                                    restApiId=rest_api.id,
+                                    resourceId=id,
+                                    httpMethod=resource_method,
+                                )
+                                auth_type = method_config["authorizationType"]
+                                methods_auth.update({resource_method: auth_type})
+
+                        rest_api.resources.append(
+                            PathResourceMethods(
+                                path=resource["path"], resource_methods=methods_auth
+                            )
+                        )
+
+        except Exception as error:
+            logger.error(
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )


 class Stage(BaseModel):
@@ -107,6 +151,11 @@ class Stage(BaseModel):
     tags: Optional[list] = []


+class PathResourceMethods(BaseModel):
+    path: str
+    resource_methods: dict
+
+
 class RestAPI(BaseModel):
     id: str
     arn: str
@@ -116,3 +165,4 @@ class RestAPI(BaseModel):
     public_endpoint: bool = True
     stages: list[Stage] = []
     tags: Optional[list] = []
+    resources: list[PathResourceMethods] = []
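A compact sketch of how the new model captures per-method authorization (pydantic is assumed from the surrounding BaseModel usage; the values are illustrative):

from pydantic import BaseModel

class PathResourceMethods(BaseModel):
    path: str
    resource_methods: dict  # e.g. {"GET": "NONE", "POST": "AWS_IAM"}

resource = PathResourceMethods(
    path="/items", resource_methods={"GET": "NONE", "POST": "AWS_IAM"}
)
unauthorized = [m for m, auth in resource.resource_methods.items() if auth == "NONE"]
print(unauthorized)  # -> ['GET'], exactly what the check flags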
@@ -11,57 +11,55 @@ from prowler.providers.aws.services.awslambda.awslambda_client import awslambda_
 class awslambda_function_no_secrets_in_code(Check):
     def execute(self):
         findings = []
-        for function in awslambda_client.functions.values():
-            if function.code:
-                report = Check_Report_AWS(self.metadata())
-                report.region = function.region
-                report.resource_id = function.name
-                report.resource_arn = function.arn
-                report.resource_tags = function.tags
-
-                report.status = "PASS"
-                report.status_extended = (
-                    f"No secrets found in Lambda function {function.name} code."
-                )
-                with tempfile.TemporaryDirectory() as tmp_dir_name:
-                    function.code.code_zip.extractall(tmp_dir_name)
-                    # List all files
-                    files_in_zip = next(os.walk(tmp_dir_name))[2]
-                    secrets_findings = []
-                    for file in files_in_zip:
-                        secrets = SecretsCollection()
-                        with default_settings():
-                            secrets.scan_file(f"{tmp_dir_name}/{file}")
-                        detect_secrets_output = secrets.json()
-                        if detect_secrets_output:
-                            for (
-                                file_name
-                            ) in (
-                                detect_secrets_output.keys()
-                            ):  # Appears that only 1 file is being scanned at a time, so could rework this
-                                output_file_name = file_name.replace(
-                                    f"{tmp_dir_name}/", ""
-                                )
-                                secrets_string = ", ".join(
-                                    [
-                                        f"{secret['type']} on line {secret['line_number']}"
-                                        for secret in detect_secrets_output[file_name]
-                                    ]
-                                )
-                                secrets_findings.append(
-                                    f"{output_file_name}: {secrets_string}"
-                                )
-
-                    if secrets_findings:
-                        final_output_string = "; ".join(secrets_findings)
-                        report.status = "FAIL"
-                        # report.status_extended = f"Potential {'secrets' if len(secrets_findings)>1 else 'secret'} found in Lambda function {function.name} code. {final_output_string}."
-                        if len(secrets_findings) > 1:
-                            report.status_extended = f"Potential secrets found in Lambda function {function.name} code -> {final_output_string}."
-                        else:
-                            report.status_extended = f"Potential secret found in Lambda function {function.name} code -> {final_output_string}."
-                        # break // Don't break as there may be additional findings
-
-                findings.append(report)
+        if awslambda_client.functions:
+            for function, function_code in awslambda_client.__get_function_code__():
+                if function_code:
+                    report = Check_Report_AWS(self.metadata())
+                    report.region = function.region
+                    report.resource_id = function.name
+                    report.resource_arn = function.arn
+                    report.resource_tags = function.tags
+
+                    report.status = "PASS"
+                    report.status_extended = (
+                        f"No secrets found in Lambda function {function.name} code."
+                    )
+                    with tempfile.TemporaryDirectory() as tmp_dir_name:
+                        function_code.code_zip.extractall(tmp_dir_name)
+                        # List all files
+                        files_in_zip = next(os.walk(tmp_dir_name))[2]
+                        secrets_findings = []
+                        for file in files_in_zip:
+                            secrets = SecretsCollection()
+                            with default_settings():
+                                secrets.scan_file(f"{tmp_dir_name}/{file}")
+                            detect_secrets_output = secrets.json()
+                            if detect_secrets_output:
+                                for (
+                                    file_name
+                                ) in (
+                                    detect_secrets_output.keys()
+                                ):  # Appears that only 1 file is being scanned at a time, so could rework this
+                                    output_file_name = file_name.replace(
+                                        f"{tmp_dir_name}/", ""
+                                    )
+                                    secrets_string = ", ".join(
+                                        [
+                                            f"{secret['type']} on line {secret['line_number']}"
+                                            for secret in detect_secrets_output[
+                                                file_name
+                                            ]
+                                        ]
+                                    )
+                                    secrets_findings.append(
+                                        f"{output_file_name}: {secrets_string}"
+                                    )
+
+                        if secrets_findings:
+                            final_output_string = "; ".join(secrets_findings)
+                            report.status = "FAIL"
+                            report.status_extended = f"Potential {'secrets' if len(secrets_findings) > 1 else 'secret'} found in Lambda function {function.name} code -> {final_output_string}."
+
+                    findings.append(report)

         return findings
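The check leans on the detect-secrets library; a minimal sketch of that scanning flow, mirroring the calls used above (the temp-file contents are an illustrative fake credential):

import tempfile

from detect_secrets import SecretsCollection
from detect_secrets.settings import default_settings

with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
    f.write('aws_secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"\n')
    path = f.name

secrets = SecretsCollection()
with default_settings():
    secrets.scan_file(path)
# Maps the file name to detected secret types and line numbers; the keyword
# and high-entropy detectors should flag the assignment above.
print(secrets.json())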
@@ -1,6 +1,7 @@
 import io
 import json
 import zipfile
+from concurrent.futures import as_completed
 from enum import Enum
 from typing import Any, Optional

@@ -21,15 +22,6 @@ class Lambda(AWSService):
         self.functions = {}
         self.__threading_call__(self.__list_functions__)
         self.__list_tags_for_resource__()
-
-        # We only want to retrieve the Lambda code if the
-        # awslambda_function_no_secrets_in_code check is set
-        if (
-            "awslambda_function_no_secrets_in_code"
-            in audit_info.audit_metadata.expected_checks
-        ):
-            self.__threading_call__(self.__get_function__)
-
         self.__threading_call__(self.__get_policy__)
         self.__threading_call__(self.__get_function_url_config__)

@@ -70,28 +62,45 @@ class Lambda(AWSService):
                 f" {error}"
             )

-    def __get_function__(self, regional_client):
-        logger.info("Lambda - Getting Function...")
-        try:
-            for function in self.functions.values():
-                if function.region == regional_client.region:
-                    function_information = regional_client.get_function(
-                        FunctionName=function.name
-                    )
-                    if "Location" in function_information["Code"]:
-                        code_location_uri = function_information["Code"]["Location"]
-                        raw_code_zip = requests.get(code_location_uri).content
-                        self.functions[function.arn].code = LambdaCode(
-                            location=code_location_uri,
-                            code_zip=zipfile.ZipFile(io.BytesIO(raw_code_zip)),
-                        )
+    def __get_function_code__(self):
+        logger.info("Lambda - Getting Function Code...")
+        # Use a thread pool to handle the queueing and execution of the __fetch_function_code__ tasks, up to max_workers tasks concurrently.
+        lambda_functions_to_fetch = {
+            self.thread_pool.submit(
+                self.__fetch_function_code__, function.name, function.region
+            ): function
+            for function in self.functions.values()
+        }
+
+        for fetched_lambda_code in as_completed(lambda_functions_to_fetch):
+            function = lambda_functions_to_fetch[fetched_lambda_code]
+            try:
+                function_code = fetched_lambda_code.result()
+                if function_code:
+                    yield function, function_code
+            except Exception as error:
+                logger.error(
+                    f"{function.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+
+    def __fetch_function_code__(self, function_name, function_region):
+        try:
+            regional_client = self.regional_clients[function_region]
+            function_information = regional_client.get_function(
+                FunctionName=function_name
+            )
+            if "Location" in function_information["Code"]:
+                code_location_uri = function_information["Code"]["Location"]
+                raw_code_zip = requests.get(code_location_uri).content
+                return LambdaCode(
+                    location=code_location_uri,
+                    code_zip=zipfile.ZipFile(io.BytesIO(raw_code_zip)),
+                )
         except Exception as error:
             logger.error(
-                f"{regional_client.region} --"
-                f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
-                f" {error}"
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )
+            raise

     def __get_policy__(self, regional_client):
         logger.info("Lambda - Getting Policy...")
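A minimal standalone sketch of the submit-then-yield pattern __get_function_code__ now uses (the worker function and names are illustrative):

from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch(name):  # stand-in for __fetch_function_code__
    return f"code of {name}"

def iter_results(names):
    with ThreadPoolExecutor(max_workers=4) as pool:
        # Map each future back to its originating item, as the service class does
        future_to_name = {pool.submit(fetch, n): n for n in names}
        for future in as_completed(future_to_name):
            yield future_to_name[future], future.result()

for name, code in iter_results(["fn-a", "fn-b"]):
    print(name, "->", code)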
@@ -140,7 +140,16 @@ class Cloudtrail(AWSService):
                         error.response["Error"]["Code"]
                         == "InsightNotEnabledException"
                     ):
-                        continue
+                        logger.warning(
+                            f"{client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    elif (
+                        error.response["Error"]["Code"]
+                        == "UnsupportedOperationException"
+                    ):
+                        logger.warning(
+                            f"{client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
                     else:
                         logger.error(
                             f"{client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -1,5 +1,3 @@
-import re
-
 from prowler.lib.check.models import Check, Check_Report_AWS
 from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
     cloudtrail_client,
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
 from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
     cloudwatch_client,
 )
+from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
+    check_cloudwatch_log_metric_filter,
+)
 from prowler.providers.aws.services.cloudwatch.logs_client import logs_client


@@ -22,26 +23,13 @@ class cloudwatch_changes_to_network_acls_alarm_configured(Check):
         report.region = cloudwatch_client.region
         report.resource_id = cloudtrail_client.audited_account
         report.resource_arn = cloudtrail_client.audited_account_arn
-        # 1. Iterate for CloudWatch Log Group in CloudTrail trails
-        log_groups = []
-        for trail in cloudtrail_client.trails:
-            if trail.log_group_arn:
-                log_groups.append(trail.log_group_arn.split(":")[6])
-        # 2. Describe metric filters for previous log groups
-        for metric_filter in logs_client.metric_filters:
-            if metric_filter.log_group in log_groups:
-                if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
-                    report.resource_id = metric_filter.log_group
-                    report.resource_arn = metric_filter.arn
-                    report.region = metric_filter.region
-                    report.status = "FAIL"
-                    report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
-                    # 3. Check if there is an alarm for the metric
-                    for alarm in cloudwatch_client.metric_alarms:
-                        if alarm.metric == metric_filter.metric:
-                            report.status = "PASS"
-                            report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
-                            break
+        report = check_cloudwatch_log_metric_filter(
+            pattern,
+            cloudtrail_client.trails,
+            logs_client.metric_filters,
+            cloudwatch_client.metric_alarms,
+            report,
+        )

         findings.append(report)
         return findings
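The shared helper's body is not part of this diff; a hedged reconstruction inferred from the inline block it replaces (a sketch, not the library's confirmed implementation):

import re

def check_cloudwatch_log_metric_filter(pattern, trails, metric_filters, metric_alarms, report):
    # 1. Collect the CloudWatch log groups wired to CloudTrail trails
    log_groups = [t.log_group_arn.split(":")[6] for t in trails if t.log_group_arn]
    # 2. Look for a metric filter on those log groups matching the pattern
    for metric_filter in metric_filters:
        if metric_filter.log_group in log_groups and re.search(
            pattern, metric_filter.pattern, flags=re.DOTALL
        ):
            report.resource_id = metric_filter.log_group
            report.resource_arn = metric_filter.arn
            report.region = metric_filter.region
            report.status = "FAIL"
            # 3. PASS only if an alarm is attached to the filter's metric
            for alarm in metric_alarms:
                if alarm.metric == metric_filter.metric:
                    report.status = "PASS"
                    break
    return report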
@@ -1,5 +1,3 @@
-import re
-
 from prowler.lib.check.models import Check, Check_Report_AWS
 from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
     cloudtrail_client,
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
 from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
     cloudwatch_client,
 )
+from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
+    check_cloudwatch_log_metric_filter,
+)
 from prowler.providers.aws.services.cloudwatch.logs_client import logs_client


@@ -22,26 +23,13 @@ class cloudwatch_changes_to_network_gateways_alarm_configured(Check):
         report.region = cloudwatch_client.region
         report.resource_id = cloudtrail_client.audited_account
         report.resource_arn = cloudtrail_client.audited_account_arn
-        # 1. Iterate for CloudWatch Log Group in CloudTrail trails
-        log_groups = []
-        for trail in cloudtrail_client.trails:
-            if trail.log_group_arn:
-                log_groups.append(trail.log_group_arn.split(":")[6])
-        # 2. Describe metric filters for previous log groups
-        for metric_filter in logs_client.metric_filters:
-            if metric_filter.log_group in log_groups:
-                if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
-                    report.resource_id = metric_filter.log_group
-                    report.resource_arn = metric_filter.arn
-                    report.region = metric_filter.region
-                    report.status = "FAIL"
-                    report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
-                    # 3. Check if there is an alarm for the metric
-                    for alarm in cloudwatch_client.metric_alarms:
-                        if alarm.metric == metric_filter.metric:
-                            report.status = "PASS"
-                            report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
-                            break
+        report = check_cloudwatch_log_metric_filter(
+            pattern,
+            cloudtrail_client.trails,
+            logs_client.metric_filters,
+            cloudwatch_client.metric_alarms,
+            report,
+        )

         findings.append(report)
         return findings
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_changes_to_network_route_tables_alarm_configured(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_changes_to_vpcs_alarm_configured(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -24,26 +25,13 @@ class cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_change
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -24,26 +25,13 @@ class cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_change
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_authentication_failures(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_aws_organizations_changes(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk(Chec
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,14 @@ class cloudwatch_log_metric_filter_for_s3_bucket_policy_changes(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_policy_changes(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_root_usage(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_security_group_changes(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_sign_in_without_mfa(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import re
|
||||
|
||||
from prowler.lib.check.models import Check, Check_Report_AWS
|
||||
from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
cloudtrail_client,
|
||||
@@ -7,6 +5,9 @@ from prowler.providers.aws.services.cloudtrail.cloudtrail_client import (
|
||||
from prowler.providers.aws.services.cloudwatch.cloudwatch_client import (
|
||||
cloudwatch_client,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.lib.metric_filters import (
|
||||
check_cloudwatch_log_metric_filter,
|
||||
)
|
||||
from prowler.providers.aws.services.cloudwatch.logs_client import logs_client
|
||||
|
||||
|
||||
@@ -22,26 +23,13 @@ class cloudwatch_log_metric_filter_unauthorized_api_calls(Check):
|
||||
report.region = cloudwatch_client.region
|
||||
report.resource_id = cloudtrail_client.audited_account
|
||||
report.resource_arn = cloudtrail_client.audited_account_arn
|
||||
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
|
||||
log_groups = []
|
||||
for trail in cloudtrail_client.trails:
|
||||
if trail.log_group_arn:
|
||||
log_groups.append(trail.log_group_arn.split(":")[6])
|
||||
# 2. Describe metric filters for previous log groups
|
||||
for metric_filter in logs_client.metric_filters:
|
||||
if metric_filter.log_group in log_groups:
|
||||
if re.search(pattern, metric_filter.pattern, flags=re.DOTALL):
|
||||
report.resource_id = metric_filter.log_group
|
||||
report.resource_arn = metric_filter.arn
|
||||
report.region = metric_filter.region
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
|
||||
# 3. Check if there is an alarm for the metric
|
||||
for alarm in cloudwatch_client.metric_alarms:
|
||||
if alarm.metric == metric_filter.metric:
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
|
||||
break
|
||||
report = check_cloudwatch_log_metric_filter(
|
||||
pattern,
|
||||
cloudtrail_client.trails,
|
||||
logs_client.metric_filters,
|
||||
cloudwatch_client.metric_alarms,
|
||||
report,
|
||||
)
|
||||
|
||||
findings.append(report)
|
||||
return findings
|
||||
|
||||
@@ -0,0 +1,34 @@
import re

from prowler.lib.check.models import Check_Report_AWS


def check_cloudwatch_log_metric_filter(
    metric_filter_pattern: str,
    trails: list,
    metric_filters: list,
    metric_alarms: list,
    report: Check_Report_AWS,
):
    # 1. Iterate for CloudWatch Log Group in CloudTrail trails
    log_groups = []
    for trail in trails:
        if trail.log_group_arn:
            log_groups.append(trail.log_group_arn.split(":")[6])
    # 2. Describe metric filters for previous log groups
    for metric_filter in metric_filters:
        if metric_filter.log_group in log_groups:
            if re.search(metric_filter_pattern, metric_filter.pattern, flags=re.DOTALL):
                report.resource_id = metric_filter.log_group
                report.resource_arn = metric_filter.arn
                report.region = metric_filter.region
                report.status = "FAIL"
                report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} but no alarms associated."
                # 3. Check if there is an alarm for the metric
                for alarm in metric_alarms:
                    if alarm.metric == metric_filter.metric:
                        report.status = "PASS"
                        report.status_extended = f"CloudWatch log group {metric_filter.log_group} found with metric filter {metric_filter.name} and alarms set."
                        break

    return report
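For reference, a minimal sketch of how this shared helper behaves, using SimpleNamespace stand-ins for prowler's trail, filter, alarm and report models (the helper is assumed importable from the new module; the ARNs and pattern below are made up):

# Minimal sketch, not part of the diff: exercises check_cloudwatch_log_metric_filter
# with hypothetical stand-in objects instead of prowler's real service models.
from types import SimpleNamespace

trail = SimpleNamespace(
    log_group_arn="arn:aws:logs:us-east-1:123456789012:log-group:trail-lg"
)
metric_filter = SimpleNamespace(
    log_group="trail-lg",  # matches element 6 of the log group ARN above
    pattern="{ $.eventName = CreateUser }",
    name="user-changes",
    arn="filter-arn",
    region="us-east-1",
    metric="UserChangesMetric",
)
alarm = SimpleNamespace(metric="UserChangesMetric")
report = SimpleNamespace(
    status="", status_extended="", resource_id="", resource_arn="", region=""
)

# With a matching alarm the report ends PASS; drop `alarm` from the list and it stays FAIL.
result = check_cloudwatch_log_metric_filter(
    "CreateUser", [trail], [metric_filter], [alarm], report
)
print(result.status)  # PASS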
@@ -16,7 +16,7 @@ class codeartifact_packages_external_public_publishing_disabled(Check):
                report = Check_Report_AWS(self.metadata())
                report.region = repository.region
                report.resource_id = package.name
                report.resource_arn = repository.arn
                report.resource_arn = f"{repository.arn}/{package.namespace + ':' if package.namespace else ''}{package.name}"
                report.resource_tags = repository.tags

                if package.latest_version.origin.origin_type in (

@@ -63,7 +63,7 @@ class CodeArtifact(AWSService):
                    list_packages_parameters = {
                        "domain": self.repositories[repository].domain_name,
                        "domainOwner": self.repositories[repository].domain_owner,
                        "repository": repository,
                        "repository": self.repositories[repository].name,
                    }
                    packages = []
                    for page in list_packages_paginator.paginate(
@@ -83,18 +83,37 @@ class CodeArtifact(AWSService):
                            ]
                        )
                        # Get Latest Package Version
                        latest_version_information = (
                            regional_client.list_package_versions(
                                domain=self.repositories[repository].domain_name,
                                domainOwner=self.repositories[
                                    repository
                                ].domain_owner,
                                repository=repository,
                                format=package_format,
                                package=package_name,
                                sortBy="PUBLISHED_TIME",
                        if package_namespace:
                            latest_version_information = (
                                regional_client.list_package_versions(
                                    domain=self.repositories[
                                        repository
                                    ].domain_name,
                                    domainOwner=self.repositories[
                                        repository
                                    ].domain_owner,
                                    repository=self.repositories[repository].name,
                                    format=package_format,
                                    namespace=package_namespace,
                                    package=package_name,
                                    sortBy="PUBLISHED_TIME",
                                )
                            )
                        else:
                            latest_version_information = (
                                regional_client.list_package_versions(
                                    domain=self.repositories[
                                        repository
                                    ].domain_name,
                                    domainOwner=self.repositories[
                                        repository
                                    ].domain_owner,
                                    repository=self.repositories[repository].name,
                                    format=package_format,
                                    package=package_name,
                                    sortBy="PUBLISHED_TIME",
                                )
                            )
                            )
                        latest_version = ""
                        latest_origin_type = "UNKNOWN"
                        latest_status = "Published"

prowler/providers/aws/services/cognito/__init__.py (new file)
@@ -0,0 +1,4 @@
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.cognito.cognito_service import CognitoIDP

cognito_idp_client = CognitoIDP(current_audit_info)
prowler/providers/aws/services/cognito/cognito_service.py (new file)
@@ -0,0 +1,122 @@
from datetime import datetime
from typing import Optional

from pydantic import BaseModel

from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService


################## CognitoIDP
class CognitoIDP(AWSService):
    def __init__(self, audit_info):
        super().__init__("cognito-idp", audit_info)
        self.user_pools = {}
        self.__threading_call__(self.__list_user_pools__)
        self.__describe_user_pools__()
        self.__get_user_pool_mfa_config__()

    def __list_user_pools__(self, regional_client):
        logger.info("Cognito - Listing User Pools...")
        try:
            user_pools_paginator = regional_client.get_paginator("list_user_pools")
            for page in user_pools_paginator.paginate(MaxResults=60):
                for user_pool in page["UserPools"]:
                    arn = f"arn:{self.audited_partition}:cognito-idp:{regional_client.region}:{self.audited_account}:userpool/{user_pool['Id']}"
                    if not self.audit_resources or (
                        is_resource_filtered(arn, self.audit_resources)
                    ):
                        try:
                            self.user_pools[arn] = UserPool(
                                id=user_pool["Id"],
                                arn=arn,
                                name=user_pool["Name"],
                                region=regional_client.region,
                                last_modified=user_pool["LastModifiedDate"],
                                creation_date=user_pool["CreationDate"],
                                status=user_pool.get("Status", "Disabled"),
                            )
                        except Exception as error:
                            logger.error(
                                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                            )
        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_user_pools__(self):
        logger.info("Cognito - Describing User Pools...")
        try:
            for user_pool in self.user_pools.values():
                try:
                    user_pool_details = self.regional_clients[
                        user_pool.region
                    ].describe_user_pool(UserPoolId=user_pool.id)["UserPool"]
                    user_pool.password_policy = user_pool_details.get(
                        "Policies", {}
                    ).get("PasswordPolicy", {})
                    user_pool.deletion_protection = user_pool_details.get(
                        "DeletionProtection", "INACTIVE"
                    )
                    user_pool.advanced_security_mode = user_pool_details.get(
                        "UserPoolAddOns", {}
                    ).get("AdvancedSecurityMode", "OFF")
                    user_pool.tags = [user_pool_details.get("UserPoolTags", "")]
                except Exception as error:
                    logger.error(
                        f"{user_pool.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                    )
        except Exception as error:
            logger.error(
                f"{user_pool.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __get_user_pool_mfa_config__(self):
        logger.info("Cognito - Getting User Pool MFA Configuration...")
        try:
            for user_pool in self.user_pools.values():
                try:
                    mfa_config = self.regional_clients[
                        user_pool.region
                    ].get_user_pool_mfa_config(UserPoolId=user_pool.id)
                    if mfa_config["MfaConfiguration"] != "OFF":
                        user_pool.mfa_config = MFAConfig(
                            sms_authentication=mfa_config.get(
                                "SmsMfaConfiguration", {}
                            ),
                            software_token_mfa_authentication=mfa_config.get(
                                "SoftwareTokenMfaConfiguration", {}
                            ),
                            status=mfa_config["MfaConfiguration"],
                        )
                except Exception as error:
                    logger.error(
                        f"{user_pool.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                    )
        except Exception as error:
            logger.error(
                f"{user_pool.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )


class MFAConfig(BaseModel):
    sms_authentication: Optional[dict]
    software_token_mfa_authentication: Optional[dict]
    status: str


class UserPool(BaseModel):
    id: str
    arn: str
    name: str
    region: str
    advanced_security_mode: str = "OFF"
    deletion_protection: str = "INACTIVE"
    last_modified: datetime
    creation_date: datetime
    status: str
    password_policy: Optional[dict]
    mfa_config: Optional[MFAConfig]
    tags: Optional[list] = []
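With the client instantiated in __init__.py, checks can consume the collected user pool data in the usual prowler shape. A hypothetical sketch of such a check (the class name, import path and messages are illustrative assumptions, not part of this diff):

# Illustrative only: a hypothetical check consuming the new Cognito client.
# Class name, import path and wording are assumptions, not from this diff.
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.cognito import cognito_idp_client


class cognito_user_pool_mfa_enabled(Check):
    def execute(self):
        findings = []
        for user_pool in cognito_idp_client.user_pools.values():
            report = Check_Report_AWS(self.metadata())
            report.region = user_pool.region
            report.resource_id = user_pool.id
            report.resource_arn = user_pool.arn
            # mfa_config is only populated when MfaConfiguration != "OFF"
            if user_pool.mfa_config and user_pool.mfa_config.status == "ON":
                report.status = "PASS"
                report.status_extended = f"User pool {user_pool.name} has MFA enabled."
            else:
                report.status = "FAIL"
                report.status_extended = f"User pool {user_pool.name} does not enforce MFA."
            findings.append(report)
        return findings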
@@ -18,10 +18,18 @@ class ec2_securitygroup_not_used(Check):
                report.status = "PASS"
                report.status_extended = f"Security group {security_group.name} ({security_group.id}) it is being used."
                sg_in_lambda = False
                sg_associated = False
                for function in awslambda_client.functions.values():
                    if security_group.id in function.security_groups:
                        sg_in_lambda = True
                if len(security_group.network_interfaces) == 0 and not sg_in_lambda:
                for sg in ec2_client.security_groups:
                    if security_group.id in sg.associated_sgs:
                        sg_associated = True
                if (
                    len(security_group.network_interfaces) == 0
                    and not sg_in_lambda
                    and not sg_associated
                ):
                    report.status = "FAIL"
                    report.status_extended = f"Security group {security_group.name} ({security_group.id}) it is not being used."

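The amended logic only fails a security group when it has no network interfaces, is not attached to any Lambda function, and is not referenced from another group's rules. A standalone sketch of that decision with plain dicts standing in for prowler's models (field names mirror the diff above):

# Sketch of the updated "unused security group" decision, simplified inputs.
def security_group_is_unused(sg, lambda_functions, all_security_groups):
    # In use if attached to any network interface
    if sg["network_interfaces"]:
        return False
    # In use if referenced by any Lambda function's VPC config
    if any(sg["id"] in fn["security_groups"] for fn in lambda_functions):
        return False
    # In use if referenced from another security group's ingress rules
    if any(sg["id"] in other["associated_sgs"] for other in all_security_groups):
        return False
    return True

sg = {"id": "sg-123", "network_interfaces": []}
print(security_group_is_unused(
    sg,
    [{"security_groups": ["sg-999"]}],
    [{"associated_sgs": ["sg-123"]}],
))  # False: no ENIs, but still referenced by another group's rules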
@@ -17,7 +17,7 @@ class EC2(AWSService):
        super().__init__(__class__.__name__, audit_info)
        self.instances = []
        self.__threading_call__(self.__describe_instances__)
        self.__get_instance_user_data__()
        self.__threading_call__(self.__get_instance_user_data__, self.instances)
        self.security_groups = []
        self.regions_with_sgs = []
        self.__threading_call__(self.__describe_security_groups__)
@@ -27,7 +27,7 @@ class EC2(AWSService):
        self.volumes_with_snapshots = {}
        self.regions_with_snapshots = {}
        self.__threading_call__(self.__describe_snapshots__)
        self.__get_snapshot_public__()
        self.__threading_call__(self.__determine_public_snapshots__, self.snapshots)
        self.network_interfaces = []
        self.__threading_call__(self.__describe_public_network_interfaces__)
        self.__threading_call__(self.__describe_sg_network_interfaces__)
@@ -36,12 +36,11 @@ class EC2(AWSService):
        self.volumes = []
        self.__threading_call__(self.__describe_volumes__)
        self.ebs_encryption_by_default = []
        self.__threading_call__(self.__get_ebs_encryption_by_default__)
        self.__threading_call__(self.__get_ebs_encryption_settings__)
        self.elastic_ips = []
        self.__threading_call__(self.__describe_addresses__)
        self.__threading_call__(self.__describe_ec2_addresses__)

    def __describe_instances__(self, regional_client):
        logger.info("EC2 - Describing EC2 Instances...")
        try:
            describe_instances_paginator = regional_client.get_paginator(
                "describe_instances"
@@ -106,7 +105,6 @@ class EC2(AWSService):
            )

    def __describe_security_groups__(self, regional_client):
        logger.info("EC2 - Describing Security Groups...")
        try:
            describe_security_groups_paginator = regional_client.get_paginator(
                "describe_security_groups"
@@ -117,6 +115,7 @@ class EC2(AWSService):
                    if not self.audit_resources or (
                        is_resource_filtered(arn, self.audit_resources)
                    ):
                        associated_sgs = []
                        # check if sg has public access to all ports
                        all_public_ports = False
                        for ingress_rule in sg["IpPermissions"]:
@@ -128,7 +127,10 @@ class EC2(AWSService):
                                in self.audited_checks
                            ):
                                all_public_ports = True
                                break
                            # check associated security groups
                            for sg_group in ingress_rule.get("UserIdGroupPairs", []):
                                if sg_group.get("GroupId"):
                                    associated_sgs.append(sg_group["GroupId"])
                        self.security_groups.append(
                            SecurityGroup(
                                name=sg["GroupName"],
@@ -138,6 +140,7 @@ class EC2(AWSService):
                                ingress_rules=sg["IpPermissions"],
                                egress_rules=sg["IpPermissionsEgress"],
                                public_ports=all_public_ports,
                                associated_sgs=associated_sgs,
                                vpc_id=sg["VpcId"],
                                tags=sg.get("Tags"),
                            )
@@ -150,7 +153,6 @@ class EC2(AWSService):
            )

    def __describe_network_acls__(self, regional_client):
        logger.info("EC2 - Describing Network ACLs...")
        try:
            describe_network_acls_paginator = regional_client.get_paginator(
                "describe_network_acls"
@@ -181,7 +183,6 @@ class EC2(AWSService):
            )

    def __describe_snapshots__(self, regional_client):
        logger.info("EC2 - Describing Snapshots...")
        try:
            snapshots_in_region = False
            describe_snapshots_paginator = regional_client.get_paginator(
@@ -214,35 +215,30 @@ class EC2(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __get_snapshot_public__(self):
        logger.info("EC2 - Getting snapshot volume attribute permissions...")
        for snapshot in self.snapshots:
            try:
                regional_client = self.regional_clients[snapshot.region]
                snapshot_public = regional_client.describe_snapshot_attribute(
                    Attribute="createVolumePermission", SnapshotId=snapshot.id
                )
                for permission in snapshot_public["CreateVolumePermissions"]:
                    if "Group" in permission:
                        if permission["Group"] == "all":
                            snapshot.public = True
    def __determine_public_snapshots__(self, snapshot):
        try:
            regional_client = self.regional_clients[snapshot.region]
            snapshot_public = regional_client.describe_snapshot_attribute(
                Attribute="createVolumePermission", SnapshotId=snapshot.id
            )
            for permission in snapshot_public["CreateVolumePermissions"]:
                if "Group" in permission:
                    if permission["Group"] == "all":
                        snapshot.public = True

            except ClientError as error:
                if error.response["Error"]["Code"] == "InvalidSnapshot.NotFound":
                    logger.warning(
                        f"{snapshot.region} --"
                        f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                        f" {error}"
                    )
                    continue

            except Exception as error:
                logger.error(
                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        except ClientError as error:
            if error.response["Error"]["Code"] == "InvalidSnapshot.NotFound":
                logger.warning(
                    f"{snapshot.region} --"
                    f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                    f" {error}"
                )
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_public_network_interfaces__(self, regional_client):
        logger.info("EC2 - Describing Network Interfaces...")
        try:
            # Get Network Interfaces with Public IPs
            describe_network_interfaces_paginator = regional_client.get_paginator(
@@ -269,7 +265,6 @@ class EC2(AWSService):
            )

    def __describe_sg_network_interfaces__(self, regional_client):
        logger.info("EC2 - Describing Network Interfaces...")
        try:
            # Get Network Interfaces for Security Groups
            for sg in self.security_groups:
@@ -294,30 +289,25 @@ class EC2(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __get_instance_user_data__(self):
        logger.info("EC2 - Getting instance user data...")
        for instance in self.instances:
            try:
                regional_client = self.regional_clients[instance.region]
                user_data = regional_client.describe_instance_attribute(
                    Attribute="userData", InstanceId=instance.id
                )["UserData"]
                if "Value" in user_data:
                    instance.user_data = user_data["Value"]

            except ClientError as error:
                if error.response["Error"]["Code"] == "InvalidInstanceID.NotFound":
                    logger.warning(
                        f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                    )
                    continue
            except Exception as error:
                logger.error(
    def __get_instance_user_data__(self, instance):
        try:
            regional_client = self.regional_clients[instance.region]
            user_data = regional_client.describe_instance_attribute(
                Attribute="userData", InstanceId=instance.id
            )["UserData"]
            if "Value" in user_data:
                instance.user_data = user_data["Value"]
        except ClientError as error:
            if error.response["Error"]["Code"] == "InvalidInstanceID.NotFound":
                logger.warning(
                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_images__(self, regional_client):
        logger.info("EC2 - Describing Images...")
        try:
            for image in regional_client.describe_images(Owners=["self"])["Images"]:
                arn = f"arn:{self.audited_partition}:ec2:{regional_client.region}:{self.audited_account}:image/{image['ImageId']}"
@@ -340,7 +330,6 @@ class EC2(AWSService):
            )

    def __describe_volumes__(self, regional_client):
        logger.info("EC2 - Describing Volumes...")
        try:
            describe_volumes_paginator = regional_client.get_paginator(
                "describe_volumes"
@@ -365,8 +354,7 @@ class EC2(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_addresses__(self, regional_client):
        logger.info("EC2 - Describing Elastic IPs...")
    def __describe_ec2_addresses__(self, regional_client):
        try:
            for address in regional_client.describe_addresses()["Addresses"]:
                public_ip = None
@@ -397,8 +385,7 @@ class EC2(AWSService):
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __get_ebs_encryption_by_default__(self, regional_client):
        logger.info("EC2 - Get EBS Encryption By Default...")
    def __get_ebs_encryption_settings__(self, regional_client):
        try:
            volumes_in_region = False
            for volume in self.volumes:
@@ -464,6 +451,7 @@ class SecurityGroup(BaseModel):
    id: str
    vpc_id: str
    public_ports: bool
    associated_sgs: list
    network_interfaces: list[str] = []
    ingress_rules: list[dict]
    egress_rules: list[dict]

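The recurring pattern in this EC2 refactor is replacing serial per-item loops (instance user data, snapshot permissions) with __threading_call__ fanned out over a collection. A minimal stand-in for that fan-out pattern (prowler's real helper lives in its AWSService base class and differs in detail):

# Minimal stand-in for the fan-out pattern used above; illustrative only.
from concurrent.futures import ThreadPoolExecutor

def threading_call(call, iterable):
    # Run `call` once per item in parallel and surface any worker errors.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for future in [executor.submit(call, item) for item in iterable]:
            future.result()

class Service:
    def __init__(self, snapshots):
        self.snapshots = snapshots
        # Old style: one method containing a serial `for snapshot in ...` loop.
        # New style: one call per snapshot, executed concurrently.
        threading_call(self.determine_public_snapshot, self.snapshots)

    def determine_public_snapshot(self, snapshot):
        snapshot["public"] = snapshot.get("create_volume_permissions") == "all"

svc = Service([{"create_volume_permissions": "all"}, {}])
print([s["public"] for s in svc.snapshots])  # [True, False]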
@@ -4,7 +4,6 @@ from pydantic import BaseModel

from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.aws_provider import generate_regional_clients
from prowler.providers.aws.lib.service.service import AWSService


@@ -13,7 +12,6 @@ class EKS(AWSService):
    def __init__(self, audit_info):
        # Call AWSService's __init__
        super().__init__(__class__.__name__, audit_info)
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        self.clusters = []
        self.__threading_call__(self.__list_clusters__)
        self.__describe_cluster__(self.regional_clients)

@@ -1,5 +1,6 @@
from typing import Optional

from botocore.exceptions import ClientError
from pydantic import BaseModel

from prowler.lib.logger import logger
@@ -73,7 +74,15 @@ class ElastiCache(AWSService):
                    cluster.tags = regional_client.list_tags_for_resource(
                        ResourceName=cluster.arn
                    )["TagList"]

                except ClientError as error:
                    if error.response["Error"]["Code"] == "CacheClusterNotFound":
                        logger.warning(
                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                        )
                    else:
                        logger.error(
                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                        )
                except Exception as error:
                    logger.error(
                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

@@ -33,7 +33,7 @@ class elbv2_insecure_ssl_ciphers(Check):
                and listener.ssl_policy not in secure_ssl_policies
            ):
                report.status = "FAIL"
                report.status_extended = f"ELBv2 {lb.name} has listeners with insecure SSL protocols or ciphers."
                report.status_extended = f"ELBv2 {lb.name} has listeners with insecure SSL protocols or ciphers ({listener.ssl_policy})."

            findings.append(report)

@@ -13,17 +13,21 @@ class fms_policy_compliant(Check):
        report.status = "PASS"
        report.status_extended = "FMS enabled with all compliant accounts."
        non_compliant_policy = False
        for policy in fms_client.fms_policies:
            for policy_to_account in policy.compliance_status:
                if policy_to_account.status == "NON_COMPLIANT":
                    report.status = "FAIL"
                    report.status_extended = f"FMS with non-compliant policy {policy.name} for account {policy_to_account.account_id}."
                    report.resource_id = policy.id
                    report.resource_arn = policy.arn
                    non_compliant_policy = True
        if fms_client.fms_policies:
            for policy in fms_client.fms_policies:
                for policy_to_account in policy.compliance_status:
                    if policy_to_account.status == "NON_COMPLIANT":
                        report.status = "FAIL"
                        report.status_extended = f"FMS with non-compliant policy {policy.name} for account {policy_to_account.account_id}."
                        report.resource_id = policy.id
                        report.resource_arn = policy.arn
                        non_compliant_policy = True
                        break
                if non_compliant_policy:
                    break
            if non_compliant_policy:
                break
        else:
            report.status = "FAIL"
            report.status_extended = f"FMS without any compliant policy for account {fms_client.audited_account}."

        findings.append(report)
        return findings

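The restructured check makes an empty policy list an explicit FAIL instead of an implicit PASS, and the added breaks stop scanning at the first non-compliant account. A condensed model of the resulting decision, with plain data in place of prowler's models:

# Condensed model of the fms_policy_compliant decision after this change.
def fms_status(policies, audited_account):
    if not policies:
        return "FAIL", f"FMS without any compliant policy for account {audited_account}."
    for policy in policies:
        for status in policy["compliance_status"]:
            if status == "NON_COMPLIANT":
                # First non-compliant account short-circuits the scan.
                return "FAIL", f"FMS with non-compliant policy {policy['name']}."
    return "PASS", "FMS enabled with all compliant accounts."

print(fms_status([], "123456789012")[0])                                         # FAIL
print(fms_status([{"name": "p1", "compliance_status": ["COMPLIANT"]}], "x")[0])  # PASS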
@@ -5,8 +5,6 @@ from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService

# from prowler.providers.aws.aws_provider import generate_regional_clients


################## FMS
class FMS(AWSService):
@@ -68,7 +66,9 @@ class FMS(AWSService):
                for page in list_compliance_status_paginator.paginate(
                    PolicyId=fms_policy.id
                ):
                    for fms_compliance_status in page["PolicyComplianceStatusList"]:
                    for fms_compliance_status in page.get(
                        "PolicyComplianceStatusList", []
                    ):
                        fms_policy.compliance_status.append(
                            PolicyAccountComplianceStatus(
                                account_id=fms_compliance_status.get("MemberAccount"),

@@ -6,7 +6,7 @@ class guardduty_centrally_managed(Check):
    def execute(self):
        findings = []
        for detector in guardduty_client.detectors:
            if detector.id:
            if detector.id and detector.enabled_in_account:
                report = Check_Report_AWS(self.metadata())
                report.region = detector.region
                report.resource_id = detector.id

@@ -6,7 +6,7 @@ class guardduty_no_high_severity_findings(Check):
    def execute(self):
        findings = []
        for detector in guardduty_client.detectors:
            if detector.id:
            if detector.id and detector.enabled_in_account:
                report = Check_Report_AWS(self.metadata())
                report.region = detector.region
                report.resource_id = detector.id

@@ -1,6 +1,6 @@
 from prowler.lib.check.models import Check, Check_Report_AWS
 from prowler.providers.aws.lib.policy_condition_parser.policy_condition_parser import (
-    is_account_only_allowed_in_condition,
+    is_condition_block_restrictive,
 )
 from prowler.providers.aws.services.iam.iam_client import iam_client
@@ -30,7 +30,7 @@ class iam_role_cross_service_confused_deputy_prevention(Check):
                     and "Service" in statement["Principal"]
                     # Check to see if the appropriate condition statements have been implemented
                     and "Condition" in statement
-                    and is_account_only_allowed_in_condition(
+                    and is_condition_block_restrictive(
                         statement["Condition"], iam_client.audited_account
                     )
                 ):
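The renamed helper still answers the same question for this check: does the statement's Condition pin the service principal to the audited account? For reference, a trust policy of the shape the check wants to see uses the standard aws:SourceAccount confused-deputy mitigation (illustrative values, not from the codebase):

    # Illustrative: the service may assume this role only on behalf of
    # resources living in account 123456789012.
    trust_policy_statement = {
        "Effect": "Allow",
        "Principal": {"Service": "sns.amazonaws.com"},
        "Action": "sts:AssumeRole",
        "Condition": {"StringEquals": {"aws:SourceAccount": "123456789012"}},
    }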
@@ -139,7 +139,10 @@ class IAM(AWSService):
                 logger.warning(
                     f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                 )
-
+            else:
+                logger.error(
+                    f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -208,14 +211,24 @@ class IAM(AWSService):
                 reuse_prevention=reuse_prevention,
                 hard_expiry=hard_expiry,
             )
-        except Exception as error:
-            if "NoSuchEntity" in str(error):
+        except ClientError as error:
+            if error.response["Error"]["Code"] == "NoSuchEntity":
+                # Password policy does not exist
                 stored_password_policy = None
+                logger.warning(
+                    f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
             else:
                 logger.error(
                     f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                 )
+
+        except Exception as error:
+            logger.error(
+                f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )

         finally:
             return stored_password_policy
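This hunk is the template for most of the IAM changes below: stop substring-matching on str(error), catch botocore's ClientError, branch on the structured error code, and downgrade the expected NoSuchEntity case to a warning while keeping a final except Exception as the catch-all. A minimal sketch of the pattern (logging simplified; get_account_password_policy is the real IAM call used here):

    import logging

    import boto3
    from botocore.exceptions import ClientError

    logger = logging.getLogger(__name__)
    iam = boto3.client("iam")

    password_policy = None
    try:
        password_policy = iam.get_account_password_policy()["PasswordPolicy"]
    except ClientError as error:
        if error.response["Error"]["Code"] == "NoSuchEntity":
            logger.warning("No password policy set: %s", error)  # expected state
        else:
            logger.error(error)
    except Exception as error:  # anything non-API-shaped is always an error
        logger.error(error)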
@@ -268,17 +281,22 @@ class IAM(AWSService):
         logger.info("IAM - List Attached Group Policies...")
         try:
             for group in self.groups:
-                list_attached_group_policies_paginator = self.client.get_paginator(
-                    "list_attached_group_policies"
-                )
-                attached_group_policies = []
-                for page in list_attached_group_policies_paginator.paginate(
-                    GroupName=group.name
-                ):
-                    for attached_group_policy in page["AttachedPolicies"]:
-                        attached_group_policies.append(attached_group_policy)
-
-                group.attached_policies = attached_group_policies
+                try:
+                    list_attached_group_policies_paginator = self.client.get_paginator(
+                        "list_attached_group_policies"
+                    )
+                    attached_group_policies = []
+                    for page in list_attached_group_policies_paginator.paginate(
+                        GroupName=group.name
+                    ):
+                        for attached_group_policy in page["AttachedPolicies"]:
+                            attached_group_policies.append(attached_group_policy)
+
+                    group.attached_policies = attached_group_policies
+                except Exception as error:
+                    logger.error(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -337,18 +355,33 @@ class IAM(AWSService):
         logger.info("IAM - List Attached User Policies...")
         try:
             for user in self.users:
-                attached_user_policies = []
-                get_user_attached_policies_paginator = self.client.get_paginator(
-                    "list_attached_user_policies"
-                )
-                for page in get_user_attached_policies_paginator.paginate(
-                    UserName=user.name
-                ):
-                    for policy in page["AttachedPolicies"]:
-                        attached_user_policies.append(policy)
-
-                user.attached_policies = attached_user_policies
+                try:
+                    attached_user_policies = []
+                    get_user_attached_policies_paginator = self.client.get_paginator(
+                        "list_attached_user_policies"
+                    )
+                    for page in get_user_attached_policies_paginator.paginate(
+                        UserName=user.name
+                    ):
+                        for policy in page["AttachedPolicies"]:
+                            attached_user_policies.append(policy)
+
+                    user.attached_policies = attached_user_policies
+
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
+                        logger.warning(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+
+                except Exception as error:
+                    logger.error(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -371,10 +404,19 @@ class IAM(AWSService):

                     role.attached_policies = attached_role_policies
                 except ClientError as error:
-                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
                         logger.warning(
                             f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                         )
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+
+                except Exception as error:
+                    logger.error(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
         except Exception as error:
             logger.error(
@@ -452,11 +494,30 @@ class IAM(AWSService):
                                     document=inline_group_policy_doc,
                                 )
                             )
+                        except ClientError as error:
+                            if error.response["Error"]["Code"] == "NoSuchEntity":
+                                logger.warning(
+                                    f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                                )
+                            else:
+                                logger.error(
+                                    f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                                )
                         except Exception as error:
                             logger.error(
                                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                             )
                     group.inline_policies = inline_group_policies
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
+                        logger.warning(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
                 except Exception as error:
                     logger.error(
@@ -639,8 +700,16 @@ class IAM(AWSService):
                     response = self.client.list_role_tags(RoleName=role.name)["Tags"]
                     role.tags = response
                 except ClientError as error:
-                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
                         role.tags = []
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                except Exception as error:
+                    logger.error(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

         except Exception as error:
             logger.error(
@@ -653,8 +722,12 @@ class IAM(AWSService):
                     response = self.client.list_user_tags(UserName=user.name)["Tags"]
                     user.tags = response
                 except ClientError as error:
-                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
                         user.tags = []
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )

                 except Exception as error:
                     logger.error(
@@ -664,13 +737,22 @@ class IAM(AWSService):
         try:
             for policy in self.policies:
                 try:
-                    response = self.client.list_policy_tags(PolicyArn=policy.arn)[
-                        "Tags"
-                    ]
-                    policy.tags = response
+                    if policy.type != "Inline":
+                        response = self.client.list_policy_tags(PolicyArn=policy.arn)[
+                            "Tags"
+                        ]
+                        policy.tags = response
                 except ClientError as error:
-                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
                         policy.tags = []
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                except Exception as error:
+                    logger.error(
+                        f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )

         except Exception as error:
             logger.error(
@@ -697,9 +779,19 @@ class IAM(AWSService):
                     ]

+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
+                        logger.warning(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
                 except Exception as error:
                     logger.error(
                         f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                     )

         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -717,6 +809,15 @@ class IAM(AWSService):
                         "AccessKeyMetadata"
                     ]
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntity":
+                        logger.warning(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    else:
+                        logger.error(
+                            f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
                 except Exception as error:
                     logger.error(
                         f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                     )
@@ -48,11 +48,12 @@ class organizations_scp_check_deny_regions(Check):
                         and "aws:RequestedRegion"
                         in statement["Condition"]["StringNotEquals"]
                     ):
-                        if (
-                            organizations_enabled_regions
-                            == statement["Condition"]["StringNotEquals"][
+                        if all(
+                            region
+                            in statement["Condition"]["StringNotEquals"][
                                 "aws:RequestedRegion"
                             ]
+                            for region in organizations_enabled_regions
                         ):
                             # All defined regions are restricted, we exit here, no need to continue.
                             report.status = "PASS"
@@ -73,11 +74,12 @@ class organizations_scp_check_deny_regions(Check):
                         and "aws:RequestedRegion"
                         in statement["Condition"]["StringEquals"]
                     ):
-                        if (
-                            organizations_enabled_regions
-                            == statement["Condition"]["StringEquals"][
+                        if all(
+                            region
+                            in statement["Condition"]["StringEquals"][
                                 "aws:RequestedRegion"
                             ]
+                            for region in organizations_enabled_regions
                        ):
                             # All defined regions are restricted, we exit here, no need to continue.
                             report.status = "PASS"
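Both hunks replace exact list equality with a subset test, so the check now passes when the SCP covers every enabled region even if it also names extra regions or orders them differently. A toy illustration of the difference:

    enabled_regions = ["eu-west-1", "us-east-1"]
    scp_regions = ["us-east-1", "eu-west-1", "eu-central-1"]  # superset, different order

    print(enabled_regions == scp_regions)  # False: ordering and extras break equality
    print(all(region in scp_regions for region in enabled_regions))  # True: subset is enough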
@@ -13,9 +13,14 @@ class rds_instance_deprecated_engine_version(Check):
             report.resource_arn = db_instance.arn
             report.resource_tags = db_instance.tags
             report.status_extended = f"RDS instance {db_instance.id} is using a deprecated engine {db_instance.engine} with version {db_instance.engine_version}."

             if (
-                db_instance.engine_version
+                hasattr(
+                    rds_client.db_engines.get(db_instance.region, {}).get(
+                        db_instance.engine, {}
+                    ),
+                    "engine_versions",
+                )
+                and db_instance.engine_version
                 in rds_client.db_engines[db_instance.region][
                     db_instance.engine
                 ].engine_versions
@@ -28,6 +28,7 @@ class S3(AWSService):
         self.__threading_call__(self.__get_bucket_tagging__)

     # In the S3 service we override the "__threading_call__" method because we spawn a process per bucket instead of per region
+    # TODO: Replace the above function with the service __threading_call__ using the buckets as the iterator
     def __threading_call__(self, call):
         threads = []
         for bucket in self.buckets:
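As the comment explains, this service fans out one worker per bucket rather than one per region (despite the wording, threads = [] shows these are threads, not processes). A stripped-down sketch of that shape, generic rather than Prowler's actual helper:

    import threading

    def threading_call_over(items, call):
        # One thread per item; wait for all of them before returning.
        threads = [threading.Thread(target=call, args=(item,)) for item in items]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    # e.g. threading_call_over(self.buckets, self.__get_bucket_versioning__)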
@@ -101,6 +102,15 @@ class S3(AWSService):
                 if "MFADelete" in bucket_versioning:
                     if "Enabled" == bucket_versioning["MFADelete"]:
                         bucket.mfa_delete = True
+        except ClientError as error:
+            if error.response["Error"]["Code"] == "NoSuchBucket":
+                logger.warning(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
         except Exception as error:
             if bucket.region:
                 logger.error(
@@ -153,6 +163,15 @@ class S3(AWSService):
                 bucket.logging_target_bucket = bucket_logging["LoggingEnabled"][
                     "TargetBucket"
                 ]
+        except ClientError as error:
+            if error.response["Error"]["Code"] == "NoSuchBucket":
+                logger.warning(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
         except Exception as error:
             if regional_client:
                 logger.error(
@@ -224,6 +243,15 @@ class S3(AWSService):
                     grantee.permission = grant["Permission"]
                     grantees.append(grantee)
                 bucket.acl_grantees = grantees
+        except ClientError as error:
+            if error.response["Error"]["Code"] == "NoSuchBucket":
+                logger.warning(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
         except Exception as error:
             if regional_client:
                 logger.error(
@@ -241,18 +269,26 @@ class S3(AWSService):
             bucket.policy = json.loads(
                 regional_client.get_bucket_policy(Bucket=bucket.name)["Policy"]
             )
-        except Exception as error:
-            if "NoSuchBucketPolicy" in str(error):
+        except ClientError as error:
+            if error.response["Error"]["Code"] == "NoSuchBucketPolicy":
                 bucket.policy = {}
+            elif error.response["Error"]["Code"] == "NoSuchBucket":
+                logger.warning(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
             else:
-                if regional_client:
-                    logger.error(
-                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                    )
-                else:
-                    logger.error(
-                        f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
-                    )
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+        except Exception as error:
+            if regional_client:
+                logger.error(
+                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )
+            else:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )

     def __get_bucket_ownership_controls__(self, bucket):
         logger.info("S3 - Get buckets ownership controls...")
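Note the asymmetry in the hunk above: NoSuchBucketPolicy is not treated as an error at all, since a bucket with no policy is a valid state recorded as an empty dict, while only NoSuchBucket is downgraded to a warning. The same idea as a standalone sketch:

    import json

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client("s3")

    def get_bucket_policy(bucket_name):
        try:
            return json.loads(s3.get_bucket_policy(Bucket=bucket_name)["Policy"])
        except ClientError as error:
            if error.response["Error"]["Code"] == "NoSuchBucketPolicy":
                return {}  # no policy attached: a legitimate, empty answer
            raise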
@@ -1,6 +1,6 @@
 from prowler.lib.check.models import Check, Check_Report_AWS
 from prowler.providers.aws.lib.policy_condition_parser.policy_condition_parser import (
-    is_account_only_allowed_in_condition,
+    is_condition_block_restrictive,
 )
 from prowler.providers.aws.services.sns.sns_client import sns_client
@@ -35,7 +35,7 @@ class sns_topics_not_publicly_accessible(Check):
                 ):
                     if (
                         "Condition" in statement
-                        and is_account_only_allowed_in_condition(
+                        and is_condition_block_restrictive(
                             statement["Condition"], sns_client.audited_account
                         )
                     ):
@@ -1,6 +1,6 @@
 from prowler.lib.check.models import Check, Check_Report_AWS
 from prowler.providers.aws.lib.policy_condition_parser.policy_condition_parser import (
-    is_account_only_allowed_in_condition,
+    is_condition_block_restrictive,
 )
 from prowler.providers.aws.services.sqs.sqs_client import sqs_client
@@ -32,8 +32,10 @@ class sqs_queues_not_publicly_accessible(Check):
                     )
                 ):
                     if "Condition" in statement:
-                        if is_account_only_allowed_in_condition(
-                            statement["Condition"], sqs_client.audited_account
+                        if is_condition_block_restrictive(
+                            statement["Condition"],
+                            sqs_client.audited_account,
+                            True,
                         ):
                             report.status_extended = f"SQS queue {queue.id} is not public because its policy only allows access from the same account."
                         else:
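The extra True argument is not explained in this diff, so treat the signature as inferred from the call sites; what the parser decides is whether a Condition pins a wildcard principal down to the audited account. Two illustrative queue policies, made up for this sketch, show the two states being distinguished:

    # Public: wildcard principal, no condition at all.
    public_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": "*", "Action": "sqs:SendMessage", "Resource": "*"}
        ],
    }

    # Not public: the wildcard is constrained to a single source account.
    restricted_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": "*",
                "Action": "sqs:SendMessage",
                "Resource": "*",
                "Condition": {"StringEquals": {"aws:SourceAccount": "123456789012"}},
            }
        ],
    }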
@@ -16,23 +16,30 @@ class SQS(AWSService):
         super().__init__(__class__.__name__, audit_info)
         self.queues = []
         self.__threading_call__(self.__list_queues__)
-        self.__get_queue_attributes__(self.regional_clients)
+        self.__get_queue_attributes__()
         self.__list_queue_tags__()

     def __list_queues__(self, regional_client):
         logger.info("SQS - describing queues...")
         try:
             list_queues_paginator = regional_client.get_paginator("list_queues")
-            for page in list_queues_paginator.paginate():
+            # The SQS API uses nonstandard pagination
+            # you must specify a PageSize if there are more than 1000 queues
+            for page in list_queues_paginator.paginate(
+                PaginationConfig={"PageSize": 1000}
+            ):
                 if "QueueUrls" in page:
                     for queue in page["QueueUrls"]:
-                        arn = f"arn:{self.audited_partition}:sqs:{regional_client.region}:{self.audited_account}:{queue}"
+                        # the queue name is the last path segment of the url
+                        queue_name = queue.split("/")[-1]
+                        arn = f"arn:{self.audited_partition}:sqs:{regional_client.region}:{self.audited_account}:{queue_name}"
                         if not self.audit_resources or (
                             is_resource_filtered(arn, self.audit_resources)
                         ):
                             self.queues.append(
                                 Queue(
                                     arn=arn,
+                                    name=queue_name,
                                     id=queue,
                                     region=regional_client.region,
                                 )
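The comment in the hunk is the whole story: without an explicit page size the ListQueues paginator can stop after the first 1,000 results, so pinning PageSize forces real page-by-page iteration. A standalone sketch of the call, including the name-from-URL split added above:

    import boto3

    sqs = boto3.client("sqs")
    paginator = sqs.get_paginator("list_queues")

    for page in paginator.paginate(PaginationConfig={"PageSize": 1000}):
        for queue_url in page.get("QueueUrls", []):
            # https://sqs.eu-west-1.amazonaws.com/123456789012/my-queue -> my-queue
            queue_name = queue_url.split("/")[-1]
            print(queue_name)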
@@ -42,28 +49,46 @@ class SQS(AWSService):
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

-    def __get_queue_attributes__(self, regional_clients):
+    def __get_queue_attributes__(self):
         try:
             logger.info("SQS - describing queue attributes...")
             for queue in self.queues:
-                regional_client = regional_clients[queue.region]
-                queue_attributes = regional_client.get_queue_attributes(
-                    QueueUrl=queue.id, AttributeNames=["All"]
-                )
-                if "Attributes" in queue_attributes:
-                    if "Policy" in queue_attributes["Attributes"]:
-                        queue.policy = loads(queue_attributes["Attributes"]["Policy"])
-                    if "KmsMasterKeyId" in queue_attributes["Attributes"]:
-                        queue.kms_key_id = queue_attributes["Attributes"][
-                            "KmsMasterKeyId"
-                        ]
-                    if "SqsManagedSseEnabled" in queue_attributes["Attributes"]:
-                        if (
-                            queue_attributes["Attributes"]["SqsManagedSseEnabled"]
-                            == "true"
-                        ):
-                            queue.kms_key_id = "SqsManagedSseEnabled"
-
+                try:
+                    regional_client = self.regional_clients[queue.region]
+                    queue_attributes = regional_client.get_queue_attributes(
+                        QueueUrl=queue.id, AttributeNames=["All"]
+                    )
+                    if "Attributes" in queue_attributes:
+                        if "Policy" in queue_attributes["Attributes"]:
+                            queue.policy = loads(
+                                queue_attributes["Attributes"]["Policy"]
+                            )
+                        if "KmsMasterKeyId" in queue_attributes["Attributes"]:
+                            queue.kms_key_id = queue_attributes["Attributes"][
+                                "KmsMasterKeyId"
+                            ]
+                        if "SqsManagedSseEnabled" in queue_attributes["Attributes"]:
+                            if (
+                                queue_attributes["Attributes"]["SqsManagedSseEnabled"]
+                                == "true"
+                            ):
+                                queue.kms_key_id = "SqsManagedSseEnabled"
+                except ClientError as error:
+                    if (
+                        error.response["Error"]["Code"]
+                        == "AWS.SimpleQueueService.NonExistentQueue"
+                    ):
+                        logger.warning(
+                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                    else:
+                        logger.error(
+                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                        )
+                except Exception as error:
+                    logger.error(
+                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
         except Exception as error:
             logger.error(
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -87,6 +112,14 @@ class SQS(AWSService):
                     logger.warning(
                         f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                     )
+                else:
+                    logger.error(
+                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+            except Exception as error:
+                logger.error(
+                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                )

         except Exception as error:
             logger.error(
@@ -96,6 +129,7 @@ class SQS(AWSService):

 class Queue(BaseModel):
     id: str
+    name: str
     arn: str
     region: str
     policy: dict = None
@@ -7,7 +7,7 @@
   "SubServiceName": "",
   "ResourceIdTemplate": "arn:aws:iam::AWS_ACCOUNT_NUMBER:root",
   "Severity": "low",
-  "ResourceType": "",
+  "ResourceType": "Other",
   "Description": "Check if a Premium support plan is subscribed.",
   "Risk": "Ensure that the appropriate support level is enabled for the necessary AWS accounts. For example, if an AWS account is being used to host production systems and environments, it is highly recommended that the minimum AWS Support Plan should be Business.",
   "RelatedUrl": "https://aws.amazon.com/premiumsupport/plans/",
@@ -34,9 +34,9 @@ class TrustedAdvisor(AWSService):
     def __describe_trusted_advisor_checks__(self):
         logger.info("TrustedAdvisor - Describing Checks...")
         try:
-            for check in self.client.describe_trusted_advisor_checks(language="en")[
-                "checks"
-            ]:
+            for check in self.client.describe_trusted_advisor_checks(language="en").get(
+                "checks", []
+            ):
                 self.checks.append(
                     Check(
                         id=check["id"],
@@ -5,22 +5,23 @@ from prowler.providers.aws.services.vpc.vpc_client import vpc_client
 class vpc_different_regions(Check):
     def execute(self):
         findings = []
-        vpc_regions = set()
-        for vpc in vpc_client.vpcs.values():
-            if not vpc.default:
-                vpc_regions.add(vpc.region)
+        if len(vpc_client.vpcs) > 0:
+            vpc_regions = set()
+            for vpc in vpc_client.vpcs.values():
+                if not vpc.default:
+                    vpc_regions.add(vpc.region)

-        report = Check_Report_AWS(self.metadata())
-        report.region = vpc_client.region
-        report.resource_id = vpc_client.audited_account
-        report.resource_arn = vpc_client.audited_account_arn
-        report.status = "FAIL"
-        report.status_extended = "VPCs found only in one region."
-        if len(vpc_regions) > 1:
-            report.status = "PASS"
-            report.status_extended = "VPCs found in more than one region."
+            report = Check_Report_AWS(self.metadata())
+            # This is a global check under the vpc service: region, resource_id and tags are not relevant here but we keep them for consistency
+            report.region = vpc_client.region
+            report.resource_id = vpc_client.audited_account
+            report.resource_arn = vpc_client.audited_account_arn

-        findings.append(report)
+            report.status = "FAIL"
+            report.status_extended = "VPCs found only in one region."
+
+            if len(vpc_regions) > 1:
+                report.status = "PASS"
+                report.status_extended = "VPCs found in more than one region."
+            findings.append(report)

         return findings
@@ -2,7 +2,7 @@ from re import compile

 from prowler.lib.check.models import Check, Check_Report_AWS
 from prowler.providers.aws.lib.policy_condition_parser.policy_condition_parser import (
-    is_account_only_allowed_in_condition,
+    is_condition_block_restrictive,
 )
 from prowler.providers.aws.services.vpc.vpc_client import vpc_client
@@ -35,7 +35,7 @@ class vpc_endpoint_connections_trust_boundaries(Check):

                             if "Condition" in statement:
                                 for account_id in trusted_account_ids:
-                                    if is_account_only_allowed_in_condition(
+                                    if is_condition_block_restrictive(
                                         statement["Condition"], account_id
                                     ):
                                         access_from_trusted_accounts = True

@@ -70,7 +70,7 @@ class vpc_endpoint_connections_trust_boundaries(Check):
                                 access_from_trusted_accounts = False
                                 if "Condition" in statement:
                                     for account_id in trusted_account_ids:
-                                        if is_account_only_allowed_in_condition(
+                                        if is_condition_block_restrictive(
                                             statement["Condition"], account_id
                                         ):
                                             access_from_trusted_accounts = True

@@ -102,7 +102,7 @@ class vpc_endpoint_connections_trust_boundaries(Check):

                             if "Condition" in statement:
                                 for account_id in trusted_account_ids:
-                                    if is_account_only_allowed_in_condition(
+                                    if is_condition_block_restrictive(
                                         statement["Condition"], account_id
                                     ):
                                         access_from_trusted_accounts = True
@@ -7,6 +7,7 @@ from msgraph.core import GraphClient

 from prowler.lib.logger import logger
 from prowler.providers.azure.lib.audit_info.models import Azure_Identity_Info
+from prowler.providers.azure.lib.regions.regions import get_regions_config


 class Azure_Provider:
@@ -18,12 +19,14 @@ class Azure_Provider:
         managed_entity_auth: bool,
         subscription_ids: list,
         tenant_id: str,
+        region: str,
     ):
         logger.info("Instantiating Azure Provider ...")
-        self.credentials = self.__set_credentials__(
+        self.region_config = self.__get_region_config__(region)
+        self.credentials = self.__get_credentials__(
             az_cli_auth, sp_env_auth, browser_auth, managed_entity_auth, tenant_id
         )
-        self.identity = self.__set_identity_info__(
+        self.identity = self.__get_identity_info__(
             self.credentials,
             az_cli_auth,
             sp_env_auth,

@@ -32,7 +35,10 @@ class Azure_Provider:
             subscription_ids,
         )

-    def __set_credentials__(
+    def __get_region_config__(self, region):
+        return get_regions_config(region)
+
+    def __get_credentials__(
         self, az_cli_auth, sp_env_auth, browser_auth, managed_entity_auth, tenant_id
     ):
         # Browser auth creds cannot be set with DefaultAzureCredentials()
@@ -52,6 +58,8 @@ class Azure_Provider:
                     exclude_shared_token_cache_credential=True,
                     # Azure Auth using PowerShell is not supported
                     exclude_powershell_credential=True,
+                    # set Authority of a Microsoft Entra endpoint
+                    authority=self.region_config["authority"],
                 )
             except Exception as error:
                 logger.critical("Failed to retrieve azure credentials")

@@ -61,7 +69,6 @@ class Azure_Provider:
                 sys.exit(1)
         else:
             try:
-                print(tenant_id)
                 credentials = InteractiveBrowserCredential(tenant_id=tenant_id)
             except Exception as error:
                 logger.critical("Failed to retrieve azure credentials")
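Setting authority redirects token acquisition to the Microsoft Entra endpoint of the selected cloud, which is what makes the sovereign-cloud support in this release work end to end. A minimal sketch for the US Government cloud (endpoint values here are the well-known public ones, not read from this diff):

    from azure.identity import AzureAuthorityHosts, DefaultAzureCredential
    from azure.mgmt.resource import SubscriptionClient

    # Both halves must point at the sovereign cloud: the token authority...
    credential = DefaultAzureCredential(authority=AzureAuthorityHosts.AZURE_GOVERNMENT)
    # ...and the management-plane endpoint plus its token scope.
    client = SubscriptionClient(
        credential=credential,
        base_url="https://management.usgovcloudapi.net",
        credential_scopes=["https://management.usgovcloudapi.net/.default"],
    )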
@@ -83,7 +90,7 @@ class Azure_Provider:
             )
             sys.exit(1)

-    def __set_identity_info__(
+    def __get_identity_info__(
         self,
         credentials,
         az_cli_auth,
@@ -153,7 +160,11 @@ class Azure_Provider:
             logger.info(
                 "Trying to subscriptions and tenant ids to populate identity structure ..."
             )
-            subscriptions_client = SubscriptionClient(credential=credentials)
+            subscriptions_client = SubscriptionClient(
+                credential=credentials,
+                base_url=self.region_config["base_url"],
+                credential_scopes=self.region_config["credential_scopes"],
+            )
             if not subscription_ids:
                 logger.info("Scanning all the Azure subscriptions...")
                 for subscription in subscriptions_client.subscriptions.list():

@@ -195,3 +206,6 @@ class Azure_Provider:

     def get_identity(self):
         return self.identity
+
+    def get_region_config(self):
+        return self.region_config
@@ -1,3 +1,6 @@
+from argparse import ArgumentTypeError
+
+
 def init_parser(self):
     """Init the Azure Provider CLI parser"""
     azure_parser = self.subparsers.add_parser(

@@ -40,3 +43,27 @@ def init_parser(self):
         default=None,
         help="Azure Tenant ID to be used with --browser-auth option",
     )
+    # Regions
+    azure_regions_subparser = azure_parser.add_argument_group("Regions")
+    azure_regions_subparser.add_argument(
+        "--azure-region",
+        nargs="?",
+        default="AzureCloud",
+        type=validate_azure_region,
+        help="Azure region from `az cloud list --output table`, by default AzureCloud",
+    )
+
+
+def validate_azure_region(region):
+    """validate_azure_region validates if the region passed as argument is valid"""
+    regions_allowed = [
+        "AzureChinaCloud",
+        "AzureUSGovernment",
+        "AzureGermanCloud",
+        "AzureCloud",
+    ]
+    if region not in regions_allowed:
+        raise ArgumentTypeError(
+            f"Region {region} not allowed, allowed regions are {' '.join(regions_allowed)}"
+        )
+    return region
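Because the validator is wired through type=, argparse runs it during parsing and a raised ArgumentTypeError is reported as a normal usage error rather than a traceback. A self-contained sketch of the mechanism (toy parser, not Prowler's):

    from argparse import ArgumentParser, ArgumentTypeError

    def validate_region(value):
        if value not in ("AzureCloud", "AzureUSGovernment"):
            raise ArgumentTypeError(f"Region {value} not allowed")
        return value

    parser = ArgumentParser()
    parser.add_argument("--azure-region", type=validate_region, default="AzureCloud")

    print(parser.parse_args([]).azure_region)  # AzureCloud
    parser.parse_args(["--azure-region", "bogus"])  # exits: argument --azure-region: Region bogus not allowed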
@@ -1,6 +1,7 @@
 from prowler.providers.azure.lib.audit_info.models import (
     Azure_Audit_Info,
     Azure_Identity_Info,
+    Azure_Region_Config,
 )

 azure_audit_info = Azure_Audit_Info(

@@ -9,4 +10,5 @@ azure_audit_info = Azure_Audit_Info(
     audit_resources=None,
     audit_metadata=None,
     audit_config=None,
+    azure_region_config=Azure_Region_Config(),
 )
@@ -13,6 +13,13 @@ class Azure_Identity_Info(BaseModel):
     subscriptions: dict = {}


+class Azure_Region_Config(BaseModel):
+    name: str = ""
+    authority: str = None
+    base_url: str = ""
+    credential_scopes: list = []
+
+
 @dataclass
 class Azure_Audit_Info:
     credentials: DefaultAzureCredential

@@ -20,12 +27,20 @@ class Azure_Audit_Info:
     audit_resources: Optional[Any]
     audit_metadata: Optional[Any]
     audit_config: dict
+    azure_region_config: Azure_Region_Config

     def __init__(
-        self, credentials, identity, audit_metadata, audit_resources, audit_config
+        self,
+        credentials,
+        identity,
+        audit_metadata,
+        audit_resources,
+        audit_config,
+        azure_region_config,
     ):
         self.credentials = credentials
         self.identity = identity
         self.audit_metadata = audit_metadata
         self.audit_resources = audit_resources
         self.audit_config = audit_config
+        self.azure_region_config = azure_region_config
prowler/providers/azure/lib/exception/__init__.py (new file, 0 lines)
prowler/providers/azure/lib/exception/exception.py (new file, 11 lines)
@@ -0,0 +1,11 @@
+class AzureException(Exception):
+    """
+    Exception raised when dealing with Azure Provider/Azure audit info instance
+
+    Attributes:
+        message -- message to be displayed
+    """
+
+    def __init__(self, message):
+        self.message = message
+        super().__init__(self.message)
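Raising a dedicated type (used below in the audit-info setup) lets callers tell provider misconfiguration apart from arbitrary failures. A quick usage sketch:

    try:
        raise AzureException("Azure provider requires at least one authentication method set")
    except AzureException as error:
        print(error.message)  # the attribute stored in __init__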
prowler/providers/azure/lib/regions/__init__.py (new file, 0 lines)
prowler/providers/azure/lib/regions/regions.py (new file, 38 lines)
@@ -0,0 +1,38 @@
+from azure.identity import AzureAuthorityHosts
+from msrestazure.azure_cloud import (
+    AZURE_CHINA_CLOUD,
+    AZURE_GERMAN_CLOUD,
+    AZURE_US_GOV_CLOUD,
+)
+
+
+def get_regions_config(region):
+    allowed_regions = {
+        "AzureCloud": {
+            "authority": None,
+            "base_url": "https://management.azure.com",
+            "credential_scopes": ["https://management.azure.com/.default"],
+        },
+        "AzureChinaCloud": {
+            "authority": AzureAuthorityHosts.AZURE_CHINA,
+            "base_url": AZURE_CHINA_CLOUD.endpoints.resource_manager,
+            "credential_scopes": [
+                AZURE_CHINA_CLOUD.endpoints.resource_manager + "/.default"
+            ],
+        },
+        "AzureUSGovernment": {
+            "authority": AzureAuthorityHosts.AZURE_GOVERNMENT,
+            "base_url": AZURE_US_GOV_CLOUD.endpoints.resource_manager,
+            "credential_scopes": [
+                AZURE_US_GOV_CLOUD.endpoints.resource_manager + "/.default"
+            ],
+        },
+        "AzureGermanCloud": {
+            "authority": AzureAuthorityHosts.AZURE_GERMANY,
+            "base_url": AZURE_GERMAN_CLOUD.endpoints.resource_manager,
+            "credential_scopes": [
+                AZURE_GERMAN_CLOUD.endpoints.resource_manager + "/.default"
+            ],
+        },
+    }
+    return allowed_regions[region]
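For orientation, calling the helper for a sovereign cloud returns the three values the provider threads through everything else (the concrete endpoint strings come from msrestazure, so they are indicative here):

    config = get_regions_config("AzureUSGovernment")
    config["authority"]          # AzureAuthorityHosts.AZURE_GOVERNMENT ("login.microsoftonline.us")
    config["base_url"]           # the US Government ARM endpoint (management.usgovcloudapi.net)
    config["credential_scopes"]  # that endpoint with "/.default" appended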
@@ -9,17 +9,27 @@ class AzureService:
         audit_info: Azure_Audit_Info,
     ):
         self.clients = self.__set_clients__(
-            audit_info.identity.subscriptions, audit_info.credentials, service
+            audit_info.identity.subscriptions,
+            audit_info.credentials,
+            service,
+            audit_info.azure_region_config,
         )

         self.subscriptions = audit_info.identity.subscriptions

-    def __set_clients__(self, subscriptions, credentials, service):
+    def __set_clients__(self, subscriptions, credentials, service, region_config):
         clients = {}
         try:
             for display_name, id in subscriptions.items():
                 clients.update(
-                    {display_name: service(credential=credentials, subscription_id=id)}
+                    {
+                        display_name: service(
+                            credential=credentials,
+                            subscription_id=id,
+                            base_url=region_config.base_url,
+                            credential_scopes=region_config.credential_scopes,
+                        )
+                    }
                 )
         except Exception as error:
             logger.error(
@@ -8,6 +8,7 @@ from prowler.lib.logger import logger
 from prowler.providers.aws.aws_provider import (
     AWS_Provider,
     assume_role,
+    get_aws_enabled_regions,
     get_checks_from_input_arn,
     get_regions_from_audit_resources,
 )

@@ -26,7 +27,11 @@ from prowler.providers.aws.lib.resource_api_tagging.resource_api_tagging import
 )
 from prowler.providers.azure.azure_provider import Azure_Provider
 from prowler.providers.azure.lib.audit_info.audit_info import azure_audit_info
-from prowler.providers.azure.lib.audit_info.models import Azure_Audit_Info
+from prowler.providers.azure.lib.audit_info.models import (
+    Azure_Audit_Info,
+    Azure_Region_Config,
+)
+from prowler.providers.azure.lib.exception.exception import AzureException
 from prowler.providers.gcp.gcp_provider import GCP_Provider
 from prowler.providers.gcp.lib.audit_info.audit_info import gcp_audit_info
 from prowler.providers.gcp.lib.audit_info.models import GCP_Audit_Info
@@ -58,12 +63,12 @@ GCP Account: {Fore.YELLOW}[{profile}]{Style.RESET_ALL} GCP Project IDs: {Fore.Y
     def print_azure_credentials(self, audit_info: Azure_Audit_Info):
         printed_subscriptions = []
         for key, value in audit_info.identity.subscriptions.items():
-            intermediate = key + " : " + value
+            intermediate = f"{key} : {value}"
             printed_subscriptions.append(intermediate)
         report = f"""
 This report is being generated using the identity below:

-Azure Tenant IDs: {Fore.YELLOW}[{" ".join(audit_info.identity.tenant_ids)}]{Style.RESET_ALL} Azure Tenant Domain: {Fore.YELLOW}[{audit_info.identity.domain}]{Style.RESET_ALL}
+Azure Tenant IDs: {Fore.YELLOW}[{" ".join(audit_info.identity.tenant_ids)}]{Style.RESET_ALL} Azure Tenant Domain: {Fore.YELLOW}[{audit_info.identity.domain}]{Style.RESET_ALL} Azure Region: {Fore.YELLOW}[{audit_info.azure_region_config.name}]{Style.RESET_ALL}
 Azure Subscriptions: {Fore.YELLOW}{printed_subscriptions}{Style.RESET_ALL}
 Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RESET_ALL} Azure Identity ID: {Fore.YELLOW}[{audit_info.identity.identity_id}]{Style.RESET_ALL}
 """
@@ -80,6 +85,7 @@ Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RE
             current_audit_info.assumed_role_info.role_arn = input_role
             input_session_duration = arguments.get("session_duration")
             input_external_id = arguments.get("external_id")
+            input_role_session_name = arguments.get("role_session_name")

             # STS Endpoint Region
             sts_endpoint_region = arguments.get("sts_endpoint_region")
@@ -148,6 +154,9 @@ Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RE
                 )
             current_audit_info.assumed_role_info.external_id = input_external_id
             current_audit_info.assumed_role_info.mfa_enabled = input_mfa
+            current_audit_info.assumed_role_info.role_session_name = (
+                input_role_session_name
+            )

             # Check if role arn is valid
             try:
@@ -253,6 +262,9 @@ Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RE
         if arguments.get("resource_arn"):
             current_audit_info.audit_resources = arguments.get("resource_arn")

+        # Get Enabled Regions
+        current_audit_info.enabled_regions = get_aws_enabled_regions(current_audit_info)
+
         return current_audit_info

     def set_aws_execution_parameters(self, provider, audit_info) -> list[str]:
@@ -282,17 +294,21 @@ Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RE
         browser_auth = arguments.get("browser_auth")
         managed_entity_auth = arguments.get("managed_entity_auth")
         tenant_id = arguments.get("tenant_id")
+
+        logger.info("Checking if region is different than default one")
+        region = arguments.get("azure_region")
+
         if (
             not az_cli_auth
             and not sp_env_auth
             and not browser_auth
             and not managed_entity_auth
         ):
-            raise Exception(
+            raise AzureException(
                 "Azure provider requires at least one authentication method set: [--az-cli-auth | --sp-env-auth | --browser-auth | --managed-identity-auth]"
             )
         if (not browser_auth and tenant_id) or (browser_auth and not tenant_id):
-            raise Exception(
+            raise AzureException(
                 "Azure Tenant ID (--tenant-id) is required only for browser authentication mode"
             )
@@ -303,9 +319,17 @@ Azure Identity Type: {Fore.YELLOW}[{audit_info.identity.identity_type}]{Style.RE
             managed_entity_auth,
             subscription_ids,
             tenant_id,
+            region,
         )
         azure_audit_info.credentials = azure_provider.get_credentials()
         azure_audit_info.identity = azure_provider.get_identity()
+        region_config = azure_provider.get_region_config()
+        azure_audit_info.azure_region_config = Azure_Region_Config(
+            name=region,
+            authority=region_config["authority"],
+            base_url=region_config["base_url"],
+            credential_scopes=region_config["credential_scopes"],
+        )

         if not arguments.get("only_logs"):
             self.print_azure_credentials(azure_audit_info)
Some files were not shown because too many files have changed in this diff.