Compare commits

79 Commits

| SHA1 |
|---|
| fad5a1937c |
| 635c257502 |
| 58a38c08d7 |
| 8fbee7737b |
| e84f5f184e |
| 0bd26b19d7 |
| 64f82d5d51 |
| f63ff994ce |
| a10ee43271 |
| 54ed29e08d |
| cc097e7a3f |
| 5de92ada43 |
| 0c546211cf |
| 4dc5a3a67c |
| c51b226ceb |
| 0a5ca6cf74 |
| 96957219e4 |
| 32b7620db3 |
| 347f65e089 |
| 16628a427e |
| ed16034a25 |
| 0c5f144e41 |
| acc7d6e7dc |
| 84b4139052 |
| 9943643958 |
| 9ceaefb663 |
| ec03ea5bc1 |
| 5855633c1f |
| a53bc2bc2e |
| 88445820ed |
| 044ed3ae98 |
| 6f48012234 |
| d344318dd4 |
| 6273dd3d83 |
| 0f3f3cbffd |
| 3244123b21 |
| cba2ee3622 |
| 25ed925df5 |
| 8c5bd60bab |
| c5510556a7 |
| bbcfca84ef |
| 1260e94c2a |
| 8a02574303 |
| c930f08348 |
| 5204acb5d0 |
| 784aaa98c9 |
| 745e2494bc |
| c00792519d |
| 142fe5a12c |
| 5b127f232e |
| c22bf01003 |
| 05e4911d6f |
| 9b551ef0ba |
| 56a8bb2349 |
| 8503c6a64d |
| 820f18da4d |
| 51a2432ebf |
| 6639534e97 |
| 0621577c7d |
| 26a507e3db |
| 244b540fe0 |
| 030ca4c173 |
| 88a2810f29 |
| 9164ee363a |
| 4cd47fdcc5 |
| 708852a3cb |
| 4a93bdf3ea |
| 22e7d2a811 |
| 93eca1dff2 |
| 9afe7408cd |
| 5dc2347a25 |
| e3a0124b10 |
| 16af89c281 |
| 621e4258c8 |
| ac6272e739 |
| 6e84f517a9 |
| fdbdb3ad86 |
| 7adcf5ca46 |
| fe6716cf76 |
.github/workflows/pypi-release.yml (1 change, vendored)

@@ -35,6 +35,7 @@ jobs:
git commit -m "chore(release): ${{ env.RELEASE_TAG }}" --no-verify
git tag -fa ${{ env.RELEASE_TAG }} -m "chore(release): ${{ env.RELEASE_TAG }}"
git push -f origin ${{ env.RELEASE_TAG }}
git checkout -B release-${{ env.RELEASE_TAG }}
poetry build
- name: Publish prowler package to PyPI
  run: |
.pre-commit-config.yaml

@@ -61,6 +61,7 @@ repos:
    hooks:
      - id: poetry-check
      - id: poetry-lock
        args: ["--no-update"]

  - repo: https://github.com/hadolint/hadolint
    rev: v2.12.1-beta

@@ -75,6 +76,15 @@ repos:
        entry: bash -c 'pylint --disable=W,C,R,E -j 0 -rn -sn prowler/'
        language: system

      - id: trufflehog
        name: TruffleHog
        description: Detect secrets in your data.
        # entry: bash -c 'trufflehog git file://. --only-verified --fail'
        # For running trufflehog in docker, use the following entry instead:
        entry: bash -c 'docker run -v "$(pwd):/workdir" -i --rm trufflesecurity/trufflehog:latest git file:///workdir --only-verified --fail'
        language: system
        stages: ["commit", "push"]

      - id: pytest-check
        name: pytest-check
        entry: bash -c 'pytest tests -n auto'
README.md (59 changes)

@@ -11,11 +11,10 @@
</p>
<p align="center">
<a href="https://join.slack.com/t/prowler-workspace/shared_invite/zt-1hix76xsl-2uq222JIXrC7Q8It~9ZNog"><img alt="Slack Shield" src="https://img.shields.io/badge/slack-prowler-brightgreen.svg?logo=slack"></a>
-<a href="https://pypi.org/project/prowler-cloud/"><img alt="Python Version" src="https://img.shields.io/pypi/v/prowler.svg"></a>
-<a href="https://pypi.python.org/pypi/prowler-cloud/"><img alt="Python Version" src="https://img.shields.io/pypi/pyversions/prowler.svg"></a>
+<a href="https://pypi.org/project/prowler/"><img alt="Python Version" src="https://img.shields.io/pypi/v/prowler.svg"></a>
+<a href="https://pypi.python.org/pypi/prowler/"><img alt="Python Version" src="https://img.shields.io/pypi/pyversions/prowler.svg"></a>
<a href="https://pypistats.org/packages/prowler"><img alt="PyPI Prowler Downloads" src="https://img.shields.io/pypi/dw/prowler.svg?label=prowler%20downloads"></a>
<a href="https://pypistats.org/packages/prowler-cloud"><img alt="PyPI Prowler-Cloud Downloads" src="https://img.shields.io/pypi/dw/prowler-cloud.svg?label=prowler-cloud%20downloads"></a>
<a href="https://formulae.brew.sh/formula/prowler#default"><img alt="Brew Prowler Downloads" src="https://img.shields.io/homebrew/installs/dm/prowler?label=brew%20downloads"></a>
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/toniblyx/prowler"></a>
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker" src="https://img.shields.io/docker/cloud/build/toniblyx/prowler"></a>
<a href="https://hub.docker.com/r/toniblyx/prowler"><img alt="Docker" src="https://img.shields.io/docker/image-size/toniblyx/prowler"></a>

@@ -85,7 +84,7 @@ python prowler.py -v

You can run Prowler from your workstation, an EC2 instance, Fargate or any other container, Codebuild, CloudShell and Cloud9.

-
+

# 📝 Requirements

@@ -116,22 +115,6 @@ Those credentials must be associated to a user or role with proper permissions t

> If you want Prowler to send findings to [AWS Security Hub](https://aws.amazon.com/security-hub), make sure you also attach the custom policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json).

-## Google Cloud Platform
-
-Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
-
-1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
-2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
-3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
-
-Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
-
-- Viewer
-- Security Reviewer
-- Stackdriver Account Viewer
-
-> `prowler` will scan the project associated with the credentials.
-
## Azure

Prowler for Azure supports the following authentication types:

@@ -180,6 +163,22 @@ Regarding the subscription scope, Prowler by default scans all the subscriptions

- `Reader`

+## Google Cloud Platform
+
+Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
+
+1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
+2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
+3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
+
+Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
+
+- Viewer
+- Security Reviewer
+- Stackdriver Account Viewer
+
+> `prowler` will scan the project associated with the credentials.
+
# 💻 Basic Usage

To run prowler, you will need to specify the provider (e.g aws or azure):

@@ -245,14 +244,6 @@ prowler aws --profile custom-profile -f us-east-1 eu-south-2
```
> By default, `prowler` will scan all AWS regions.

-## Google Cloud Platform
-
-Optionally, you can provide the location of an application credential JSON file with the following argument:
-
-```console
-prowler gcp --credentials-file path
-```
-
## Azure

With Azure you need to specify which auth method is going to be used:

@@ -262,12 +253,14 @@ prowler azure [--sp-env-auth, --az-cli-auth, --browser-auth, --managed-identity-
```
> By default, `prowler` will scan all Azure subscriptions.

+## Google Cloud Platform
+
+Optionally, you can provide the location of an application credential JSON file with the following argument:
+
+```console
+prowler gcp --credentials-file path
+```
+
# 🎉 New Features

- Python: we got rid of all bash and it is now all in Python.
- Faster: huge performance improvements (same account from 2.5 hours to 4 minutes).
- Developers and community: we have made it easier to contribute with new checks and new compliance frameworks. We also included unit tests.
- Multi-cloud: in addition to AWS, we have added Azure, we plan to include GCP and OCI soon, let us know if you want to contribute!

# 📃 License
docs/getting-started/requirements.md

@@ -30,24 +30,6 @@ Those credentials must be associated to a user or role with proper permissions t

> If you want Prowler to send findings to [AWS Security Hub](https://aws.amazon.com/security-hub), make sure you also attach the custom policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json).

-## Google Cloud
-
-### GCP Authentication
-
-Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
-
-1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
-2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
-3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
-
-Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
-
-- Viewer
-- Security Reviewer
-- Stackdriver Account Viewer
-
-> `prowler` will scan the project associated with the credentials.
-
## Azure

Prowler for azure supports the following authentication types:

@@ -97,3 +79,21 @@ Regarding the subscription scope, Prowler by default scans all the subscriptions

- `Security Reader`
- `Reader`

+## Google Cloud
+
+### GCP Authentication
+
+Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):
+
+1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
+2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
+3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)
+
+Those credentials must be associated to a user or service account with proper permissions to do all checks. To make sure, add the following roles to the member associated with the credentials:
+
+- Viewer
+- Security Reviewer
+- Stackdriver Account Viewer
+
+> `prowler` will scan the project associated with the credentials.

BIN: image changed (Before: 258 KiB, After: 283 KiB)
@@ -256,25 +256,6 @@ prowler aws --profile custom-profile -f us-east-1 eu-south-2

See more details about AWS Authentication in [Requirements](getting-started/requirements.md)

-### Google Cloud
-
-Prowler will use by default your User Account credentials, you can configure it using:
-
-- `gcloud init` to use a new account
-- `gcloud config set account <account>` to use an existing account
-
-Then, obtain your access credentials using: `gcloud auth application-default login`
-
-Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:
-
-```console
-prowler gcp --credentials-file path
-```
-
-> `prowler` will scan the GCP project associated with the credentials.
-
-See more details about GCP Authentication in [Requirements](getting-started/requirements.md)
-
### Azure

With Azure you need to specify which auth method is going to be used:

@@ -299,3 +280,22 @@ Prowler by default scans all the subscriptions that is allowed to scan, if you w

```console
prowler azure --az-cli-auth --subscription-ids <subscription ID 1> <subscription ID 2> ... <subscription ID N>
```

+### Google Cloud
+
+Prowler will use by default your User Account credentials, you can configure it using:
+
+- `gcloud init` to use a new account
+- `gcloud config set account <account>` to use an existing account
+
+Then, obtain your access credentials using: `gcloud auth application-default login`
+
+Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:
+
+```console
+prowler gcp --credentials-file path
+```
+
+> `prowler` will scan the GCP project associated with the credentials.
+
+See more details about GCP Authentication in [Requirements](getting-started/requirements.md)
docs/tutorials/allowlist.md

@@ -7,9 +7,10 @@ You can use `-w`/`--allowlist-file` with the path of your allowlist yaml file, b

## Allowlist Yaml File Syntax

-### Account, Check and/or Region can be * to apply for all the cases
-### Resources is a list that can have either Regex or Keywords
-### Tags is an optional list containing tuples of 'key=value'
+### Account, Check and/or Region can be * to apply for all the cases.
+### Resources and tags are lists that can have either Regex or Keywords.
+### Tags is an optional list that matches on tuples of 'key=value' and are "ANDed" together.
+### Use an alternation Regex to match one of multiple tags with "ORed" logic.
########################### ALLOWLIST EXAMPLE ###########################
Allowlist:
  Accounts:

@@ -21,14 +22,19 @@ You can use `-w`/`--allowlist-file` with the path of your allowlist yaml file, b
      Resources:
        - "user-1" # Will ignore user-1 in check iam_user_hardware_mfa_enabled
        - "user-2" # Will ignore user-2 in check iam_user_hardware_mfa_enabled
    "ec2_*":
      Regions:
        - "*"
      Resources:
        - "*" # Will ignore every EC2 check in every account and region
  "*":
    Regions:
      - "*"
    Resources:
-     - "test" # Will ignore every resource containing the string "test" and the tags 'test=test' and 'project=test' in account 123456789012 and every region
+     - "test"
    Tags:
-     - "test=test" # Will ignore every resource containing the string "test" and the tags 'test=test' and 'project=test' in account 123456789012 and every region
-     - "project=test"
+     - "test=test" # Will ignore every resource containing the string "test" and the tags 'test=test' and
+     - "project=test|project=stage" # either of ('project=test' OR project=stage) in account 123456789012 and every region

  "*":
    Checks:
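The "ANDed" tag semantics and the regex "OR" alternation described above can be sketched in Python. This is a minimal illustration of the matching rule under the stated semantics, not Prowler's exact implementation (the helper name is hypothetical):

```python
import re


def resource_is_allowlisted(resource: str, tags: list, entry: dict) -> bool:
    # Hypothetical, simplified version of the allowlist rule: the resource
    # pattern must match, and EVERY item in Tags must match at least one
    # resource tag ("ANDed" across items).
    if not any(re.search(pattern, resource) for pattern in entry["Resources"]):
        return False
    for allowed_tag in entry.get("Tags", []):
        # An alternation regex like "project=test|project=stage" matches if
        # EITHER tag is present ("ORed" within one item).
        if not any(re.search(allowed_tag, tag) for tag in tags):
            return False
    return True


# Matches: 'test=test' is present and the alternation matches 'project=stage'.
entry = {"Resources": ["test"], "Tags": ["test=test", "project=test|project=stage"]}
print(resource_is_allowlisted("test-bucket", ["test=test", "project=stage"], entry))  # True
```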
docs/tutorials/aws/regions-and-partitions.md (new file, 81 lines)

# AWS Regions and Partitions

By default Prowler is able to scan the following AWS partitions:

- Commercial: `aws`
- China: `aws-cn`
- GovCloud (US): `aws-us-gov`

> To check the available regions for each partition and service please refer to the following document [aws_regions_by_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json)

Note that, to scan the China (`aws-cn`) or GovCloud (`aws-us-gov`) partitions, you must either have a valid region for that partition in your AWS credentials or specify the regions you want to audit in that partition using the `-f/--region` flag.
> Please refer to https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials for more information about the AWS credentials configuration.

You can get more information about the available partitions and regions in the following [Botocore](https://github.com/boto/botocore) [file](https://github.com/boto/botocore/blob/22a19ea7c4c2c4dd7df4ab8c32733cba0c7597a4/botocore/data/partitions.json).

## AWS China

To scan your AWS account in the China partition (`aws-cn`):

- Using the `-f/--region` flag:
```
prowler aws --region cn-north-1 cn-northwest-1
```
- Using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
```
[default]
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
region = cn-north-1
```
> With this option all the regions of the partition will be scanned without needing to use the `-f/--region` flag.

## AWS GovCloud (US)

To scan your AWS account in the GovCloud (US) partition (`aws-us-gov`):

- Using the `-f/--region` flag:
```
prowler aws --region us-gov-east-1 us-gov-west-1
```
- Using the region configured in your AWS profile at `~/.aws/credentials` or `~/.aws/config`:
```
[default]
aws_access_key_id = XXXXXXXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
region = us-gov-east-1
```
> With this option all the regions of the partition will be scanned without needing to use the `-f/--region` flag.

## AWS ISO (US & Europe)

The AWS ISO partitions, known as "secret partitions", are air-gapped from the Internet, so there is no built-in way to scan them. If you want to audit an AWS account in one of the AWS ISO partitions you should manually update the [aws_regions_by_service.json](https://github.com/prowler-cloud/prowler/blob/master/prowler/providers/aws/aws_regions_by_service.json) and include the partition, region and services, e.g.:
```json
"iam": {
    "regions": {
        "aws": [
            "eu-west-1",
            "us-east-1"
        ],
        "aws-cn": [
            "cn-north-1",
            "cn-northwest-1"
        ],
        "aws-us-gov": [
            "us-gov-east-1",
            "us-gov-west-1"
        ],
        "aws-iso": [
            "aws-iso-global",
            "us-iso-east-1",
            "us-iso-west-1"
        ],
        "aws-iso-b": [
            "aws-iso-b-global",
            "us-isob-east-1"
        ],
        "aws-iso-e": []
    }
},
```
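A small sketch of how the `aws_regions_by_service.json` structure above can be queried to decide whether a service is available in a given partition and region. The helper is hypothetical and assumes a local copy of the file; it only illustrates the data layout:

```python
import json

# Assumes a local copy of aws_regions_by_service.json with the structure
# shown above: service -> "regions" -> partition -> [region names].
with open("aws_regions_by_service.json") as f:
    regions_by_service = json.load(f)


def service_available(service: str, partition: str, region: str) -> bool:
    # True if the service lists that region under that partition.
    partitions = regions_by_service.get(service, {}).get("regions", {})
    return region in partitions.get(partition, [])


print(service_available("iam", "aws-cn", "cn-north-1"))        # True per the example above
print(service_available("iam", "aws-iso-e", "eu-isoe-west-1"))  # False: that partition list is empty
```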
docs/tutorials/aws/securityhub.md

@@ -29,14 +29,34 @@ prowler -S -f eu-west-1

> **Note 1**: It is recommended to send only fails to Security Hub, which is possible by adding `-q` to the command.

-> **Note 2**: Since Prowler perform checks to all regions by defauls you may need to filter by region when runing Security Hub integration, as shown in the example above. Remember to enable Security Hub in the region or regions you need by calling `aws securityhub enable-security-hub --region <region>` and run Prowler with the option `-f <region>` (if no region is used it will try to push findings in all regions hubs).
+> **Note 2**: Since Prowler performs checks to all regions by default you may need to filter by region when running the Security Hub integration, as shown in the example above. Remember to enable Security Hub in the region or regions you need by calling `aws securityhub enable-security-hub --region <region>` and run Prowler with the option `-f <region>` (if no region is used it will try to push findings in all regions hubs). Prowler will send findings to the Security Hub of the region where the scanned resource is located.

-> **Note 3** to have updated findings in Security Hub you have to run Prowler periodically. Once a day or every certain amount of hours.
+> **Note 3**: To have updated findings in Security Hub you have to run Prowler periodically, e.g. once a day or every few hours.

Once you run Prowler for the first time you will be able to see its findings in the Findings section:



## Send findings to Security Hub assuming an IAM Role

When you are auditing a multi-account AWS environment, you can send findings to a Security Hub of another account by assuming an IAM role from that account using the `-R` flag in the Prowler command:

```sh
prowler -S -R arn:aws:iam::123456789012:role/ProwlerExecRole
```

> Remember that the used role needs to have permissions to send findings to Security Hub. To get more information about the permissions required, please refer to the following IAM policy [prowler-security-hub.json](https://github.com/prowler-cloud/prowler/blob/master/permissions/prowler-security-hub.json)

## Send only failed findings to Security Hub

When using Security Hub it is recommended to send only the failed findings generated. To follow that recommendation you could add the `-q` flag to the Prowler command:

```sh
prowler -S -q
```

## Skip sending updates of findings to Security Hub

By default, Prowler archives all its findings in Security Hub that have not appeared in the last scan.
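For context, the `securityhub:BatchImportFindings` permission referenced above maps to the Security Hub API call that pushes findings. A minimal boto3 sketch, assuming an ASFF-formatted finding; every field value here is a placeholder, not Prowler's real output:

```python
import boto3

# Assumes credentials with securityhub:BatchImportFindings, e.g. the
# prowler-security-hub.json policy attached to the role used with -R.
securityhub = boto3.client("securityhub", region_name="eu-west-1")

finding = {  # minimal AWS Security Finding Format (ASFF) skeleton
    "SchemaVersion": "2018-10-08",
    "Id": "prowler-example-finding-1",
    "ProductArn": "arn:aws:securityhub:eu-west-1::product/prowler/prowler",
    "GeneratorId": "prowler-example-check",
    "AwsAccountId": "123456789012",
    "Types": ["Software and Configuration Checks"],
    "CreatedAt": "2023-05-01T00:00:00Z",
    "UpdatedAt": "2023-05-01T00:00:00Z",
    "Severity": {"Label": "HIGH"},
    "Title": "Example failed check",
    "Description": "Illustrative finding only.",
    "Resources": [{"Type": "AwsAccount", "Id": "AWS::::Account:123456789012"}],
}

response = securityhub.batch_import_findings(Findings=[finding])
print(response["SuccessCount"], response["FailedCount"])
```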
docs/tutorials/gcp/authentication.md (new file, 29 lines)

# GCP authentication

By default, Prowler uses your User Account credentials. You can configure them using:

- `gcloud init` to use a new account
- `gcloud config set account <account>` to use an existing account

Then, obtain your access credentials using: `gcloud auth application-default login`

Otherwise, you can generate and download Service Account keys in JSON format (refer to https://cloud.google.com/iam/docs/creating-managing-service-account-keys) and provide the location of the file with the following argument:

```console
prowler gcp --credentials-file path
```

> `prowler` will scan the GCP project associated with the credentials.

Prowler will follow the same credentials search as [Google authentication libraries](https://cloud.google.com/docs/authentication/application-default-credentials#search_order):

1. [GOOGLE_APPLICATION_CREDENTIALS environment variable](https://cloud.google.com/docs/authentication/application-default-credentials#GAC)
2. [User credentials set up by using the Google Cloud CLI](https://cloud.google.com/docs/authentication/application-default-credentials#personal)
3. [The attached service account, returned by the metadata server](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa)

Those credentials must be associated with a user or service account with proper permissions to run all checks. To make sure, add the following roles to the member associated with the credentials:

- Viewer
- Security Reviewer
- Stackdriver Account Viewer
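The Application Default Credentials search order described above is what the `google-auth` library implements. A quick way to see which credentials and project would be picked up, assuming the `google-auth` package is installed:

```python
import google.auth

# Resolves credentials using the ADC search order listed above:
# GOOGLE_APPLICATION_CREDENTIALS, then gcloud user credentials,
# then the attached service account from the metadata server.
credentials, project_id = google.auth.default()
print(f"Scanning would target project: {project_id}")
```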
BIN docs/tutorials/img/create-slack-app.png (new file, 61 KiB)
BIN docs/tutorials/img/install-in-slack-workspace.png (new file, 67 KiB)
BIN docs/tutorials/img/integrate-slack-app.png (new file, 200 KiB)
BIN docs/tutorials/img/slack-app-token.png (new file, 456 KiB)
BIN docs/tutorials/img/slack-prowler-message.png (new file, 69 KiB)
docs/tutorials/integrations.md (new file, 36 lines)

# Integrations

## Slack

Prowler can be integrated with [Slack](https://slack.com/) to send a summary of the execution to a channel, once a Slack App is configured, with the following command:

```sh
prowler <provider> --slack
```



> The Slack integration needs the SLACK_API_TOKEN and SLACK_CHANNEL_ID environment variables.

### Configuration

To configure the Slack integration, follow these steps:

1. Create a Slack Application:
    - Go to the [Slack API page](https://api.slack.com/tutorials/tracks/getting-a-token), scroll down to the *Create app* button and select your workspace:
    
    - Install the application in your selected workspace:
    
    - Get the *Slack App OAuth Token* that Prowler needs to send the message:
    
2. Optionally, create a Slack Channel (you can use an existing one).
3. Integrate the created Slack App into your Slack channel:
    - Click on the channel, go to the Integrations tab, and Add an App.
    
4. Set the following environment variables that Prowler will read:
    - `SLACK_API_TOKEN`: the *Slack App OAuth Token* obtained earlier.
    - `SLACK_CHANNEL_ID`: the name of your Slack Channel where Prowler will send the message.
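Before running Prowler you can sanity-check both environment variables with the same `slack_sdk` client Prowler uses. A sketch; `auth_test` only validates the token and sends nothing:

```python
import os

from slack_sdk import WebClient

# Assumes SLACK_API_TOKEN and SLACK_CHANNEL_ID are exported,
# as the integration requires.
token = os.environ["SLACK_API_TOKEN"]
channel = os.environ["SLACK_CHANNEL_ID"]

client = WebClient(token=token)
response = client.auth_test()  # raises SlackApiError if the token is invalid
print(f"Token OK for workspace {response['team']}; will post to #{channel}")
```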
mkdocs.yml

@@ -33,6 +33,7 @@ nav:
    - Reporting: tutorials/reporting.md
    - Compliance: tutorials/compliance.md
    - Quick Inventory: tutorials/quick-inventory.md
+   - Integrations: tutorials/integrations.md
    - Configuration File: tutorials/configuration_file.md
    - Logging: tutorials/logging.md
    - Allowlist: tutorials/allowlist.md

@@ -42,6 +43,7 @@ nav:
    - Assume Role: tutorials/aws/role-assumption.md
    - AWS Security Hub: tutorials/aws/securityhub.md
    - AWS Organizations: tutorials/aws/organizations.md
+   - AWS Regions and Partitions: tutorials/aws/regions-and-partitions.md
    - Scan Multiple AWS Accounts: tutorials/aws/multiaccount.md
    - AWS CloudShell: tutorials/aws/cloudshell.md
    - Checks v2 to v3 Mapping: tutorials/aws/v2_to_v3_checks_mapping.md

@@ -51,6 +53,8 @@ nav:
    - Azure:
      - Authentication: tutorials/azure/authentication.md
      - Subscriptions: tutorials/azure/subscriptions.md
+   - Google Cloud:
+     - Authentication: tutorials/gcp/authentication.md
    - Developer Guide: tutorials/developer-guide.md
- Security: security.md
- Contact Us: contact.md
@@ -6,28 +6,33 @@
        "account:Get*",
        "appstream:Describe*",
        "appstream:List*",
        "backup:List*",
        "cloudtrail:GetInsightSelectors",
        "codeartifact:List*",
        "codebuild:BatchGet*",
-       "ds:Describe*",
+       "drs:Describe*",
        "ds:Get*",
+       "ds:Describe*",
        "ds:List*",
        "ec2:GetEbsEncryptionByDefault",
        "ecr:Describe*",
        "ecr:GetRegistryScanningConfiguration",
        "elasticfilesystem:DescribeBackupPolicy",
        "glue:GetConnections",
        "glue:GetSecurityConfiguration*",
        "glue:SearchTables",
        "lambda:GetFunction*",
        "logs:FilterLogEvents",
        "macie2:GetMacieSession",
        "s3:GetAccountPublicAccessBlock",
        "shield:DescribeProtection",
        "shield:GetSubscriptionState",
        "securityhub:BatchImportFindings",
        "securityhub:GetFindings",
        "ssm:GetDocument",
        "ssm-incidents:List*",
        "support:Describe*",
-       "tag:GetTagKeys"
+       "tag:GetTagKeys",
+       "organizations:DescribeOrganization",
+       "organizations:ListPolicies*",
+       "organizations:DescribePolicy"
    ],
    "Resource": "*",
    "Effect": "Allow",

@@ -39,7 +44,8 @@
        "apigateway:GET"
    ],
    "Resource": [
-       "arn:aws:apigateway:*::/restapis/*"
+       "arn:aws:apigateway:*::/restapis/*",
+       "arn:aws:apigateway:*::/apis/*"
    ]
}
]
poetry.lock (586 changes, generated)

prowler/__main__.py

@@ -1,6 +1,7 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys

from prowler.lib.banner import print_banner

@@ -29,6 +30,7 @@ from prowler.lib.outputs.compliance import display_compliance_table
from prowler.lib.outputs.html import add_html_footer, fill_html_overview_statistics
from prowler.lib.outputs.json import close_json
from prowler.lib.outputs.outputs import extract_findings_statistics, send_to_s3_bucket
from prowler.lib.outputs.slack import send_slack_message
from prowler.lib.outputs.summary_table import display_summary_table
from prowler.providers.aws.lib.security_hub.security_hub import (
    resolve_security_hub_previous_findings,

@@ -169,6 +171,21 @@ def prowler():
    # Extract findings stats
    stats = extract_findings_statistics(findings)

    if args.slack:
        if "SLACK_API_TOKEN" in os.environ and "SLACK_CHANNEL_ID" in os.environ:
            _ = send_slack_message(
                os.environ["SLACK_API_TOKEN"],
                os.environ["SLACK_CHANNEL_ID"],
                stats,
                provider,
                audit_info,
            )
        else:
            logger.critical(
                "Slack integration needs SLACK_API_TOKEN and SLACK_CHANNEL_ID environment variables (see more in https://docs.prowler.cloud/en/latest/tutorials/integrations/#slack)."
            )
            sys.exit(1)

    if args.output_modes:
        for mode in args.output_modes:
            # Close json file if exists
@@ -1,6 +1,7 @@
-### Account, Check and/or Region can be * to apply for all the cases
-### Resources is a list that can have either Regex or Keywords
-### Tags is an optional list containing tuples of 'key=value'
+### Account, Check and/or Region can be * to apply for all the cases.
+### Resources and tags are lists that can have either Regex or Keywords.
+### Tags is an optional list that matches on tuples of 'key=value' and are "ANDed" together.
+### Use an alternation Regex to match one of multiple tags with "ORed" logic.
########################### ALLOWLIST EXAMPLE ###########################
Allowlist:
  Accounts:

@@ -12,14 +13,19 @@ Allowlist:
      Resources:
        - "user-1" # Will ignore user-1 in check iam_user_hardware_mfa_enabled
        - "user-2" # Will ignore user-2 in check iam_user_hardware_mfa_enabled
    "ec2_*":
      Regions:
        - "*"
      Resources:
        - "*" # Will ignore every EC2 check in every account and region
  "*":
    Regions:
      - "*"
    Resources:
-     - "test" # Will ignore every resource containing the string "test" and the tags 'test=test' and 'project=test' in account 123456789012 and every region
+     - "test"
    Tags:
-     - "test=test" # Will ignore every resource containing the string "test" and the tags 'test=test' and 'project=test' in account 123456789012 and every region
-     - "project=test"
+     - "test=test" # Will ignore every resource containing the string "test" and the tags 'test=test' and
+     - "project=test|project=stage" # either of ('project=test' OR project=stage) in account 123456789012 and every region

  "*":
    Checks:
prowler/config/config.py

@@ -10,9 +10,13 @@ from prowler.lib.logger import logger

timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
-prowler_version = "3.4.0"
+prowler_version = "3.5.2"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
html_logo_img = "https://user-images.githubusercontent.com/3985464/113734260-7ba06900-96fb-11eb-82bc-d4f68a1e2710.png"
+square_logo_img = "https://user-images.githubusercontent.com/38561120/235905862-9ece5bd7-9aa3-4e48-807a-3a9035eb8bfb.png"
+aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
+azure_logo = "https://user-images.githubusercontent.com/38561120/235927375-b23e2e0f-8932-49ec-b59c-d89f61c8041d.png"
+gcp_logo = "https://user-images.githubusercontent.com/38561120/235928332-eb4accdc-c226-4391-8e97-6ca86a91cf50.png"

orange_color = "\033[38;5;208m"
banner_color = "\033[1;92m"
@@ -154,6 +154,11 @@ Detailed documentation at https://docs.prowler.cloud
        common_outputs_parser.add_argument(
            "-b", "--no-banner", action="store_true", help="Hide Prowler banner"
        )
+       common_outputs_parser.add_argument(
+           "--slack",
+           action="store_true",
+           help="Send a summary of the execution with a Slack APP in your channel. Environment variables SLACK_API_TOKEN and SLACK_CHANNEL_ID are required (see more in https://docs.prowler.cloud/en/latest/tutorials/integrations/#slack).",
+       )

    def __init_logging_parser__(self):
        # Logging Options
@@ -42,8 +42,6 @@ def generate_provider_output_csv(
    set_provider_output_options configures automatically the outputs based on the selected provider and returns the Provider_Output_Options object.
    """
-   try:
-       finding_output_model = f"{provider.capitalize()}_Check_Output_{mode.upper()}"
-       output_model = getattr(importlib.import_module(__name__), finding_output_model)
+   # Dynamically load the Provider_Output_Options class
+   finding_output_model = f"{provider.capitalize()}_Check_Output_{mode.upper()}"
+   output_model = getattr(importlib.import_module(__name__), finding_output_model)
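The dynamic lookup above builds a class name from the provider and output mode and resolves it with `importlib`/`getattr`. A self-contained sketch of the same pattern, with a made-up model class for illustration:

```python
import importlib


class Aws_Check_Output_CSV:  # hypothetical stand-in for a real output model
    pass


def load_output_model(provider: str, mode: str):
    # "aws" + "csv" -> "Aws_Check_Output_CSV", looked up in this module.
    class_name = f"{provider.capitalize()}_Check_Output_{mode.upper()}"
    return getattr(importlib.import_module(__name__), class_name)


print(load_output_model("aws", "csv"))  # <class '__main__.Aws_Check_Output_CSV'>
```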
prowler/lib/outputs/slack.py (new file, 135 lines)

import sys

from slack_sdk import WebClient

from prowler.config.config import aws_logo, azure_logo, gcp_logo, square_logo_img
from prowler.lib.logger import logger


def send_slack_message(token, channel, stats, provider, audit_info):
    try:
        client = WebClient(token=token)
        identity, logo = create_message_identity(provider, audit_info)
        response = client.chat_postMessage(
            username="Prowler",
            icon_url=square_logo_img,
            channel="#" + channel,
            blocks=create_message_blocks(identity, logo, stats),
        )
        return response
    except Exception as error:
        logger.error(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )


def create_message_identity(provider, audit_info):
    try:
        identity = ""
        logo = aws_logo
        if provider == "aws":
            identity = f"AWS Account *{audit_info.audited_account}*"
        elif provider == "gcp":
            identity = f"GCP Project *{audit_info.project_id}*"
            logo = gcp_logo
        elif provider == "azure":
            printed_subscriptions = []
            for key, value in audit_info.identity.subscriptions.items():
                intermediate = "- *" + key + ": " + value + "*\n"
                printed_subscriptions.append(intermediate)
            identity = f"Azure Subscriptions:\n{''.join(printed_subscriptions)}"
            logo = azure_logo
        return identity, logo
    except Exception as error:
        logger.error(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )


def create_message_blocks(identity, logo, stats):
    try:
        blocks = [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Hey there 👋 \n I'm *Prowler*, _the handy cloud security tool_ :cloud::key:\n\n I have just finished the security assessment on your {identity} with a total of *{stats['findings_count']}* findings.",
                },
                "accessory": {
                    "type": "image",
                    "image_url": logo,
                    "alt_text": "Provider Logo",
                },
            },
            {"type": "divider"},
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"\n:white_check_mark: *{stats['total_pass']} Passed findings* ({round(stats['total_pass']/stats['findings_count']*100,2)}%)\n",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"\n:x: *{stats['total_fail']} Failed findings* ({round(stats['total_fail']/stats['findings_count']*100,2)}%)\n ",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"\n:bar_chart: *{stats['resources_count']} Scanned Resources*\n",
                },
            },
            {"type": "divider"},
            {
                "type": "context",
                "elements": [
                    {
                        "type": "mrkdwn",
                        "text": f"Used parameters: `prowler {' '.join(sys.argv[1:])} `",
                    }
                ],
            },
            {"type": "divider"},
            {
                "type": "section",
                "text": {"type": "mrkdwn", "text": "Join our Slack Community!"},
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Prowler :slack:"},
                    "url": "https://join.slack.com/t/prowler-workspace/shared_invite/zt-1hix76xsl-2uq222JIXrC7Q8It~9ZNog",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "Feel free to contact us in our repo",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Prowler :github:"},
                    "url": "https://github.com/prowler-cloud/prowler",
                },
            },
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": "See all the things you can do with ProwlerPro",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Prowler Pro"},
                    "url": "https://prowler.pro",
                },
            },
        ]
        return blocks
    except Exception as error:
        logger.error(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
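A hedged usage sketch of `send_slack_message`, showing the `stats` keys the blocks above consume (`findings_count`, `total_pass`, `total_fail`, `resources_count`); the token, channel, and audit-info values are placeholders:

```python
import os

from prowler.lib.outputs.slack import send_slack_message

# Placeholder stats in the shape the blocks above expect.
stats = {
    "findings_count": 120,
    "total_pass": 90,
    "total_fail": 30,
    "resources_count": 45,
}


class FakeAuditInfo:  # hypothetical stand-in for the AWS audit_info object
    audited_account = "123456789012"


response = send_slack_message(
    os.environ["SLACK_API_TOKEN"],   # Slack App OAuth token
    os.environ["SLACK_CHANNEL_ID"],  # channel name; "#" is prepended internally
    stats,
    "aws",
    FakeAuditInfo(),
)
```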
@@ -36,7 +36,7 @@ class AWS_Provider:
            secret_key=audit_info.credentials.aws_secret_access_key,
            token=audit_info.credentials.aws_session_token,
            expiry_time=audit_info.credentials.expiration,
-           refresh_using=self.refresh,
+           refresh_using=self.refresh_credentials,
            method="sts-assume-role",
        )
        # Here we need the botocore session since it needs to use refreshable credentials

@@ -60,7 +60,7 @@ class AWS_Provider:
    # Refresh credentials method using assume role
    # This method is called "adding ()" to the name, so it cannot accept arguments
    # https://github.com/boto/botocore/blob/098cc255f81a25b852e1ecdeb7adebd94c7b1b73/botocore/credentials.py#L570
-   def refresh(self):
+   def refresh_credentials(self):
        logger.info("Refreshing assumed credentials...")

        response = assume_role(self.aws_session, self.role_info)
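For context, botocore's refreshable credentials call the `refresh_using` callable with no arguments and expect a metadata dict back, which is why the method above cannot take parameters. A minimal sketch of that contract; the credential values are placeholders, and a real implementation would call `sts.assume_role` inside the refresh function:

```python
from datetime import datetime, timedelta, timezone

from botocore.credentials import RefreshableCredentials
from botocore.session import Session


def refresh_credentials():
    # Called by botocore with no arguments; must return this exact shape.
    expiry = datetime.now(timezone.utc) + timedelta(hours=1)
    return {
        "access_key": "ASIA...",   # placeholder values
        "secret_key": "secret",
        "token": "session-token",
        "expiry_time": expiry.isoformat(),
    }


credentials = RefreshableCredentials.create_from_metadata(
    metadata=refresh_credentials(),
    refresh_using=refresh_credentials,
    method="sts-assume-role",
)

botocore_session = Session()
botocore_session._credentials = credentials  # attach refreshable credentials (private attribute)
```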
prowler/providers/aws/aws_regions_by_service.json

@@ -736,12 +736,16 @@
        "regions": {
            "aws": [
                "ap-northeast-1",
                "ap-northeast-2",
                "ap-south-1",
                "ap-southeast-1",
                "ap-southeast-2",
                "eu-central-1",
                "eu-north-1",
                "eu-west-1",
                "eu-west-2",
                "eu-west-3",
                "sa-east-1",
                "us-east-1",
                "us-east-2",
                "us-west-2"

@@ -778,6 +782,7 @@
                "sa-east-1",
                "us-east-1",
                "us-east-2",
                "us-west-1",
                "us-west-2"
            ],
            "aws-cn": [],

@@ -980,6 +985,47 @@
            ]
        }
    },
+   "awshealthdashboard": {
+       "regions": {
+           "aws": [
+               "af-south-1",
+               "ap-east-1",
+               "ap-northeast-1",
+               "ap-northeast-2",
+               "ap-northeast-3",
+               "ap-south-1",
+               "ap-south-2",
+               "ap-southeast-1",
+               "ap-southeast-2",
+               "ap-southeast-3",
+               "ap-southeast-4",
+               "ca-central-1",
+               "eu-central-1",
+               "eu-central-2",
+               "eu-north-1",
+               "eu-south-1",
+               "eu-south-2",
+               "eu-west-1",
+               "eu-west-2",
+               "eu-west-3",
+               "me-central-1",
+               "me-south-1",
+               "sa-east-1",
+               "us-east-1",
+               "us-east-2",
+               "us-west-1",
+               "us-west-2"
+           ],
+           "aws-cn": [
+               "cn-north-1",
+               "cn-northwest-1"
+           ],
+           "aws-us-gov": [
+               "us-gov-east-1",
+               "us-gov-west-1"
+           ]
+       }
+   },
    "backup": {
        "regions": {
            "aws": [

@@ -1937,6 +1983,7 @@
                "cn-northwest-1"
            ],
            "aws-us-gov": [
                "us-gov-east-1",
                "us-gov-west-1"
            ]
        }

@@ -3421,6 +3468,7 @@
    "emr-serverless": {
        "regions": {
            "aws": [
                "ap-east-1",
                "ap-northeast-1",
                "ap-northeast-2",
                "ap-south-1",

@@ -3432,13 +3480,17 @@
                "eu-west-1",
                "eu-west-2",
                "eu-west-3",
                "me-south-1",
                "sa-east-1",
                "us-east-1",
                "us-east-2",
                "us-west-1",
                "us-west-2"
            ],
-           "aws-cn": [],
+           "aws-cn": [
+               "cn-north-1",
+               "cn-northwest-1"
+           ],
            "aws-us-gov": []
        }
    },

@@ -3949,6 +4001,7 @@
                "ap-south-2",
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",
                "ca-central-1",
                "eu-central-1",
                "eu-central-2",

@@ -3963,6 +4016,7 @@
                "sa-east-1",
                "us-east-1",
                "us-east-2",
                "us-west-1",
                "us-west-2"
            ],
            "aws-cn": [],

@@ -4925,6 +4979,21 @@
            "aws-us-gov": []
        }
    },
+   "ivs-realtime": {
+       "regions": {
+           "aws": [
+               "ap-northeast-1",
+               "ap-northeast-2",
+               "ap-south-1",
+               "eu-central-1",
+               "eu-west-1",
+               "us-east-1",
+               "us-west-2"
+           ],
+           "aws-cn": [],
+           "aws-us-gov": []
+       }
+   },
    "ivschat": {
        "regions": {
            "aws": [

@@ -5094,6 +5163,7 @@
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",
                "ap-southeast-4",
                "ca-central-1",
                "eu-central-1",
                "eu-central-2",

@@ -5279,6 +5349,7 @@
                "ap-south-1",
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",
                "ca-central-1",
                "eu-central-1",
                "eu-north-1",

@@ -5293,7 +5364,10 @@
                "us-west-1",
                "us-west-2"
            ],
-           "aws-cn": [],
+           "aws-cn": [
+               "cn-north-1",
+               "cn-northwest-1"
+           ],
            "aws-us-gov": [
                "us-gov-east-1",
                "us-gov-west-1"

@@ -5423,7 +5497,10 @@
                "us-west-1",
                "us-west-2"
            ],
-           "aws-cn": [],
+           "aws-cn": [
+               "cn-north-1",
+               "cn-northwest-1"
+           ],
            "aws-us-gov": []
        }
    },

@@ -6195,13 +6272,17 @@
                "ap-northeast-2",
                "ap-northeast-3",
                "ap-south-1",
                "ap-south-2",
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",
                "ap-southeast-4",
                "ca-central-1",
                "eu-central-1",
                "eu-central-2",
                "eu-north-1",
                "eu-south-1",
                "eu-south-2",
                "eu-west-1",
                "eu-west-2",
                "eu-west-3",

@@ -6251,7 +6332,10 @@
                "us-east-2",
                "us-west-2"
            ],
-           "aws-cn": [],
+           "aws-cn": [
+               "cn-north-1",
+               "cn-northwest-1"
+           ],
            "aws-us-gov": []
        }
    },

@@ -6592,6 +6676,24 @@
            ]
        }
    },
+   "osis": {
+       "regions": {
+           "aws": [
+               "ap-northeast-1",
+               "ap-southeast-1",
+               "ap-southeast-2",
+               "eu-central-1",
+               "eu-west-1",
+               "eu-west-2",
+               "us-east-1",
+               "us-east-2",
+               "us-west-1",
+               "us-west-2"
+           ],
+           "aws-cn": [],
+           "aws-us-gov": []
+       }
+   },
    "outposts": {
        "regions": {
            "aws": [

@@ -6660,47 +6762,6 @@
            "aws-us-gov": []
        }
    },
-   "phd": {
-       "regions": {
-           "aws": [
-               "af-south-1",
-               "ap-east-1",
-               "ap-northeast-1",
-               "ap-northeast-2",
-               "ap-northeast-3",
-               "ap-south-1",
-               "ap-south-2",
-               "ap-southeast-1",
-               "ap-southeast-2",
-               "ap-southeast-3",
-               "ap-southeast-4",
-               "ca-central-1",
-               "eu-central-1",
-               "eu-central-2",
-               "eu-north-1",
-               "eu-south-1",
-               "eu-south-2",
-               "eu-west-1",
-               "eu-west-2",
-               "eu-west-3",
-               "me-central-1",
-               "me-south-1",
-               "sa-east-1",
-               "us-east-1",
-               "us-east-2",
-               "us-west-1",
-               "us-west-2"
-           ],
-           "aws-cn": [
-               "cn-north-1",
-               "cn-northwest-1"
-           ],
-           "aws-us-gov": [
-               "us-gov-east-1",
-               "us-gov-west-1"
-           ]
-       }
-   },
    "pi": {
        "regions": {
            "aws": [

@@ -7562,6 +7623,7 @@
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",
                "ap-southeast-4",
                "ca-central-1",
                "eu-central-1",
                "eu-central-2",

@@ -7802,6 +7864,7 @@
                "ap-northeast-2",
                "ap-northeast-3",
                "ap-south-1",
                "ap-south-2",
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",

@@ -7881,14 +7944,21 @@
                "ap-northeast-2",
                "ap-northeast-3",
                "ap-south-1",
                "ap-south-2",
                "ap-southeast-1",
                "ap-southeast-2",
                "ap-southeast-3",
                "ap-southeast-4",
                "ca-central-1",
                "eu-central-1",
                "eu-central-2",
                "eu-north-1",
                "eu-south-1",
                "eu-south-2",
                "eu-west-1",
                "eu-west-2",
                "eu-west-3",
                "me-central-1",
                "me-south-1",
                "sa-east-1",
                "us-east-1",

@@ -7897,7 +7967,10 @@
                "us-west-2"
            ],
            "aws-cn": [],
-           "aws-us-gov": []
+           "aws-us-gov": [
+               "us-gov-east-1",
+               "us-gov-west-1"
+           ]
        }
    },
    "scheduler": {

@@ -8876,30 +8949,6 @@
            ]
        }
    },
-   "sumerian": {
-       "regions": {
-           "aws": [
-               "ap-northeast-1",
-               "ap-northeast-2",
-               "ap-south-1",
-               "ap-southeast-1",
-               "ap-southeast-2",
-               "ca-central-1",
-               "eu-central-1",
-               "eu-north-1",
-               "eu-west-1",
-               "eu-west-2",
-               "eu-west-3",
-               "sa-east-1",
-               "us-east-1",
-               "us-east-2",
-               "us-west-1",
-               "us-west-2"
-           ],
-           "aws-cn": [],
-           "aws-us-gov": []
-       }
-   },
    "support": {
        "regions": {
            "aws": [

@@ -9269,6 +9318,24 @@
            ]
        }
    },
+   "verified-access": {
+       "regions": {
+           "aws": [
+               "ap-southeast-2",
+               "ca-central-1",
+               "eu-central-1",
+               "eu-west-1",
+               "eu-west-2",
+               "sa-east-1",
+               "us-east-1",
+               "us-east-2",
+               "us-west-1",
+               "us-west-2"
+           ],
+           "aws-cn": [],
+           "aws-us-gov": []
+       }
+   },
    "vmwarecloudonaws": {
        "regions": {
            "aws": [

@@ -9358,6 +9425,21 @@
            ]
        }
    },
+   "vpc-lattice": {
+       "regions": {
+           "aws": [
+               "ap-northeast-1",
+               "ap-southeast-1",
+               "ap-southeast-2",
+               "eu-west-1",
+               "us-east-1",
+               "us-east-2",
+               "us-west-2"
+           ],
+           "aws-cn": [],
+           "aws-us-gov": []
+       }
+   },
    "vpn": {
        "regions": {
            "aws": [

@@ -9643,6 +9725,7 @@
                "cn-northwest-1"
            ],
            "aws-us-gov": [
                "us-gov-east-1",
                "us-gov-west-1"
            ]
        }
@@ -130,19 +130,33 @@ def is_allowlisted(allowlist, audited_account, check, region, resource, tags):

def is_allowlisted_in_check(allowlist, audited_account, check, region, resource, tags):
    try:
-       # If there is a *, it affects to all checks
-       if "*" in allowlist["Accounts"][audited_account]["Checks"]:
-           check = "*"
-           if is_allowlisted_in_region(
-               allowlist, audited_account, check, region, resource, tags
-           ):
-               return True
-       # Check if there is the specific check
-       if check in allowlist["Accounts"][audited_account]["Checks"]:
-           if is_allowlisted_in_region(
-               allowlist, audited_account, check, region, resource, tags
-           ):
-               return True
+       for allowlisted_check in allowlist["Accounts"][audited_account][
+           "Checks"
+       ].keys():
+           # If there is a *, it affects to all checks
+           if "*" == allowlisted_check:
+               check = "*"
+               if is_allowlisted_in_region(
+                   allowlist, audited_account, check, region, resource, tags
+               ):
+                   return True
+           # Check if there is the specific check
+           elif check == allowlisted_check:
+               if is_allowlisted_in_region(
+                   allowlist, audited_account, check, region, resource, tags
+               ):
+                   return True
+           # Check if check is a regex
+           elif re.search(allowlisted_check, check):
+               if is_allowlisted_in_region(
+                   allowlist,
+                   audited_account,
+                   allowlisted_check,
+                   region,
+                   resource,
+                   tags,
+               ):
+                   return True
        return False
    except Exception as error:
        logger.critical(

@@ -192,13 +206,21 @@ def is_allowlisted_in_tags(check_allowlist, elem, resource, tags):
    # Check if there are allowlisted tags
    if "Tags" in check_allowlist:
        # Check if there are resource tags
-       if tags:
-           tags_in_resource_tags = True
-           for tag in check_allowlist["Tags"]:
-               if tag not in tags:
-                   tags_in_resource_tags = False
-           if tags_in_resource_tags and re.search(elem, resource):
-               return True
+       if not tags or not re.search(elem, resource):
+           return False
+
+       all_allowed_tags_in_resource_tags = True
+       for allowed_tag in check_allowlist["Tags"]:
+           found_allowed_tag = False
+           for resource_tag in tags:
+               if re.search(allowed_tag, resource_tag):
+                   found_allowed_tag = True
+                   break
+
+           if not found_allowed_tag:
+               all_allowed_tags_in_resource_tags = False
+
+       return all_allowed_tags_in_resource_tags
    else:
        if re.search(elem, resource):
            return True
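The `re.search(allowlisted_check, check)` branch above is what lets an allowlist key like `"ec2_*"` act as a pattern over check names. A quick illustration (the check names are examples):

```python
import re

# As a regex, "ec2_*" means "ec2" followed by zero or more underscores,
# matched anywhere in the check name.
allowlisted_check = "ec2_*"
print(bool(re.search(allowlisted_check, "ec2_ebs_default_encryption")))   # True
print(bool(re.search(allowlisted_check, "iam_user_hardware_mfa_enabled")))  # False
```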
prowler/providers/aws/lib/arn/arn.py

@@ -1,50 +1,48 @@
import re

-from arnparse import arnparse
-
from prowler.providers.aws.lib.arn.error import (
    RoleArnParsingEmptyResource,
    RoleArnParsingFailedMissingFields,
    RoleArnParsingIAMRegionNotEmpty,
    RoleArnParsingInvalidAccountID,
    RoleArnParsingInvalidResourceType,
    RoleArnParsingPartitionEmpty,
-   RoleArnParsingServiceNotIAM,
+   RoleArnParsingServiceNotIAMnorSTS,
)
+from prowler.providers.aws.lib.arn.models import ARN


-def arn_parsing(arn):
-   # check for number of fields, must be six
-   if len(arn.split(":")) != 6:
-       raise RoleArnParsingFailedMissingFields
-   else:
-       arn_parsed = arnparse(arn)
-       # First check if region is empty (in IAM arns region is always empty)
-       if arn_parsed.region is not None:
-           raise RoleArnParsingIAMRegionNotEmpty
-       else:
-           # check if needed fields are filled:
-           # - partition
-           # - service
-           # - account_id
-           # - resource_type
-           # - resource
-           if arn_parsed.partition is None:
-               raise RoleArnParsingPartitionEmpty
-           elif arn_parsed.service != "iam":
-               raise RoleArnParsingServiceNotIAM
-           elif (
-               arn_parsed.account_id is None
-               or len(arn_parsed.account_id) != 12
-               or not arn_parsed.account_id.isnumeric()
-           ):
-               raise RoleArnParsingInvalidAccountID
-           elif arn_parsed.resource_type != "role":
-               raise RoleArnParsingInvalidResourceType
-           elif arn_parsed.resource == "":
-               raise RoleArnParsingEmptyResource
-           else:
-               return arn_parsed
+def parse_iam_credentials_arn(arn: str) -> ARN:
+   arn_parsed = ARN(arn)
+   # First check if region is empty (in IAM ARN's region is always empty)
+   if arn_parsed.region:
+       raise RoleArnParsingIAMRegionNotEmpty
+   else:
+       # check if needed fields are filled:
+       # - partition
+       # - service
+       # - account_id
+       # - resource_type
+       # - resource
+       if arn_parsed.partition is None or arn_parsed.partition == "":
+           raise RoleArnParsingPartitionEmpty
+       elif arn_parsed.service != "iam" and arn_parsed.service != "sts":
+           raise RoleArnParsingServiceNotIAMnorSTS
+       elif (
+           arn_parsed.account_id is None
+           or len(arn_parsed.account_id) != 12
+           or not arn_parsed.account_id.isnumeric()
+       ):
+           raise RoleArnParsingInvalidAccountID
+       elif (
+           arn_parsed.resource_type != "role"
+           and arn_parsed.resource_type != "user"
+           and arn_parsed.resource_type != "assumed-role"
+       ):
+           raise RoleArnParsingInvalidResourceType
+       elif arn_parsed.resource == "":
+           raise RoleArnParsingEmptyResource
+       else:
+           return arn_parsed


def is_valid_arn(arn: str) -> bool:
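A hedged example of calling the new parser and handling its typed exceptions, assuming the function lives in the `arn` module alongside the `error.py` and `models.py` files shown here (the ARN values are placeholders):

```python
from prowler.providers.aws.lib.arn.arn import parse_iam_credentials_arn
from prowler.providers.aws.lib.arn.error import RoleArnParsingIAMRegionNotEmpty

try:
    parsed = parse_iam_credentials_arn("arn:aws:iam::123456789012:role/ProwlerExecRole")
    print(parsed.service, parsed.account_id, parsed.resource)  # iam 123456789012 ProwlerExecRole
except RoleArnParsingIAMRegionNotEmpty as error:
    # Raised for ARNs like arn:aws:iam:eu-west-1:...:role/x, where region must be empty.
    print(error.message)
```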
prowler/providers/aws/lib/arn/error.py

@@ -1,43 +1,49 @@
class RoleArnParsingFailedMissingFields(Exception):
-   # The arn contains a numberof fields different than six separated by :"
+   # The ARN contains a number of fields different than six separated by ":"
    def __init__(self):
-       self.message = "The assumed role arn contains a number of fields different than six separated by :, please input a valid arn"
+       self.message = "The assumed role ARN contains an invalid number of fields separated by : or it does not start by arn, please input a valid ARN"
        super().__init__(self.message)


class RoleArnParsingIAMRegionNotEmpty(Exception):
-   # The arn contains a non-empty value for region, since it is an IAM arn is not valid
+   # The ARN contains a non-empty value for region, which is not valid for an IAM ARN
    def __init__(self):
-       self.message = "The assumed role arn contains a non-empty value for region, since it is an IAM arn is not valid, please input a valid arn"
+       self.message = "The assumed role ARN contains a non-empty value for region, which is not valid for an IAM ARN, please input a valid ARN"
        super().__init__(self.message)


class RoleArnParsingPartitionEmpty(Exception):
-   # The arn contains an empty value for partition
+   # The ARN contains an empty value for partition
    def __init__(self):
-       self.message = "The assumed role arn does not contain a value for partition, please input a valid arn"
+       self.message = "The assumed role ARN does not contain a value for partition, please input a valid ARN"
        super().__init__(self.message)


-class RoleArnParsingServiceNotIAM(Exception):
+class RoleArnParsingServiceNotIAMnorSTS(Exception):
    def __init__(self):
-       self.message = "The assumed role arn contains a value for service distinct than iam, please input a valid arn"
+       self.message = "The assumed role ARN contains a value for service distinct than IAM or STS, please input a valid ARN"
        super().__init__(self.message)


+class RoleArnParsingServiceNotSTS(Exception):
+   def __init__(self):
+       self.message = "The assumed role ARN contains a value for service distinct than STS, please input a valid ARN"
+       super().__init__(self.message)
+
+
class RoleArnParsingInvalidAccountID(Exception):
    def __init__(self):
-       self.message = "The assumed role arn contains a value for account id empty or invalid, a valid account id must be composed of 12 numbers, please input a valid arn"
+       self.message = "The assumed role ARN contains an empty or invalid value for account id, a valid account id must be composed of 12 numbers, please input a valid ARN"
        super().__init__(self.message)


class RoleArnParsingInvalidResourceType(Exception):
    def __init__(self):
-       self.message = "The assumed role arn contains a value for resource type different than role, please input a valid arn"
+       self.message = "The assumed role ARN contains a value for resource type different than role, please input a valid ARN"
        super().__init__(self.message)


class RoleArnParsingEmptyResource(Exception):
    def __init__(self):
-       self.message = "The assumed role arn does not contain a value for resource, please input a valid arn"
+       self.message = "The assumed role ARN does not contain a value for resource, please input a valid ARN"
        super().__init__(self.message)
57
prowler/providers/aws/lib/arn/models.py
Normal file
@@ -0,0 +1,57 @@
from typing import Optional

from pydantic import BaseModel

from prowler.providers.aws.lib.arn.error import RoleArnParsingFailedMissingFields


class ARN(BaseModel):
    partition: str
    service: str
    region: Optional[str]  # IAM ARNs do not have a region
    account_id: str
    resource: str
    resource_type: str

    def __init__(self, arn):
        # Validate the ARN
        ## Check that the ARN starts with "arn:"
        if not arn.startswith("arn:"):
            raise RoleArnParsingFailedMissingFields
        ## Retrieve fields
        arn_elements = arn.split(":", 5)
        data = {
            "partition": arn_elements[1],
            "service": arn_elements[2],
            "region": arn_elements[3] if arn_elements[3] != "" else None,
            "account_id": arn_elements[4],
            "resource": arn_elements[5],
            "resource_type": get_arn_resource_type(arn, arn_elements[2]),
        }
        if "/" in data["resource"]:
            data["resource"] = data["resource"].split("/", 1)[1]
        elif ":" in data["resource"]:
            data["resource"] = data["resource"].split(":", 1)[1]

        # Calls Pydantic's BaseModel __init__
        super().__init__(**data)


def get_arn_resource_type(arn, service):
    if service == "s3":
        resource_type = "bucket"
    elif service == "sns":
        resource_type = "topic"
    elif service == "sqs":
        resource_type = "queue"
    elif service == "apigateway":
        split_parts = arn.split(":")[5].split("/")
        if "integration" in split_parts and "responses" in split_parts:
            resource_type = "restapis-resources-methods-integration-response"
        elif "documentation" in split_parts and "parts" in split_parts:
            resource_type = "restapis-documentation-parts"
        else:
            resource_type = arn.split(":")[5].split("/")[1]
    else:
        resource_type = arn.split(":")[5].split("/")[0]
    return resource_type
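A rough usage sketch of the new model; the field values are inferred from the parsing logic above, not from a documented example:

parsed = ARN("arn:aws:iam::123456789012:role/prowler-role")
# parsed.partition == "aws", parsed.service == "iam", parsed.region is None
# parsed.account_id == "123456789012", parsed.resource_type == "role"
# parsed.resource == "prowler-role" (the part after the first "/")
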
0
prowler/providers/aws/lib/credentials/__init__.py
Normal file
59
prowler/providers/aws/lib/credentials/credentials.py
Normal file
@@ -0,0 +1,59 @@
import sys

from boto3 import session
from colorama import Fore, Style

from prowler.lib.logger import logger
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info

AWS_STS_GLOBAL_ENDPOINT_REGION = "us-east-1"


def validate_aws_credentials(session: session, input_regions: list) -> dict:
    try:
        # For a valid STS GetCallerIdentity we have to use the right AWS Region
        if input_regions is None or len(input_regions) == 0:
            if session.region_name is not None:
                aws_region = session.region_name
            else:
                # If no region was passed with -f/--region,
                # we use the Global STS Endpoint Region, us-east-1
                aws_region = AWS_STS_GLOBAL_ENDPOINT_REGION
        else:
            # Get the first region passed with -f/--region
            aws_region = input_regions[0]
        validate_credentials_client = session.client("sts", aws_region)
        caller_identity = validate_credentials_client.get_caller_identity()
        # Include the region where the caller_identity has validated the credentials
        caller_identity["region"] = aws_region
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
        sys.exit(1)
    else:
        return caller_identity


def print_aws_credentials(audit_info: AWS_Audit_Info):
    # Beautify audited regions, set "all" if there is no filter region
    regions = (
        ", ".join(audit_info.audited_regions)
        if audit_info.audited_regions is not None
        else "all"
    )
    # Beautify audited profile, set "default" if there is no profile set
    profile = audit_info.profile if audit_info.profile is not None else "default"

    report = f"""
This report is being generated using the credentials below:

AWS-CLI Profile: {Fore.YELLOW}[{profile}]{Style.RESET_ALL} AWS Filter Region: {Fore.YELLOW}[{regions}]{Style.RESET_ALL}
AWS Account: {Fore.YELLOW}[{audit_info.audited_account}]{Style.RESET_ALL} UserId: {Fore.YELLOW}[{audit_info.audited_user_id}]{Style.RESET_ALL}
Caller Identity ARN: {Fore.YELLOW}[{audit_info.audited_identity_arn}]{Style.RESET_ALL}
"""
    # If -A is set, print Assumed Role ARN
    if audit_info.assumed_role_info.role_arn is not None:
        report += f"""Assumed Role ARN: {Fore.YELLOW}[{audit_info.assumed_role_info.role_arn}]{Style.RESET_ALL}
"""
    print(report)
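A minimal usage sketch, assuming a default boto3 session; the variable names are illustrative:

from boto3 import session

aws_session = session.Session()
caller_identity = validate_aws_credentials(aws_session, input_regions=None)
# caller_identity is the STS GetCallerIdentity response plus the "region" key added above,
# e.g. caller_identity["Account"], caller_identity["Arn"], caller_identity["region"]
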
0
prowler/providers/aws/lib/organizations/__init__.py
Normal file
40
prowler/providers/aws/lib/organizations/organizations.py
Normal file
@@ -0,0 +1,40 @@
import sys

from boto3 import client

from prowler.lib.logger import logger
from prowler.providers.aws.lib.audit_info.models import AWS_Organizations_Info


def get_organizations_metadata(
    metadata_account: str, assumed_credentials: dict
) -> AWS_Organizations_Info:
    try:
        organizations_client = client(
            "organizations",
            aws_access_key_id=assumed_credentials["Credentials"]["AccessKeyId"],
            aws_secret_access_key=assumed_credentials["Credentials"]["SecretAccessKey"],
            aws_session_token=assumed_credentials["Credentials"]["SessionToken"],
        )
        organizations_metadata = organizations_client.describe_account(
            AccountId=metadata_account
        )
        list_tags_for_resource = organizations_client.list_tags_for_resource(
            ResourceId=metadata_account
        )
    except Exception as error:
        logger.critical(f"{error.__class__.__name__} -- {error}")
        sys.exit(1)
    else:
        # Convert Tags dictionary to String
        account_details_tags = ""
        for tag in list_tags_for_resource["Tags"]:
            account_details_tags += tag["Key"] + ":" + tag["Value"] + ","
        organizations_info = AWS_Organizations_Info(
            account_details_email=organizations_metadata["Account"]["Email"],
            account_details_name=organizations_metadata["Account"]["Name"],
            account_details_arn=organizations_metadata["Account"]["Arn"],
            account_details_org=organizations_metadata["Account"]["Arn"].split("/")[1],
            account_details_tags=account_details_tags,
        )
        return organizations_info
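A hedged example of how this helper would typically be driven; the assumed-role credentials dict is the standard STS AssumeRole response shape, and sts_client plus the role ARN are illustrative:

assumed_credentials = sts_client.assume_role(
    RoleArn="arn:aws:iam::123456789012:role/OrganizationsReadRole",  # illustrative
    RoleSessionName="prowler-org-metadata",
)
org_info = get_organizations_metadata("123456789012", assumed_credentials)
# org_info.account_details_email, org_info.account_details_org, org_info.account_details_tags, ...
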
@@ -14,6 +14,7 @@ from prowler.config.config import (
    output_file_timestamp,
)
from prowler.lib.logger import logger
from prowler.providers.aws.lib.arn.models import get_arn_resource_type
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info


@@ -153,22 +154,8 @@ def create_inventory_table(resources: list, resources_in_region: dict) -> dict:
                services[service] = 0
            services[service] += 1

            if service == "s3":
                resource_type = "bucket"
            elif service == "sns":
                resource_type = "topic"
            elif service == "sqs":
                resource_type = "queue"
            elif service == "apigateway":
                split_parts = resource["arn"].split(":")[5].split("/")
                if "integration" in split_parts and "responses" in split_parts:
                    resource_type = "restapis-resources-methods-integration-response"
                elif "documentation" in split_parts and "parts" in split_parts:
                    resource_type = "restapis-documentation-parts"
                else:
                    resource_type = resource["arn"].split(":")[5].split("/")[1]
            else:
                resource_type = resource["arn"].split(":")[5].split("/")[0]
            resource_type = get_arn_resource_type(resource["arn"], service)

            if service not in resources_type:
                resources_type[service] = {}
            if resource_type not in resources_type[service]:

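The refactor deduplicates the inlined branching by delegating to the shared helper. For reference, a few mappings implied by the helper's logic above (illustrative ARNs):

# get_arn_resource_type("arn:aws:s3:::my-bucket", "s3")                               -> "bucket"
# get_arn_resource_type("arn:aws:sqs:eu-west-1:123456789012:my-queue", "sqs")         -> "queue"
# get_arn_resource_type("arn:aws:ec2:eu-west-1:123456789012:instance/i-0abc", "ec2")  -> "instance"
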
@@ -1,6 +1,7 @@
import threading
from typing import Optional

from botocore.exceptions import ClientError
from pydantic import BaseModel

from prowler.lib.logger import logger
@@ -79,10 +80,21 @@ class AccessAnalyzer:
                if analyzer.status == "ACTIVE":
                    regional_client = self.regional_clients[analyzer.region]
                    for finding in analyzer.findings:
                        finding_information = regional_client.get_finding(
                            analyzerArn=analyzer.arn, id=finding.id
                        )
                        finding.status = finding_information["finding"]["status"]
                        try:
                            finding_information = regional_client.get_finding(
                                analyzerArn=analyzer.arn, id=finding.id
                            )
                            finding.status = finding_information["finding"]["status"]
                        except ClientError as error:
                            if (
                                error.response["Error"]["Code"]
                                == "ResourceNotFoundException"
                            ):
                                logger.warning(
                                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                                )
                                finding.status = ""
                                continue

        except Exception as error:
            logger.error(

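The pattern introduced here — catch botocore's ClientError, inspect error.response["Error"]["Code"], and downgrade expected codes to a warning — recurs throughout this changeset. A generic sketch, with the client call and region as placeholders:

from botocore.exceptions import ClientError

try:
    response = client.some_api_call()  # placeholder call
except ClientError as error:
    if error.response["Error"]["Code"] == "ResourceNotFoundException":
        # Expected: the resource disappeared mid-scan, so log and keep going
        logger.warning(f"{region} -- {error}")
    else:
        raise
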
@@ -36,8 +36,8 @@ class ApiGatewayV2:
    def __get_apis__(self, regional_client):
        logger.info("APIGatewayv2 - Getting APIs...")
        try:
            get_rest_apis_paginator = regional_client.get_paginator("get_apis")
            for page in get_rest_apis_paginator.paginate():
            get_apis_paginator = regional_client.get_paginator("get_apis")
            for page in get_apis_paginator.paginate():
                for apigw in page["Items"]:
                    if not self.audit_resources or (
                        is_resource_filtered(apigw["ApiId"], self.audit_resources)

@@ -7,7 +7,7 @@
    ],
    "ServiceName": "autoscaling",
    "SubServiceName": "",
    "ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id",
    "ResourceIdTemplate": "arn:partition:autoscaling:region:account-id:autoScalingGroupName/resource-name",
    "Severity": "critical",
    "ResourceType": "Other",
    "Description": "Find secrets in EC2 Auto Scaling Launch Configuration",

@@ -0,0 +1,30 @@
{
    "Provider": "aws",
    "CheckID": "autoscaling_group_multiple_az",
    "CheckTitle": "EC2 Auto Scaling Group should use multiple Availability Zones",
    "CheckType": [],
    "ServiceName": "autoscaling",
    "SubServiceName": "",
    "ResourceIdTemplate": "arn:partition:autoscaling:region:account-id:autoScalingGroupName/resource-name",
    "Severity": "medium",
    "ResourceType": "Other",
    "Description": "EC2 Auto Scaling Group should use multiple Availability Zones",
    "Risk": "In case of a failure in a single Availability Zone, the Auto Scaling Group will not be able to launch new instances to replace the failed ones.",
    "RelatedUrl": "https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html",
    "Remediation": {
        "Code": {
            "CLI": "aws autoscaling update-auto-scaling-group",
            "NativeIaC": "",
            "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/AutoScaling/multiple-availability-zones.html",
            "Terraform": ""
        },
        "Recommendation": {
            "Text": "Configure multiple Availability Zones for EC2 Auto Scaling Group",
            "Url": "https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-add-availability-zone.html"
        }
    },
    "Categories": [],
    "DependsOn": [],
    "RelatedTo": [],
    "Notes": ""
}

@@ -0,0 +1,28 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.autoscaling.autoscaling_client import (
    autoscaling_client,
)


class autoscaling_group_multiple_az(Check):
    def execute(self):
        findings = []
        for group in autoscaling_client.groups:
            report = Check_Report_AWS(self.metadata())
            report.region = group.region
            report.resource_id = group.name
            report.resource_arn = group.arn
            report.resource_tags = group.tags
            report.status = "FAIL"
            report.status_extended = (
                f"Autoscaling group {group.name} has only one availability zone."
            )
            if len(group.availability_zones) > 1:
                report.status = "PASS"
                report.status_extended = (
                    f"Autoscaling group {group.name} has multiple availability zones."
                )

            findings.append(report)

        return findings

@@ -17,6 +17,8 @@ class AutoScaling:
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        self.launch_configurations = []
        self.__threading_call__(self.__describe_launch_configurations__)
        self.groups = []
        self.__threading_call__(self.__describe_auto_scaling_groups__)

    def __get_session__(self):
        return self.session
@@ -59,6 +61,35 @@ class AutoScaling:
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_auto_scaling_groups__(self, regional_client):
        logger.info("AutoScaling - Describing AutoScaling Groups...")
        try:
            describe_auto_scaling_groups_paginator = regional_client.get_paginator(
                "describe_auto_scaling_groups"
            )
            for page in describe_auto_scaling_groups_paginator.paginate():
                for group in page["AutoScalingGroups"]:
                    if not self.audit_resources or (
                        is_resource_filtered(
                            group["AutoScalingGroupARN"],
                            self.audit_resources,
                        )
                    ):
                        self.groups.append(
                            Group(
                                arn=group.get("AutoScalingGroupARN"),
                                name=group.get("AutoScalingGroupName"),
                                region=regional_client.region,
                                availability_zones=group.get("AvailabilityZones"),
                                tags=group.get("Tags"),
                            )
                        )

        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )


class LaunchConfiguration(BaseModel):
    arn: str
@@ -66,3 +97,11 @@ class LaunchConfiguration(BaseModel):
    user_data: str
    image_id: str
    region: str


class Group(BaseModel):
    arn: str
    name: str
    region: str
    availability_zones: list
    tags: list = []

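A small sketch of the data the new check consumes; all values here are invented:

group = Group(
    arn="arn:aws:autoscaling:eu-west-1:123456789012:autoScalingGroupName/my-asg",  # illustrative
    name="my-asg",
    region="eu-west-1",
    availability_zones=["eu-west-1a", "eu-west-1b"],
    tags=[],
)
# autoscaling_group_multiple_az would report PASS here, since len(availability_zones) > 1
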
@@ -10,7 +10,7 @@
    "ServiceName": "backup",
    "SubServiceName": "",
    "ResourceIdTemplate": "arn:partition:service:region:account-id:backup-plan:backup-plan-id",
    "Severity": "medium",
    "Severity": "low",
    "ResourceType": "AwsBackupBackupPlan",
    "Description": "This check ensures that there is at least one backup plan in place.",
    "Risk": "Without a backup plan, an organization may be at risk of losing important data due to accidental deletion, system failures, or natural disasters. This can result in significant financial and reputational damage for the organization.",

@@ -9,11 +9,13 @@ class backup_plans_exist(Check):
        report.status = "FAIL"
        report.status_extended = "No Backup Plan Exist"
        report.resource_arn = ""
        report.resource_id = "No Backups"
        report.resource_id = "Backups"
        report.region = backup_client.region
        if backup_client.backup_plans:
            report.status = "PASS"
            report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
            report.status_extended = (
                f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
            )
            report.resource_arn = backup_client.backup_plans[0].arn
            report.resource_id = backup_client.backup_plans[0].name
            report.region = backup_client.backup_plans[0].region

@@ -5,18 +5,20 @@ from prowler.providers.aws.services.backup.backup_client import backup_client
class backup_reportplans_exist(Check):
    def execute(self):
        findings = []
        report = Check_Report_AWS(self.metadata())
        report.status = "FAIL"
        report.status_extended = "No Backup Report Plan Exist"
        report.resource_arn = ""
        report.resource_id = "No Backups"
        report.region = backup_client.region
        if backup_client.backup_report_plans:
            report.status = "PASS"
            report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
            report.resource_arn = backup_client.backup_report_plans[0].arn
            report.resource_id = backup_client.backup_report_plans[0].name
            report.region = backup_client.backup_report_plans[0].region
        # We only check report plans if backup plans exist, reducing noise
        if backup_client.backup_plans:
            report = Check_Report_AWS(self.metadata())
            report.status = "FAIL"
            report.status_extended = "No Backup Report Plan Exist"
            report.resource_arn = ""
            report.resource_id = "Backups"
            report.region = backup_client.region
            if backup_client.backup_report_plans:
                report.status = "PASS"
                report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
                report.resource_arn = backup_client.backup_report_plans[0].arn
                report.resource_id = backup_client.backup_report_plans[0].name
                report.region = backup_client.backup_report_plans[0].region

        findings.append(report)
            findings.append(report)
        return findings

@@ -102,7 +102,7 @@ class Backup:
                            "LastExecutionDate"
                        ),
                        advanced_settings=configuration.get(
                            "AdvancedBackupSettings"
                            "AdvancedBackupSettings", []
                        ),
                    )
                )

@@ -1,7 +1,7 @@
{
    "Provider": "aws",
    "CheckID": "backup_vaults_exist",
    "CheckTitle": "Esure AWS Backup vaults exist",
    "CheckTitle": "Ensure AWS Backup vaults exist",
    "CheckType": [
        "Recover",
        "Resilience",
@@ -10,7 +10,7 @@
    "ServiceName": "backup",
    "SubServiceName": "",
    "ResourceIdTemplate": "arn:partition:service:region:account-id:backup-vault:backup-vault-id",
    "Severity": "medium",
    "Severity": "low",
    "ResourceType": "AwsBackupBackupVault",
    "Description": "This check ensures that AWS Backup vaults exist to provide a secure and durable storage location for backup data.",
    "Risk": "Without an AWS Backup vault, an organization's critical data may be at risk of being lost in the event of an accidental deletion, system failures, or natural disasters.",

@@ -9,7 +9,7 @@ class backup_vaults_exist(Check):
        report.status = "FAIL"
        report.status_extended = "No Backup Vault Exist"
        report.resource_arn = ""
        report.resource_id = "No Backups"
        report.resource_id = "Backups"
        report.region = backup_client.region
        if backup_client.backup_vaults:
            report.status = "PASS"

@@ -81,7 +81,7 @@ class CloudFormation:
                stack.is_nested_stack = True if stack.root_nested_stack != "" else False

        except ClientError as error:
            if error.response["Error"]["Code"] != "ValidationError":
            if error.response["Error"]["Code"] == "ValidationError":
                logger.warning(
                    f"{stack.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )

@@ -16,14 +16,8 @@ class cloudfront_distributions_https_enabled(Check):
            report.resource_arn = distribution.arn
            report.resource_id = distribution.id
            report.resource_tags = distribution.tags

            if (
                distribution.default_cache_config
                and distribution.default_cache_config.viewer_protocol_policy
                == ViewerProtocolPolicy.allow_all
            ):
                report.status = "FAIL"
                report.status_extended = f"CloudFront Distribution {distribution.id} viewers can use HTTP or HTTPS"
            elif (
                distribution.default_cache_config
                and distribution.default_cache_config.viewer_protocol_policy
                == ViewerProtocolPolicy.redirect_to_https
@@ -41,6 +35,10 @@ class cloudfront_distributions_https_enabled(Check):
                report.status_extended = (
                    f"CloudFront Distribution {distribution.id} has HTTPS only"
                )
            else:
                report.status = "FAIL"
                report.status_extended = f"CloudFront Distribution {distribution.id} viewers can use HTTP or HTTPS"

            findings.append(report)

        return findings

@@ -83,7 +83,7 @@ class CloudFront:
                    ]["WebACLId"]

                    # Default Cache Config
                    default_chache_config = DefaultCacheConfigBehaviour(
                    default_cache_config = DefaultCacheConfigBehaviour(
                        realtime_log_config_arn=distribution_config["DistributionConfig"][
                            "DefaultCacheBehavior"
                        ].get("RealtimeLogConfigArn"),
@@ -96,7 +96,7 @@ class CloudFront:
                    )
                    distributions[
                        distribution_id
                    ].default_cache_config = default_chache_config
                    ].default_cache_config = default_cache_config

        except Exception as error:
            logger.error(

@@ -2,6 +2,7 @@ import threading
from datetime import datetime
from typing import Optional

from botocore.client import ClientError
from pydantic import BaseModel

from prowler.lib.logger import logger
@@ -158,6 +159,16 @@ class Cloudtrail:
                        insight_selectors = client_insight_selectors.get(
                            "InsightSelectors"
                        )
                    except ClientError as error:
                        if (
                            error.response["Error"]["Code"]
                            == "InsightNotEnabledException"
                        ):
                            continue
                        else:
                            logger.error(
                                f"{client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                            )
                    except Exception as error:
                        logger.error(
                            f"{client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

@@ -2,6 +2,7 @@ import threading
from datetime import datetime, timezone
from typing import Optional

from botocore.exceptions import ClientError
from pydantic import BaseModel

from prowler.lib.logger import logger
@@ -209,11 +210,17 @@ class Logs:
        logger.info("CloudWatch Logs - List Tags...")
        try:
            for log_group in self.log_groups:
                regional_client = self.regional_clients[log_group.region]
                response = regional_client.list_tags_log_group(
                    logGroupName=log_group.name
                )["tags"]
                log_group.tags = [response]
                try:
                    regional_client = self.regional_clients[log_group.region]
                    response = regional_client.list_tags_log_group(
                        logGroupName=log_group.name
                    )["tags"]
                    log_group.tags = [response]
                except ClientError as error:
                    if error.response["Error"]["Code"] == "ResourceNotFoundException":
                        log_group.tags = []

                    continue
        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"

@@ -177,7 +177,7 @@ class DAX:
                    cluster.tags = response

                except ClientError as error:
                    if error.response["Error"]["Code"] != "InvalidARNFault":
                    if error.response["Error"]["Code"] == "InvalidARNFault":
                        logger.warning(
                            f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                        )

@@ -29,7 +29,9 @@ class EC2:
        self.snapshots = []
        self.__threading_call__(self.__describe_snapshots__)
        self.__get_snapshot_public__()
        self.__threading_call__(self.__describe_network_interfaces__)
        self.network_interfaces = []
        self.__threading_call__(self.__describe_public_network_interfaces__)
        self.__threading_call__(self.__describe_sg_network_interfaces__)
        self.images = []
        self.__threading_call__(self.__describe_images__)
        self.volumes = []
@@ -220,10 +222,37 @@ class EC2:
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_network_interfaces__(self, regional_client):
    def __describe_public_network_interfaces__(self, regional_client):
        logger.info("EC2 - Describing Network Interfaces...")
        try:
            # Get SGs Network Interfaces
            # Get Network Interfaces with Public IPs
            describe_network_interfaces_paginator = regional_client.get_paginator(
                "describe_network_interfaces"
            )
            for page in describe_network_interfaces_paginator.paginate():
                for interface in page["NetworkInterfaces"]:
                    if interface.get("Association"):
                        self.network_interfaces.append(
                            NetworkInterface(
                                public_ip=interface["Association"]["PublicIp"],
                                type=interface["InterfaceType"],
                                private_ip=interface["PrivateIpAddress"],
                                subnet_id=interface["SubnetId"],
                                vpc_id=interface["VpcId"],
                                region=regional_client.region,
                                tags=interface.get("TagSet"),
                            )
                        )

        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_sg_network_interfaces__(self, regional_client):
        logger.info("EC2 - Describing Network Interfaces...")
        try:
            # Get Network Interfaces for Security Groups
            for sg in self.security_groups:
                regional_client = self.regional_clients[sg.region]
                describe_network_interfaces_paginator = regional_client.get_paginator(
@@ -241,7 +270,6 @@ class EC2:
                ):
                    for interface in page["NetworkInterfaces"]:
                        sg.network_interfaces.append(interface["NetworkInterfaceId"])

        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -425,6 +453,16 @@ class NetworkACL(BaseModel):
    tags: Optional[list] = []


class NetworkInterface(BaseModel):
    public_ip: str
    private_ip: str
    type: str
    subnet_id: str
    vpc_id: str
    region: str
    tags: Optional[list] = []


class ElasticIP(BaseModel):
    public_ip: Optional[str]
    association_id: Optional[str]

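For reference, a sketch of the NetworkInterface records the new collector produces; the field values are invented:

eni = NetworkInterface(
    public_ip="203.0.113.10",
    private_ip="10.0.1.25",
    type="interface",
    subnet_id="subnet-0abc1234",
    vpc_id="vpc-0abc1234",
    region="eu-west-1",
    tags=[],
)
# Only interfaces with an "Association" (i.e. a public IP) are stored, per the filter above.
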
@@ -5,24 +5,27 @@ from prowler.providers.aws.services.ecr.ecr_client import ecr_client
class ecr_registry_scan_images_on_push_enabled(Check):
    def execute(self):
        findings = []
        for registry in ecr_client.registries:
            report = Check_Report_AWS(self.metadata())
            report.region = registry.region
            report.resource_id = registry.id
            report.resource_tags = registry.tags
            report.status = "FAIL"
            report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning without scan on push"
            if registry.rules:
                report.status = "PASS"
                report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scan with scan on push"
                filters = True
                for rule in registry.rules:
                    if not rule.scan_filters or "'*'" in str(rule.scan_filters):
                        filters = False
                if filters:
                    report.status = "FAIL"
                    report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning with scan on push but with repository filters"
        for registry in ecr_client.registries.values():
            # We only check the registry if it is in use, i.e. it has repositories
            if len(registry.repositories) != 0:
                report = Check_Report_AWS(self.metadata())
                report.region = registry.region
                report.resource_id = registry.id
                # A registry cannot have tags
                report.resource_tags = []
                report.status = "FAIL"
                report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning without scan on push enabled"
                if registry.rules:
                    report.status = "PASS"
                    report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scan with scan on push enabled"
                    filters = True
                    for rule in registry.rules:
                        if not rule.scan_filters or "'*'" in str(rule.scan_filters):
                            filters = False
                    if filters:
                        report.status = "FAIL"
                        report.status_extended = f"ECR registry {registry.id} has {registry.scan_type} scanning with scan on push but with repository filters"

            findings.append(report)
                findings.append(report)

        return findings

@@ -5,22 +5,19 @@ from prowler.providers.aws.services.ecr.ecr_client import ecr_client
class ecr_repositories_lifecycle_policy_enabled(Check):
    def execute(self):
        findings = []
        for repository in ecr_client.repositories:
            report = Check_Report_AWS(self.metadata())
            report.region = repository.region
            report.resource_id = repository.name
            report.resource_arn = repository.arn
            report.resource_tags = repository.tags
            report.status = "FAIL"
            report.status_extended = (
                f"Repository {repository.name} has no lifecycle policy"
            )
            if repository.lyfecicle_policy:
                report.status = "PASS"
                report.status_extended = (
                    f"Repository {repository.name} has lifecycle policy"
                )
        for registry in ecr_client.registries.values():
            for repository in registry.repositories:
                report = Check_Report_AWS(self.metadata())
                report.region = repository.region
                report.resource_id = repository.name
                report.resource_arn = repository.arn
                report.resource_tags = repository.tags
                report.status = "FAIL"
                report.status_extended = f"Repository {repository.name} does not have a lifecycle policy configured"
                if repository.lifecycle_policy:
                    report.status = "PASS"
                    report.status_extended = f"Repository {repository.name} has a lifecycle policy configured"

            findings.append(report)
                findings.append(report)

        return findings

@@ -5,25 +5,28 @@ from prowler.providers.aws.services.ecr.ecr_client import ecr_client
class ecr_repositories_not_publicly_accessible(Check):
    def execute(self):
        findings = []
        for repository in ecr_client.repositories:
            report = Check_Report_AWS(self.metadata())
            report.region = repository.region
            report.resource_id = repository.name
            report.resource_arn = repository.arn
            report.resource_tags = repository.tags
            report.status = "PASS"
            report.status_extended = f"Repository {repository.name} is not open"
            if repository.policy:
                for statement in repository.policy["Statement"]:
                    if statement["Effect"] == "Allow":
                        if "*" in statement["Principal"] or (
                            "AWS" in statement["Principal"]
                            and "*" in statement["Principal"]["AWS"]
                        ):
                            report.status = "FAIL"
                            report.status_extended = f"Repository {repository.name} policy may allow anonymous users to perform actions (Principal: '*')"
                            break
        for registry in ecr_client.registries.values():
            for repository in registry.repositories:
                report = Check_Report_AWS(self.metadata())
                report.region = repository.region
                report.resource_id = repository.name
                report.resource_arn = repository.arn
                report.resource_tags = repository.tags
                report.status = "PASS"
                report.status_extended = (
                    f"Repository {repository.name} is not publicly accessible"
                )
                if repository.policy:
                    for statement in repository.policy["Statement"]:
                        if statement["Effect"] == "Allow":
                            if "*" in statement["Principal"] or (
                                "AWS" in statement["Principal"]
                                and "*" in statement["Principal"]["AWS"]
                            ):
                                report.status = "FAIL"
                                report.status_extended = f"Repository {repository.name} policy may allow anonymous users to perform actions (Principal: '*')"
                                break

            findings.append(report)
                findings.append(report)

        return findings

@@ -5,22 +5,23 @@ from prowler.providers.aws.services.ecr.ecr_client import ecr_client
class ecr_repositories_scan_images_on_push_enabled(Check):
    def execute(self):
        findings = []
        for repository in ecr_client.repositories:
            report = Check_Report_AWS(self.metadata())
            report.region = repository.region
            report.resource_id = repository.name
            report.resource_arn = repository.arn
            report.resource_tags = repository.tags
            report.status = "PASS"
            report.status_extended = (
                f"ECR repository {repository.name} has scan on push enabled"
            )
            if not repository.scan_on_push:
                report.status = "FAIL"
        for registry in ecr_client.registries.values():
            for repository in registry.repositories:
                report = Check_Report_AWS(self.metadata())
                report.region = repository.region
                report.resource_id = repository.name
                report.resource_arn = repository.arn
                report.resource_tags = repository.tags
                report.status = "PASS"
                report.status_extended = (
                    f"ECR repository {repository.name} has scan on push disabled"
                    f"ECR repository {repository.name} has scan on push enabled"
                )
                if not repository.scan_on_push:
                    report.status = "FAIL"
                    report.status_extended = (
                        f"ECR repository {repository.name} has scan on push disabled"
                    )

            findings.append(report)
                findings.append(report)

        return findings

@@ -5,32 +5,37 @@ from prowler.providers.aws.services.ecr.ecr_client import ecr_client
class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
    def execute(self):
        findings = []
        for repository in ecr_client.repositories:
            for image in repository.images_details:
                report = Check_Report_AWS(self.metadata())
                report.region = repository.region
                report.resource_id = repository.name
                report.resource_arn = repository.arn
                report.resource_tags = repository.tags
                report.status = "PASS"
                report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings"
                if not image.scan_findings_status:
                    report.status = "FAIL"
                    report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan"
                elif image.scan_findings_status == "FAILED":
                    report.status = "FAIL"
                    report.status_extended = (
                        f"ECR repository {repository.name} with scan status FAILED"
                    )
                elif image.scan_findings_status != "FAILED":
                    if image.scan_findings_severity_count and (
                        image.scan_findings_severity_count.critical
                        or image.scan_findings_severity_count.high
                        or image.scan_findings_severity_count.medium
                    ):
                        report.status = "FAIL"
                        report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium} "
        for registry in ecr_client.registries.values():
            for repository in registry.repositories:
                # First check if the repository has images
                if len(repository.images_details) > 0:
                    # We only want to check the latest image pushed
                    image = repository.images_details[-1]

                findings.append(report)
                    report = Check_Report_AWS(self.metadata())
                    report.region = repository.region
                    report.resource_id = repository.name
                    report.resource_arn = repository.arn
                    report.resource_tags = repository.tags
                    report.status = "PASS"
                    report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings"
                    if not image.scan_findings_status:
                        report.status = "FAIL"
                        report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan"
                    elif image.scan_findings_status == "FAILED":
                        report.status = "FAIL"
                        report.status_extended = (
                            f"ECR repository {repository.name} with scan status FAILED"
                        )
                    elif image.scan_findings_status != "FAILED":
                        if image.scan_findings_severity_count and (
                            image.scan_findings_severity_count.critical
                            or image.scan_findings_severity_count.high
                            or image.scan_findings_severity_count.medium
                        ):
                            report.status = "FAIL"
                            report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium} "

                    findings.append(report)

        return findings

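The behavioural change worth noting: the check no longer reports once per image; it relies on the service sorting images_details by image_pushed_at (see the ECR service diff below), so the last element is the most recently pushed image. A minimal sketch of the selection, assuming a populated repository:

# images_details is sorted ascending by image_pushed_at in __get_image_details__,
# so the last element is the newest image
latest_image = repository.images_details[-1]
print(latest_image.latest_tag, latest_image.scan_findings_status)
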
@@ -1,4 +1,5 @@
import threading
from datetime import datetime
from json import loads
from typing import Optional

@@ -17,14 +18,14 @@ class ECR:
        self.session = audit_info.audit_session
        self.audit_resources = audit_info.audit_resources
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        self.repositories = []
        self.registries = []
        self.__threading_call__(self.__describe_repositories__)
        self.__describe_repository_policies__()
        self.__get_image_details__()
        self.__get_repository_lifecycle_policy__()
        self.registry_id = audit_info.audited_account
        self.registries = {}
        self.__threading_call__(self.__describe_registries_and_repositories__)
        self.__threading_call__(self.__describe_repository_policies__)
        self.__threading_call__(self.__get_image_details__)
        self.__threading_call__(self.__get_repository_lifecycle_policy__)
        self.__threading_call__(self.__get_registry_scanning_configuration__)
        self.__list_tags_for_resource__()
        self.__threading_call__(self.__list_tags_for_resource__)

    def __get_session__(self):
        return self.session
@@ -38,8 +39,9 @@ class ECR:
        for t in threads:
            t.join()

    def __describe_repositories__(self, regional_client):
        logger.info("ECR - Describing repositories...")
    def __describe_registries_and_repositories__(self, regional_client):
        logger.info("ECR - Describing registries and repositories...")
        regional_registry_repositories = []
        try:
            describe_ecr_paginator = regional_client.get_paginator(
                "describe_repositories"
@@ -51,126 +53,157 @@ class ECR:
                        repository["repositoryArn"], self.audit_resources
                    )
                ):
                    self.repositories.append(
                    regional_registry_repositories.append(
                        Repository(
                            name=repository["repositoryName"],
                            arn=repository["repositoryArn"],
                            registry_id=repository["registryId"],
                            region=regional_client.region,
                            scan_on_push=repository["imageScanningConfiguration"][
                                "scanOnPush"
                            ],
                            policy=None,
                            images_details=[],
                            lyfecicle_policy=None,
                            lifecycle_policy=None,
                        )
                    )
            # The default ECR registry is assumed
            self.registries[regional_client.region] = Registry(
                id=self.registry_id,
                region=regional_client.region,
                repositories=regional_registry_repositories,
            )

        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_repository_policies__(self):
    def __describe_repository_policies__(self, regional_client):
        logger.info("ECR - Describing repository policies...")
        try:
            for repository in self.repositories:
                client = self.regional_clients[repository.region]
                policy = client.get_repository_policy(repositoryName=repository.name)
                if "policyText" in policy:
                    repository.policy = loads(policy["policyText"])
            if regional_client.region in self.registries:
                for repository in self.registries[regional_client.region].repositories:
                    client = self.regional_clients[repository.region]
                    policy = client.get_repository_policy(
                        repositoryName=repository.name
                    )
                    if "policyText" in policy:
                        repository.policy = loads(policy["policyText"])

        except Exception as error:
            if "RepositoryPolicyNotFoundException" not in str(error):
                logger.error(
                    f"-- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )

    def __get_repository_lifecycle_policy__(self):
    def __get_repository_lifecycle_policy__(self, regional_client):
        logger.info("ECR - Getting repository lifecycle policy...")
        try:
            for repository in self.repositories:
                client = self.regional_clients[repository.region]
                policy = client.get_lifecycle_policy(repositoryName=repository.name)
                if "lifecyclePolicyText" in policy:
                    repository.lyfecicle_policy = policy["lifecyclePolicyText"]
            if regional_client.region in self.registries:
                for repository in self.registries[regional_client.region].repositories:
                    client = self.regional_clients[repository.region]
                    policy = client.get_lifecycle_policy(repositoryName=repository.name)
                    if "lifecyclePolicyText" in policy:
                        repository.lifecycle_policy = policy["lifecyclePolicyText"]

        except Exception as error:
            if "LifecyclePolicyNotFoundException" not in str(error):
                logger.error(
                    f"-- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )

    def __get_image_details__(self):
    def __get_image_details__(self, regional_client):
        logger.info("ECR - Getting images details...")
        try:
            for repository in self.repositories:
                # if the repo is not scanning pushed images there is nothing to do
                if repository.scan_on_push:
                    client = self.regional_clients[repository.region]
                    describe_images_paginator = client.get_paginator("describe_images")
                    for page in describe_images_paginator.paginate(
                        repositoryName=repository.name
                    ):
                        for image in page["imageDetails"]:
                            severity_counts = None
                            last_scan_status = None
                            if "imageScanStatus" in image:
                                last_scan_status = image["imageScanStatus"]["status"]
            if regional_client.region in self.registries:
                for repository in self.registries[regional_client.region].repositories:
                    # There is nothing to do if the repository is not scanning pushed images
                    if repository.scan_on_push:
                        client = self.regional_clients[repository.region]
                        describe_images_paginator = client.get_paginator(
                            "describe_images"
                        )
                        for page in describe_images_paginator.paginate(
                            registryId=self.registries[regional_client.region].id,
                            repositoryName=repository.name,
                            PaginationConfig={"PageSize": 1000},
                        ):
                            for image in page["imageDetails"]:
                                # The following condition is required since sometimes
                                # the AWS ECR API returns None using the iterator
                                if image is not None:
                                    severity_counts = None
                                    last_scan_status = None
                                    if "imageScanStatus" in image:
                                        last_scan_status = image["imageScanStatus"][
                                            "status"
                                        ]

                            if "imageScanFindingsSummary" in image:
                                severity_counts = FindingSeverityCounts(
                                    critical=0, high=0, medium=0
                                )
                                finding_severity_counts = image[
                                    "imageScanFindingsSummary"
                                ]["findingSeverityCounts"]
                                if "CRITICAL" in finding_severity_counts:
                                    severity_counts.critical = finding_severity_counts[
                                        "CRITICAL"
                                    ]
                                if "HIGH" in finding_severity_counts:
                                    severity_counts.high = finding_severity_counts[
                                        "HIGH"
                                    ]
                                if "MEDIUM" in finding_severity_counts:
                                    severity_counts.medium = finding_severity_counts[
                                        "MEDIUM"
                                    ]
                            latest_tag = "None"
                            if image.get("imageTags"):
                                latest_tag = image["imageTags"][0]
                            repository.images_details.append(
                                ImageDetails(
                                    latest_tag=latest_tag,
                                    latest_digest=image["imageDigest"],
                                    scan_findings_status=last_scan_status,
                                    scan_findings_severity_count=severity_counts,
                                )
                            )
                                    if "imageScanFindingsSummary" in image:
                                        severity_counts = FindingSeverityCounts(
                                            critical=0, high=0, medium=0
                                        )
                                        finding_severity_counts = image[
                                            "imageScanFindingsSummary"
                                        ]["findingSeverityCounts"]
                                        if "CRITICAL" in finding_severity_counts:
                                            severity_counts.critical = (
                                                finding_severity_counts["CRITICAL"]
                                            )
                                        if "HIGH" in finding_severity_counts:
                                            severity_counts.high = (
                                                finding_severity_counts["HIGH"]
                                            )
                                        if "MEDIUM" in finding_severity_counts:
                                            severity_counts.medium = (
                                                finding_severity_counts["MEDIUM"]
                                            )
                                    latest_tag = "None"
                                    if image.get("imageTags"):
                                        latest_tag = image["imageTags"][0]
                                    repository.images_details.append(
                                        ImageDetails(
                                            latest_tag=latest_tag,
                                            image_pushed_at=image["imagePushedAt"],
                                            latest_digest=image["imageDigest"],
                                            scan_findings_status=last_scan_status,
                                            scan_findings_severity_count=severity_counts,
                                        )
                                    )
                        # Sort the repository images by date pushed
                        repository.images_details.sort(
                            key=lambda image: image.image_pushed_at
                        )

        except Exception as error:
            logger.error(
                f"-- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __list_tags_for_resource__(self):
    def __list_tags_for_resource__(self, regional_client):
        logger.info("ECR - List Tags...")
        try:
            for repository in self.repositories:
                try:
                    regional_client = self.regional_clients[repository.region]
                    response = regional_client.list_tags_for_resource(
                        resourceArn=repository.arn
                    )["tags"]
                    repository.tags = response
            if regional_client.region in self.registries:
                for repository in self.registries[regional_client.region].repositories:
                    try:
                        regional_client = self.regional_clients[repository.region]
                        response = regional_client.list_tags_for_resource(
                            resourceArn=repository.arn
                        )["tags"]
                        repository.tags = response

                except ClientError as error:
                    if error.response["Error"]["Code"] == "RepositoryNotFoundException":
                        logger.warning(
                            f"{regional_client.region} --"
                            f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                            f" {error}"
                        )
                        continue
                    except ClientError as error:
                        if (
                            error.response["Error"]["Code"]
                            == "RepositoryNotFoundException"
                        ):
                            logger.warning(
                                f"{regional_client.region} --"
                                f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                                f" {error}"
                            )
                            continue
        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -179,25 +212,34 @@ class ECR:
    def __get_registry_scanning_configuration__(self, regional_client):
        logger.info("ECR - Getting Registry Scanning Configuration...")
        try:
            response = regional_client.get_registry_scanning_configuration()
            rules = []
            for rule in response.get("scanningConfiguration").get("rules", []):
                rules.append(
                    ScanningRule(
                        scan_frequency=rule.get("scanFrequency"),
                        scan_filters=rule.get("repositoryFilters"),
            if regional_client.region in self.registries:
                response = regional_client.get_registry_scanning_configuration()
                rules = []
                for rule in response.get("scanningConfiguration").get("rules", []):
                    rules.append(
                        ScanningRule(
                            scan_frequency=rule.get("scanFrequency"),
                            scan_filters=rule.get("repositoryFilters", []),
                        )
                    )

                self.registries[regional_client.region].scan_type = response.get(
                    "scanningConfiguration"
                ).get("scanType", "BASIC")
                self.registries[regional_client.region].rules = rules
        except ClientError as error:
            if error.response["Error"][
                "Code"
            ] == "ValidationException" and "GetRegistryScanningConfiguration operation: This feature is disabled" in str(
                error
            ):
                self.registries[regional_client.region].scan_type = "BASIC"
                self.registries[regional_client.region].rules = []
            else:
                logger.error(
                    f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                )
            self.registries.append(
                Registry(
                    id=response.get("registryId", ""),
                    scan_type=response.get("scanningConfiguration").get(
                        "scanType", "BASIC"
                    ),
                    region=regional_client.region,
                    rules=rules,
                )
            )

        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -213,6 +255,7 @@ class FindingSeverityCounts(BaseModel):
class ImageDetails(BaseModel):
    latest_tag: str
    latest_digest: str
    image_pushed_at: datetime
    scan_findings_status: Optional[str]
    scan_findings_severity_count: Optional[FindingSeverityCounts]

@@ -221,10 +264,11 @@ class Repository(BaseModel):
    name: str
    arn: str
    region: str
    registry_id: str
    scan_on_push: bool
    policy: Optional[dict]
    images_details: Optional[list[ImageDetails]]
    lyfecicle_policy: Optional[str]
    lifecycle_policy: Optional[str]
    tags: Optional[list] = []


@@ -236,6 +280,6 @@ class ScanningRule(BaseModel):
class Registry(BaseModel):
    id: str
    region: str
    scan_type: str
    rules: list[ScanningRule]
    tags: Optional[list] = []
    repositories: list[Repository]
    scan_type: Optional[str]
    rules: Optional[list[ScanningRule]]

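After this refactor, registries is a per-region dict rather than a list, keyed by region with one default Registry per region. A hedged sketch of the resulting access pattern; the region and repository values are illustrative:

# registries maps region -> Registry; each Registry owns that region's repositories
registry = ecr_client.registries["eu-west-1"]
registry.id          # the audited account id, used as the default registry id
registry.scan_type   # e.g. "BASIC" or "ENHANCED"
for repository in registry.repositories:
    print(repository.name, repository.scan_on_push)
# Checks now iterate ecr_client.registries.values() and then registry.repositories.
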
@@ -2,6 +2,7 @@ import threading
from enum import Enum
from typing import Optional

from botocore.client import ClientError
from pydantic import BaseModel

from prowler.lib.logger import logger
@@ -71,10 +72,19 @@ class EMR:
        try:
            for cluster in self.clusters.values():
                if cluster.region == regional_client.region:
                    describe_cluster_parameters = {"ClusterId": cluster.id}
                    cluster_info = regional_client.describe_cluster(
                        **describe_cluster_parameters
                    )
                    try:
                        describe_cluster_parameters = {"ClusterId": cluster.id}
                        cluster_info = regional_client.describe_cluster(
                            **describe_cluster_parameters
                        )
                    except ClientError as error:
                        if error.response["Error"]["Code"] == "InvalidRequestException":
                            logger.warning(
                                f"{regional_client.region} --"
                                f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                                f" {error}"
                            )
                            continue

                    # Master Node Security Groups
                    master_node_security_group = cluster_info["Cluster"][

0
prowler/providers/aws/services/fms/__init__.py
Normal file
4
prowler/providers/aws/services/fms/fms_client.py
Normal file
@@ -0,0 +1,4 @@
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.fms.fms_service import FMS

fms_client = FMS(current_audit_info)
@@ -0,0 +1,30 @@
{
    "Provider": "aws",
    "CheckID": "fms_policy_compliant",
    "CheckTitle": "Ensure that all FMS policies inside an admin account are compliant",
    "CheckType": [],
    "ServiceName": "fms",
    "SubServiceName": "",
    "ResourceIdTemplate": "arn:aws:fms:region:account-id:policy/policy",
    "Severity": "medium",
    "ResourceType": "Other",
    "Description": "This check ensures all FMS policies inside an admin account are compliant",
    "Risk": "If FMS policies are not compliant, it means there are resources left unprotected by the policies",
    "RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/getting-started-fms-intro.html",
    "Remediation": {
        "Code": {
            "CLI": "aws fms list-policies",
            "NativeIaC": "",
            "Other": "",
            "Terraform": ""
        },
        "Recommendation": {
            "Text": "Ensure FMS is enabled and all the policies are compliant across your AWS accounts",
            "Url": ""
        }
    },
    "Categories": [],
    "DependsOn": [],
    "RelatedTo": [],
    "Notes": ""
}

@@ -0,0 +1,29 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.fms.fms_client import fms_client


class fms_policy_compliant(Check):
    def execute(self):
        findings = []
        if fms_client.fms_admin_account:
            report = Check_Report_AWS(self.metadata())
            report.resource_id = "FMS"
            report.resource_arn = ""
            report.region = fms_client.region
            report.status = "PASS"
            report.status_extended = "FMS enabled with all compliant accounts"
            non_compliant_policy = False
            for policy in fms_client.fms_policies:
                for policy_to_account in policy.compliance_status:
                    if policy_to_account.status == "NON_COMPLIANT":
                        report.status = "FAIL"
                        report.status_extended = f"FMS with non-compliant policy {policy.name} for account {policy_to_account.account_id}"
                        report.resource_id = policy.id
                        report.resource_arn = policy.arn
                        non_compliant_policy = True
                        break
                if non_compliant_policy:
                    break

            findings.append(report)
        return findings
108
prowler/providers/aws/services/fms/fms_service.py
Normal file
@@ -0,0 +1,108 @@
from botocore.client import ClientError
from pydantic import BaseModel

from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.aws_provider import generate_regional_clients


################## FMS
class FMS:
    def __init__(self, audit_info):
        self.service = "fms"
        self.session = audit_info.audit_session
        self.audited_account = audit_info.audited_account
        self.audited_partition = audit_info.audited_partition
        self.audit_resources = audit_info.audit_resources
        global_client = generate_regional_clients(
            self.service, audit_info, global_service=True
        )
        self.client = list(global_client.values())[0]
        self.region = self.client.region
        self.fms_admin_account = True
        self.fms_policies = []
        self.__list_policies__()
        self.__list_compliance_status__()

    def __list_policies__(self):
        logger.info("FMS - Listing Policies...")
        try:
            list_policies_paginator = self.client.get_paginator("list_policies")
            try:
                for page in list_policies_paginator.paginate():
                    for fms_policy in page["PolicyList"]:
                        if not self.audit_resources or (
                            is_resource_filtered(
                                fms_policy["PolicyArn"], self.audit_resources
                            )
                        ):
                            self.fms_policies.append(
                                Policy(
                                    arn=fms_policy.get("PolicyArn"),
                                    id=fms_policy.get("PolicyId"),
                                    name=fms_policy.get("PolicyName"),
                                    resource_type=fms_policy.get("ResourceType"),
                                    service_type=fms_policy.get("SecurityServiceType"),
                                    remediation_enabled=fms_policy.get(
                                        "RemediationEnabled"
                                    ),
                                    delete_unused_managed_resources=fms_policy.get(
                                        "DeleteUnusedFMManagedResources"
                                    ),
                                )
                            )
            except ClientError as error:
                if error.response["Error"]["Code"] == "AccessDeniedException":
                    if (
                        "No default admin could be found for account"
                        in error.response["Error"]["Message"]
                    ):
                        # FMS is not enabled in this account
                        self.fms_admin_account = False
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
            )

    def __list_compliance_status__(self):
        logger.info("FMS - Listing Compliance Status...")
        try:
            for fms_policy in self.fms_policies:
                list_compliance_status_paginator = self.client.get_paginator(
                    "list_compliance_status"
                )
                for page in list_compliance_status_paginator.paginate(
                    PolicyId=fms_policy.id
                ):
                    for fms_compliance_status in page["PolicyComplianceStatusList"]:
                        fms_policy.compliance_status.append(
                            PolicyAccountComplianceStatus(
                                account_id=fms_compliance_status.get("MemberAccount"),
                                policy_id=fms_compliance_status.get("PolicyId"),
                                status=fms_compliance_status.get("EvaluationResults")[
                                    0
                                ].get("ComplianceStatus"),
                            )
                        )

        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
            )


class PolicyAccountComplianceStatus(BaseModel):
    account_id: str
    policy_id: str
    status: str


class Policy(BaseModel):
    arn: str
    id: str
    name: str
    resource_type: str
    service_type: str
    remediation_enabled: bool
    delete_unused_managed_resources: bool
    compliance_status: list[PolicyAccountComplianceStatus] = []
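A note on the AccessDenied handling in the service above: when the audited account is not the FMS administrator, list_policies fails with an AccessDeniedException whose message names the missing default admin, and the service flips fms_admin_account so the check emits no findings. A minimal sketch of that detection; the message text and account id here are illustrative, not taken from a real API response:

from botocore.exceptions import ClientError

# Hypothetical error response mimicking a non-admin FMS account
error = ClientError(
    {
        "Error": {
            "Code": "AccessDeniedException",
            "Message": "No default admin could be found for account 123456789012",
        }
    },
    "ListPolicies",
)
fms_admin_account = True
if error.response["Error"]["Code"] == "AccessDeniedException":
    if "No default admin could be found for account" in error.response["Error"]["Message"]:
        fms_admin_account = False  # FMS treated as not enabled in this account
print(fms_admin_account)  # False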
@@ -18,19 +18,19 @@ class iam_policy_no_full_access_to_cloudtrail(Check):
             report.status = "PASS"
             report.status_extended = f"Custom Policy {policy.name} does not allow '{critical_service}:*' privileges"
             if policy.document:
                 # Check the statements, if one includes critical_service:* stop iterating over the rest
-                if type(policy.document.get("Statement")) != list:
-                    policy_statements = [policy.document.get("Statement")]
+                if type(policy.document["Statement"]) != list:
+                    policy_statements = [policy.document["Statement"]]
                 else:
-                    policy_statements = policy.document.get("Statement")
+                    policy_statements = policy.document["Statement"]
                 # Check the statements, if one includes kms:* stop iterating over the rest
                 for statement in policy_statements:
                     # Check policies with "Effect": "Allow" with "Action": "*" over "Resource": "*".
                     if (
-                        statement.get("Effect") == "Allow"
-                        and critical_service + ":*" in statement.get("Action")
+                        statement["Effect"] == "Allow"
+                        and "Action" in statement
+                        and critical_service + ":*" in statement["Action"]
                         and (
-                            statement.get("Resource") == "*"
-                            or statement.get("Resource") == ["*"]
+                            statement["Resource"] == "*"
+                            or statement["Resource"] == ["*"]
                         )
                     ):
                         report.status = "FAIL"
@@ -18,23 +18,24 @@ class iam_policy_no_full_access_to_kms(Check):
             report.status = "PASS"
             report.status_extended = f"Custom Policy {policy.name} does not allow '{critical_service}:*' privileges"
             if policy.document:
                 # Check the statements, if one includes critical_service:* stop iterating over the rest
-                if type(policy.document.get("Statement")) != list:
-                    policy_statements = [policy.document.get("Statement")]
+                if type(policy.document["Statement"]) != list:
+                    policy_statements = [policy.document["Statement"]]
                 else:
-                    policy_statements = policy.document.get("Statement")
+                    policy_statements = policy.document["Statement"]
                 # Check the statements, if one includes kms:* stop iterating over the rest
                 for statement in policy_statements:
                     # Check policies with "Effect": "Allow" with "Action": "*" over "Resource": "*".
                     if (
-                        statement.get("Effect") == "Allow"
-                        and critical_service + ":*" in statement.get("Action")
+                        statement["Effect"] == "Allow"
+                        and "Action" in statement
+                        and critical_service + ":*" in statement["Action"]
                         and (
-                            statement.get("Resource") == "*"
-                            or statement.get("Resource") == ["*"]
+                            statement["Resource"] == "*"
+                            or statement["Resource"] == ["*"]
                         )
                     ):
                         report.status = "FAIL"
                         report.status_extended = f"Custom Policy {policy.name} allows '{critical_service}:*' privileges"
                         break

             findings.append(report)
         return findings
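The switch from statement.get(...) to direct indexing, plus the new "Action" in statement guard in both hunks above, matters because a membership test against None raises. A quick sketch of the failure mode the old code had for statements without an Action key; the statement contents are illustrative:

statement = {"Effect": "Allow", "NotAction": "s3:*", "Resource": "*"}

try:
    "kms:*" in statement.get("Action")  # .get returns None for a missing key
except TypeError as error:
    print(error)  # argument of type 'NoneType' is not iterable

# The guarded form simply skips such statements:
if "Action" in statement and "kms:*" in statement["Action"]:
    print("would flag this policy")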
@@ -0,0 +1,32 @@
{
  "Provider": "aws",
  "CheckID": "iam_role_cross_account_readonlyaccess_policy",
  "CheckTitle": "Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts",
  "CheckType": [],
  "ServiceName": "iam",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
  "Severity": "high",
  "ResourceType": "AwsIamRole",
  "Description": "Ensure IAM Roles do not have ReadOnlyAccess access for external AWS accounts",
  "Risk": "The AWS-managed ReadOnlyAccess policy is highly potent and exposes the customer to a significant data leakage threat. It should be granted very conservatively. For granting access to 3rd party vendors, consider using alternative managed policies, such as ViewOnlyAccess or SecurityAudit.",
  "RelatedUrl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html#awsmp_readonlyaccess",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Remove the AWS-managed ReadOnlyAccess policy from all roles that have a trust policy, including third-party cloud accounts, or remove third-party cloud accounts from the trust policy of all roles that need the ReadOnlyAccess policy.",
      "Url": "https://docs.securestate.vmware.com/rule-docs/aws-iam-role-cross-account-readonlyaccess-policy"
    }
  },
  "Categories": [
    "trustboundaries"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": "CAF Security Epic: IAM"
}
@@ -0,0 +1,79 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.iam.iam_client import iam_client


class iam_role_cross_account_readonlyaccess_policy(Check):
    def execute(self) -> Check_Report_AWS:
        findings = []
        for role in iam_client.roles:
            if (
                not role.is_service_role
            ):  # Avoid service roles since they cannot be modified by the user
                report = Check_Report_AWS(self.metadata())
                report.region = iam_client.region
                report.resource_arn = role.arn
                report.resource_id = role.name
                report.resource_tags = role.tags
                report.status = "PASS"
                report.status_extended = (
                    f"IAM Role {role.name} does not have ReadOnlyAccess policy"
                )
                for policy in role.attached_policies:
                    if policy["PolicyName"] == "ReadOnlyAccess":
                        report.status_extended = f"IAM Role {role.name} has read-only access but is not cross account"
                        cross_account_access = False
                        if type(role.assume_role_policy["Statement"]) == list:
                            for statement in role.assume_role_policy["Statement"]:
                                if not cross_account_access:
                                    if (
                                        statement["Effect"] == "Allow"
                                        and "AWS" in statement["Principal"]
                                    ):
                                        if type(statement["Principal"]["AWS"]) == list:
                                            for aws_account in statement["Principal"][
                                                "AWS"
                                            ]:
                                                if (
                                                    iam_client.account
                                                    not in aws_account
                                                    or "*" == aws_account
                                                ):
                                                    cross_account_access = True
                                                    break
                                        else:
                                            if (
                                                iam_client.account
                                                not in statement["Principal"]["AWS"]
                                                or "*" == statement["Principal"]["AWS"]
                                            ):
                                                cross_account_access = True
                                else:
                                    break
                        else:
                            statement = role.assume_role_policy["Statement"]
                            if (
                                statement["Effect"] == "Allow"
                                and "AWS" in statement["Principal"]
                            ):
                                if type(statement["Principal"]["AWS"]) == list:
                                    for aws_account in statement["Principal"]["AWS"]:
                                        if (
                                            iam_client.account not in aws_account
                                            or "*" == aws_account
                                        ):
                                            cross_account_access = True
                                            break
                                else:
                                    if (
                                        iam_client.account
                                        not in statement["Principal"]["AWS"]
                                        or "*" == statement["Principal"]["AWS"]
                                    ):
                                        cross_account_access = True
                        if cross_account_access:
                            report.status = "FAIL"
                            report.status_extended = f"IAM Role {role.name} gives cross account read-only access!"

                findings.append(report)

        return findings
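For reference, a hedged example of a trust policy this check is meant to flag when the role also carries the ReadOnlyAccess managed policy; the account ids are made up:

assume_role_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::999999999999:root"},
            "Action": "sts:AssumeRole",
        }
    ],
}
audited_account = "123456789012"  # the principal above belongs to another account
statement = assume_role_policy["Statement"][0]
cross_account = (
    statement["Effect"] == "Allow"
    and "AWS" in statement["Principal"]
    and (
        audited_account not in statement["Principal"]["AWS"]
        or statement["Principal"]["AWS"] == "*"
    )
)
print(cross_account)  # True -> the check reports FAIL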
@@ -7,7 +7,7 @@
   "SubServiceName": "",
   "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id",
   "Severity": "high",
-  "ResourceType": "AwsIamPolicy",
+  "ResourceType": "AwsIamRole",
   "Description": "Ensure IAM Service Roles prevents against a cross-service confused deputy attack",
   "Risk": "Allow attackers to gain unauthorized access to resources",
   "RelatedUrl": "",
@@ -11,19 +11,38 @@ from prowler.providers.aws.aws_provider import generate_regional_clients


 def is_service_role(role):
-    if "Statement" in role["AssumeRolePolicyDocument"]:
-        for statement in role["AssumeRolePolicyDocument"]["Statement"]:
-            if (
-                statement["Effect"] == "Allow"
-                and (
-                    "sts:AssumeRole" in statement["Action"]
-                    or "sts:*" in statement["Action"]
-                    or "*" in statement["Action"]
-                )
-                # This is what defines a service role
-                and "Service" in statement["Principal"]
-            ):
-                return True
+    try:
+        if "Statement" in role["AssumeRolePolicyDocument"]:
+            if type(role["AssumeRolePolicyDocument"]["Statement"]) == list:
+                for statement in role["AssumeRolePolicyDocument"]["Statement"]:
+                    if (
+                        statement["Effect"] == "Allow"
+                        and (
+                            "sts:AssumeRole" in statement["Action"]
+                            or "sts:*" in statement["Action"]
+                            or "*" in statement["Action"]
+                        )
+                        # This is what defines a service role
+                        and "Service" in statement["Principal"]
+                    ):
+                        return True
+            else:
+                statement = role["AssumeRolePolicyDocument"]["Statement"]
+                if (
+                    statement["Effect"] == "Allow"
+                    and (
+                        "sts:AssumeRole" in statement["Action"]
+                        or "sts:*" in statement["Action"]
+                        or "*" in statement["Action"]
+                    )
+                    # This is what defines a service role
+                    and "Service" in statement["Principal"]
+                ):
+                    return True
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+        )
     return False
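The added type check handles trust documents whose Statement is a single object rather than a list: iterating a dict walks its keys, which is presumably why the old loop misread such policies. A small sketch; the document contents are illustrative:

role = {
    "AssumeRolePolicyDocument": {
        "Statement": {
            "Effect": "Allow",
            "Principal": {"Service": "ec2.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }
    }
}
for statement in role["AssumeRolePolicyDocument"]["Statement"]:
    print(statement)  # prints 'Effect', 'Principal', 'Action' -- the dict keys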
@@ -50,6 +69,7 @@ class IAM:
         self.__get_group_users__()
         self.__list_attached_group_policies__()
         self.__list_attached_user_policies__()
+        self.__list_attached_role_policies__()
         self.__list_inline_user_policies__()
         self.__list_mfa_devices__()
         self.password_policy = self.__get_password_policy__()
@@ -119,7 +139,7 @@ class IAM:
             credential_list = list(csv_reader)

         except ClientError as error:
-            if error.response["Error"]["Code"] != "LimitExceededException":
+            if error.response["Error"]["Code"] == "LimitExceededException":
                 logger.warning(
                     f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                 )
@@ -338,6 +358,27 @@ class IAM:
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

+    def __list_attached_role_policies__(self):
+        logger.info("IAM - List Attached Role Policies...")
+        try:
+            for role in self.roles:
+                attached_role_policies = []
+                list_attached_role_policies_paginator = self.client.get_paginator(
+                    "list_attached_role_policies"
+                )
+                for page in list_attached_role_policies_paginator.paginate(
+                    RoleName=role.name
+                ):
+                    for policy in page["AttachedPolicies"]:
+                        attached_role_policies.append(policy)
+
+                role.attached_policies = attached_role_policies
+
+        except Exception as error:
+            logger.error(
+                f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )
+
     def __list_inline_user_policies__(self):
         logger.info("IAM - List Inline User Policies...")
         try:
@@ -457,24 +498,43 @@ class IAM:
         logger.info("IAM - List Tags...")
         try:
             for role in self.roles:
-                response = self.client.list_role_tags(RoleName=role.name)["Tags"]
-                role.tags = response
+                try:
+                    response = self.client.list_role_tags(RoleName=role.name)["Tags"]
+                    role.tags = response
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                        role.tags = []

         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

         try:
             for user in self.users:
-                response = self.client.list_user_tags(UserName=user.name)["Tags"]
-                user.tags = response
+                try:
+                    response = self.client.list_user_tags(UserName=user.name)["Tags"]
+                    user.tags = response
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                        user.tags = []

         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

         try:
             for policy in self.policies:
-                response = self.client.list_policy_tags(PolicyArn=policy.arn)["Tags"]
-                policy.tags = response
+                try:
+                    response = self.client.list_policy_tags(PolicyArn=policy.arn)[
+                        "Tags"
+                    ]
+                    policy.tags = response
+                except ClientError as error:
+                    if error.response["Error"]["Code"] == "NoSuchEntityException":
+                        policy.tags = []

         except Exception as error:
             logger.error(
                 f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -501,6 +561,7 @@ class Role(BaseModel):
     arn: str
     assume_role_policy: dict
     is_service_role: bool
+    attached_policies: list[dict] = []
     tags: Optional[list] = []


@@ -0,0 +1,6 @@
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.networkfirewall.networkfirewall_service import (
    NetworkFirewall,
)

networkfirewall_client = NetworkFirewall(current_audit_info)
@@ -0,0 +1,30 @@
{
  "Provider": "aws",
  "CheckID": "networkfirewall_in_all_vpc",
  "CheckTitle": "Ensure all VPCs have Network Firewall enabled",
  "CheckType": [],
  "ServiceName": "network-firewall",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:network-firewall::account-id:firewall/firewall-name",
  "Severity": "medium",
  "ResourceType": "Other",
  "Description": "Ensure all VPCs have Network Firewall enabled",
  "Risk": "Without a network firewall, it can be difficult to monitor and control traffic within the VPC. This can make it harder to detect and prevent attacks or unauthorized access to resources.",
  "RelatedUrl": "https://docs.aws.amazon.com/network-firewall/latest/developerguide/setting-up.html",
  "Remediation": {
    "Code": {
      "CLI": "aws network-firewall create-firewall --firewall-name <value> --vpc-id <value>",
      "NativeIaC": "",
      "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/NetworkFirewall/network-firewall-in-use.html",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Ensure all VPCs have Network Firewall enabled",
      "Url": "https://docs.aws.amazon.com/network-firewall/latest/developerguide/vpc-config.html"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,31 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.networkfirewall.networkfirewall_client import (
    networkfirewall_client,
)
from prowler.providers.aws.services.vpc.vpc_client import vpc_client


class networkfirewall_in_all_vpc(Check):
    def execute(self):
        findings = []
        for vpc in vpc_client.vpcs.values():
            report = Check_Report_AWS(self.metadata())
            report.region = vpc.region
            report.resource_id = vpc.id
            report.resource_arn = ""
            report.resource_tags = vpc.tags
            report.status = "FAIL"
            report.status_extended = (
                f"VPC {vpc.id} does not have Network Firewall enabled."
            )
            for firewall in networkfirewall_client.network_firewalls:
                if firewall.vpc_id == vpc.id:
                    report.status = "PASS"
                    report.status_extended = (
                        f"VPC {vpc.id} has Network Firewall enabled."
                    )
                    break

            findings.append(report)

        return findings
@@ -0,0 +1,94 @@
import threading

from pydantic import BaseModel

from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.aws_provider import generate_regional_clients


################## NetworkFirewall
class NetworkFirewall:
    def __init__(self, audit_info):
        self.service = "network-firewall"
        self.session = audit_info.audit_session
        self.audited_account = audit_info.audited_account
        self.audited_partition = audit_info.audited_partition
        self.audit_resources = audit_info.audit_resources
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        # If the region is not set in the audit profile,
        # we pick the first region from the regional clients list
        self.region = (
            audit_info.profile_region
            if audit_info.profile_region
            else list(self.regional_clients.keys())[0]
        )
        self.network_firewalls = []
        self.__threading_call__(self.__list_firewalls__)
        self.__describe_firewall__()

    def __get_session__(self):
        return self.session

    def __threading_call__(self, call):
        threads = []
        for regional_client in self.regional_clients.values():
            threads.append(threading.Thread(target=call, args=(regional_client,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def __list_firewalls__(self, regional_client):
        logger.info("Network Firewall - Listing Network Firewalls...")
        try:
            list_network_firewalls_paginator = regional_client.get_paginator(
                "list_firewalls"
            )
            for page in list_network_firewalls_paginator.paginate():
                for network_firewall in page["Firewalls"]:
                    if not self.audit_resources or (
                        is_resource_filtered(
                            network_firewall["FirewallArn"], self.audit_resources
                        )
                    ):
                        self.network_firewalls.append(
                            Firewall(
                                arn=network_firewall.get("FirewallArn"),
                                region=regional_client.region,
                                name=network_firewall.get("FirewallName"),
                            )
                        )
        except Exception as error:
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def __describe_firewall__(self):
        logger.info("Network Firewall - Describe Network Firewalls...")
        try:
            for network_firewall in self.network_firewalls:
                regional_client = self.regional_clients[network_firewall.region]
                describe_firewall = regional_client.describe_firewall(
                    FirewallArn=network_firewall.arn
                )["Firewall"]
                network_firewall.policy_arn = describe_firewall.get("FirewallPolicyArn")
                network_firewall.vpc_id = describe_firewall.get("VpcId")
                network_firewall.tags = describe_firewall.get("Tags")
                network_firewall.encryption_type = describe_firewall.get(
                    "EncryptionConfiguration"
                ).get("Type")
        except Exception as error:
            logger.error(
                f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
            )


class Firewall(BaseModel):
    arn: str
    name: str
    region: str
    policy_arn: str = None
    vpc_id: str = None
    tags: list = []
    encryption_type: str = None
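The __threading_call__ pattern above is shared by these service classes: one worker thread per regional client, all joined before results are consumed. A standalone sketch of the same fan-out; the region names and the callback are illustrative:

import threading

def fan_out(call, regional_clients):
    threads = [threading.Thread(target=call, args=(client,)) for client in regional_clients]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()  # wait so the collected results are complete before reading them

results = []
fan_out(lambda region: results.append(f"listed firewalls in {region}"), ["us-east-1", "eu-west-1"])
print(sorted(results))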
@@ -1,5 +1,5 @@
 import threading
-from json import loads
+from json import JSONDecodeError, loads
 from typing import Optional

 from pydantic import BaseModel
@@ -79,9 +79,16 @@ class OpenSearchService:
                             ]["Options"][logging_key]["Enabled"],
                         )
                     )
-                domain.access_policy = loads(
-                    describe_domain["DomainConfig"]["AccessPolicies"]["Options"]
-                )
+                try:
+                    domain.access_policy = loads(
+                        describe_domain["DomainConfig"]["AccessPolicies"]["Options"]
+                    )
+                except JSONDecodeError as error:
+                    logger.error(
+                        f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+                    )
+                    continue

         except Exception as error:
             logger.error(
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
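The new JSONDecodeError branch presumably covers domains whose AccessPolicies options are not parseable JSON, such as an empty string: json.loads raises on empty input instead of returning None. A quick illustration:

from json import JSONDecodeError, loads

for options in ('{"Version": "2012-10-17", "Statement": []}', ""):
    try:
        print(loads(options))
    except JSONDecodeError as error:
        print(f"skipping domain: {error}")  # the service logs the error and continues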
@@ -29,6 +29,10 @@ class organizations_scp_check_deny_regions(Check):
                 is_region_restricted_statement = False

                 for policy in org.policies:
+                    # We only check SCP policies here
+                    if policy.type != "SERVICE_CONTROL_POLICY":
+                        continue
+
                     # Statements are not always list
                     statements = policy.content.get("Statement")
                     if type(policy.content["Statement"]) is not list:
@@ -8,6 +8,13 @@ from prowler.lib.logger import logger
 from prowler.lib.scan_filters.scan_filters import is_resource_filtered
 from prowler.providers.aws.aws_provider import generate_regional_clients

+available_organizations_policies = [
+    "SERVICE_CONTROL_POLICY",
+    "TAG_POLICY",
+    "BACKUP_POLICY",
+    "AISERVICES_OPT_OUT_POLICY",
+]
+

 ################## Organizations
 class Organizations:
@@ -36,13 +43,8 @@ class Organizations:
                     organization_arn = organization_desc.get("Arn")
                     organization_id = organization_desc.get("Id")
                     organization_master_id = organization_desc.get("MasterAccountId")
-                    organization_available_policy_types = organization_desc.get(
-                        "AvailablePolicyTypes"
-                    )
                     # Fetch policies for organization:
-                    organization_policies = self.__list_policies__(
-                        organization_available_policy_types
-                    )
+                    organization_policies = self.__list_policies__()
                     # Fetch delegated administrators for organization:
                     organization_delegated_administrator = (
                         self.__list_delegated_administrators__()
@@ -95,19 +97,17 @@ class Organizations:
             )

     # I'm using list_policies instead of list_policies_for_target, because the last one only returns "Attached directly" policies but not "Inherited from..." policies.
-    def __list_policies__(self, enabled_policy_types):
+    def __list_policies__(self):
         logger.info("Organizations - List policies...")

         try:
             list_policies_paginator = self.client.get_paginator("list_policies")
-            for policy_type in enabled_policy_types:
+            for policy_type in available_organizations_policies:
                 logger.info(
                     "Organizations - List policies... - Type: %s",
-                    policy_type.get("Type"),
+                    policy_type,
                 )
-                for page in list_policies_paginator.paginate(
-                    Filter=policy_type.get("Type")
-                ):
+                for page in list_policies_paginator.paginate(Filter=policy_type):
                     for policy in page["Policies"]:
                         policy_content = self.__describe_policy__(policy.get("Id"))
                         policy_targets = self.__list_targets_for_policy__(
@@ -0,0 +1,30 @@
{
  "Provider": "aws",
  "CheckID": "organizations_tags_policies_enabled_and_attached",
  "CheckTitle": "Check if an AWS Organization has tags policies enabled and attached.",
  "CheckType": [],
  "ServiceName": "organizations",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:partition:service::account-id:organization/organization-id",
  "Severity": "medium",
  "ResourceType": "Other",
  "Description": "Check if an AWS Organization has tags policies enabled and attached.",
  "Risk": "If an AWS Organization tags policies are not enabled and attached, it is not possible to enforce tags on AWS resources.",
  "RelatedUrl": "",
  "Remediation": {
    "Code": {
      "CLI": "",
      "NativeIaC": "",
      "Other": "",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Enable and attach AWS Organizations tags policies.",
      "Url": "https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html"
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,37 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.organizations.organizations_client import (
    organizations_client,
)


class organizations_tags_policies_enabled_and_attached(Check):
    def execute(self):
        findings = []

        for org in organizations_client.organizations:
            report = Check_Report_AWS(self.metadata())
            report.resource_id = org.id
            report.resource_arn = org.arn
            report.region = organizations_client.region
            report.status = "FAIL"
            report.status_extended = (
                "AWS Organizations is not in-use for this AWS Account"
            )
            if org.status == "ACTIVE":
                if org.policies is None:
                    # Access Denied to list_policies
                    continue
                for policy in org.policies:
                    # We only check tag policies here
                    if policy.type != "TAG_POLICY":
                        continue

                    report.status_extended = f"AWS Organization {org.id} has tag policies enabled but not attached"

                    if policy.targets:
                        report.status = "PASS"
                        report.status_extended = f"AWS Organization {org.id} has tag policies enabled and attached to an AWS account"

            findings.append(report)

        return findings
@@ -10,16 +10,23 @@ class rds_instance_deletion_protection(Check):
             report.region = db_instance.region
             report.resource_id = db_instance.id
             report.resource_tags = db_instance.tags
-            if db_instance.deletion_protection:
-                report.status = "PASS"
-                report.status_extended = (
-                    f"RDS Instance {db_instance.id} deletion protection is enabled."
-                )
+            # Check if is member of a cluster
+            if db_instance.cluster_id:
+                if rds_client.db_clusters[db_instance.cluster_id].deletion_protection:
+                    report.status = "PASS"
+                    report.status_extended = f"RDS Instance {db_instance.id} deletion protection is enabled at cluster {db_instance.cluster_id} level."
+                else:
+                    report.status = "FAIL"
+                    report.status_extended = f"RDS Instance {db_instance.id} deletion protection is not enabled at cluster {db_instance.cluster_id} level."
             else:
-                report.status = "FAIL"
-                report.status_extended = (
-                    f"RDS Instance {db_instance.id} deletion protection is not enabled."
-                )
+                if db_instance.deletion_protection:
+                    report.status = "PASS"
+                    report.status_extended = (
+                        f"RDS Instance {db_instance.id} deletion protection is enabled."
+                    )
+                else:
+                    report.status = "FAIL"
+                    report.status_extended = f"RDS Instance {db_instance.id} deletion protection is not enabled."

             findings.append(report)

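The reworked logic defers to the cluster-level flag for cluster members and falls back to the instance flag otherwise. The same decision expressed as a plain function, with illustrative inputs:

def deletion_protection_status(instance_protected, cluster_id, cluster_protection):
    # Cluster members inherit the setting from their cluster
    if cluster_id:
        return "PASS" if cluster_protection[cluster_id] else "FAIL"
    return "PASS" if instance_protected else "FAIL"

cluster_protection = {"aurora-prod": True}
print(deletion_protection_status(False, "aurora-prod", cluster_protection))  # PASS
print(deletion_protection_status(False, None, cluster_protection))           # FAIL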
@@ -0,0 +1,30 @@
{
  "Provider": "aws",
  "CheckID": "rds_instance_deprecated_engine_version",
  "CheckTitle": "Check if RDS instance is using a supported engine version",
  "CheckType": [],
  "ServiceName": "rds",
  "SubServiceName": "",
  "ResourceIdTemplate": "arn:aws:rds:region:account-id:db-instance",
  "Severity": "medium",
  "ResourceType": "AwsRdsDbInstance",
  "Description": "Check if RDS is using a supported engine version for MariaDB, MySQL and PostgreSQL",
  "Risk": "If a deprecated engine version is used, RDS instances may be vulnerable to known security issues",
  "RelatedUrl": "",
  "Remediation": {
    "Code": {
      "CLI": "aws rds describe-db-engine-versions --engine <my_engine>",
      "NativeIaC": "",
      "Other": "https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-engine-versions.html",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "",
      "Url": ""
    }
  },
  "Categories": [],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,28 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.rds.rds_client import rds_client


class rds_instance_deprecated_engine_version(Check):
    def execute(self):
        findings = []

        for instance in rds_client.db_instances:
            report = Check_Report_AWS(self.metadata())
            report.region = instance.region
            report.status = "FAIL"
            report.resource_id = instance.id
            report.resource_tags = instance.tags
            report.status_extended = f"RDS instance {instance.id} is using a deprecated engine {instance.engine} with version {instance.engine_version}."

            if (
                instance.engine_version
                in rds_client.db_engines[instance.region][
                    instance.engine
                ].engine_versions
            ):
                report.status = "PASS"
                report.status_extended = f"RDS instance {instance.id} is not using a deprecated engine {instance.engine} with version {instance.engine_version}."

            findings.append(report)

        return findings
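This check leans on the db_engines structure built by __describe_db_engine_versions__ further down: region -> engine -> the list of currently supported versions. A sketch of the lookup with made-up data (plain lists stand in for the DBEngine model):

db_engines = {
    "eu-west-1": {
        "postgres": ["13.7", "14.6", "15.2"],  # stands in for DBEngine.engine_versions
    }
}
instance = {"region": "eu-west-1", "engine": "postgres", "engine_version": "9.6.1"}
supported = db_engines[instance["region"]][instance["engine"]]
print("PASS" if instance["engine_version"] in supported else "FAIL")  # FAIL -> deprecated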
@@ -10,16 +10,25 @@ class rds_instance_multi_az(Check):
             report.region = db_instance.region
             report.resource_id = db_instance.id
             report.resource_tags = db_instance.tags
-            if db_instance.multi_az:
-                report.status = "PASS"
-                report.status_extended = (
-                    f"RDS Instance {db_instance.id} has multi-AZ enabled."
-                )
+            # Check if is member of a cluster
+            if db_instance.cluster_id:
+                if rds_client.db_clusters[db_instance.cluster_id].multi_az:
+                    report.status = "PASS"
+                    report.status_extended = f"RDS Instance {db_instance.id} has multi-AZ enabled at cluster {db_instance.cluster_id} level."
+                else:
+                    report.status = "FAIL"
+                    report.status_extended = f"RDS Instance {db_instance.id} does not have multi-AZ enabled at cluster {db_instance.cluster_id} level."
             else:
-                report.status = "FAIL"
-                report.status_extended = (
-                    f"RDS Instance {db_instance.id} does not have multi-AZ enabled."
-                )
+                if db_instance.multi_az:
+                    report.status = "PASS"
+                    report.status_extended = (
+                        f"RDS Instance {db_instance.id} has multi-AZ enabled."
+                    )
+                else:
+                    report.status = "FAIL"
+                    report.status_extended = (
+                        f"RDS Instance {db_instance.id} does not have multi-AZ enabled."
+                    )

             findings.append(report)

@@ -10,6 +10,7 @@ class rds_instance_transport_encrypted(Check):
             report = Check_Report_AWS(self.metadata())
             report.region = db_instance.region
             report.resource_id = db_instance.id
+            report.resource_tags = db_instance.tags
             report.status = "FAIL"
             report.status_extended = (
                 f"RDS Instance {db_instance.id} connections are not encrypted."
@@ -18,14 +18,18 @@ class RDS:
         self.audit_resources = audit_info.audit_resources
         self.regional_clients = generate_regional_clients(self.service, audit_info)
         self.db_instances = []
+        self.db_clusters = {}
         self.db_snapshots = []
+        self.db_engines = {}
         self.db_cluster_snapshots = []
         self.__threading_call__(self.__describe_db_instances__)
         self.__threading_call__(self.__describe_db_parameters__)
         self.__threading_call__(self.__describe_db_snapshots__)
         self.__threading_call__(self.__describe_db_snapshot_attributes__)
+        self.__threading_call__(self.__describe_db_clusters__)
         self.__threading_call__(self.__describe_db_cluster_snapshots__)
         self.__threading_call__(self.__describe_db_cluster_snapshot_attributes__)
+        self.__threading_call__(self.__describe_db_engine_versions__)

     def __get_session__(self):
         return self.session
@@ -58,6 +62,7 @@ class RDS:
                             id=instance["DBInstanceIdentifier"],
                             endpoint=instance.get("Endpoint"),
                             engine=instance["Engine"],
+                            engine_version=instance["EngineVersion"],
                             status=instance["DBInstanceStatus"],
                             public=instance["PubliclyAccessible"],
                             encrypted=instance["StorageEncrypted"],
@@ -79,6 +84,7 @@ class RDS:
                                 for item in instance["DBParameterGroups"]
                             ],
                             multi_az=instance["MultiAZ"],
+                            cluster_id=instance.get("DBClusterIdentifier"),
                             region=regional_client.region,
                             tags=instance.get("TagList"),
                         )
@@ -147,7 +153,7 @@ class RDS:
                     if "all" in att["AttributeValues"]:
                         snapshot.public = True
         except ClientError as error:
-            if error.response["Error"]["Code"] != "DBSnapshotNotFound":
+            if error.response["Error"]["Code"] == "DBSnapshotNotFound":
                 logger.warning(
                     f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
                 )
@@ -157,6 +163,50 @@ class RDS:
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

+    def __describe_db_clusters__(self, regional_client):
+        logger.info("RDS - Describe Clusters...")
+        try:
+            describe_db_clusters_paginator = regional_client.get_paginator(
+                "describe_db_clusters"
+            )
+            for page in describe_db_clusters_paginator.paginate():
+                for cluster in page["DBClusters"]:
+                    if not self.audit_resources or (
+                        is_resource_filtered(
+                            cluster["DBClusterIdentifier"], self.audit_resources
+                        )
+                    ):
+                        if cluster["Engine"] != "docdb":
+                            db_cluster = DBCluster(
+                                id=cluster["DBClusterIdentifier"],
+                                endpoint=cluster.get("Endpoint"),
+                                engine=cluster["Engine"],
+                                status=cluster["Status"],
+                                public=cluster.get("PubliclyAccessible", False),
+                                encrypted=cluster["StorageEncrypted"],
+                                auto_minor_version_upgrade=cluster.get(
+                                    "AutoMinorVersionUpgrade", False
+                                ),
+                                backup_retention_period=cluster.get(
+                                    "BackupRetentionPeriod"
+                                ),
+                                cloudwatch_logs=cluster.get(
+                                    "EnabledCloudwatchLogsExports"
+                                ),
+                                deletion_protection=cluster["DeletionProtection"],
+                                parameter_group=cluster["DBClusterParameterGroup"],
+                                multi_az=cluster["MultiAZ"],
+                                region=regional_client.region,
+                                tags=cluster.get("TagList"),
+                            )
+                            self.db_clusters[
+                                cluster["DBClusterIdentifier"]
+                            ] = db_cluster
+        except Exception as error:
+            logger.error(
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )
+
     def __describe_db_cluster_snapshots__(self, regional_client):
         logger.info("RDS - Describe Cluster Snapshots...")
         try:
@@ -202,11 +252,42 @@ class RDS:
                 f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
             )

+    def __describe_db_engine_versions__(self, regional_client):
+        logger.info("RDS - Describe Engine Versions...")
+        try:
+            describe_db_engine_versions_paginator = regional_client.get_paginator(
+                "describe_db_engine_versions"
+            )
+            for page in describe_db_engine_versions_paginator.paginate():
+                for engine in page["DBEngineVersions"]:
+                    if regional_client.region not in self.db_engines:
+                        self.db_engines[regional_client.region] = {}
+                    if engine["Engine"] not in self.db_engines[regional_client.region]:
+                        db_engine = DBEngine(
+                            region=regional_client.region,
+                            engine=engine["Engine"],
+                            engine_versions=[engine["EngineVersion"]],
+                            engine_description=engine["DBEngineDescription"],
+                        )
+                        self.db_engines[regional_client.region][
+                            engine["Engine"]
+                        ] = db_engine
+                    else:
+                        self.db_engines[regional_client.region][
+                            engine["Engine"]
+                        ].engine_versions.append(engine["EngineVersion"])
+
+        except Exception as error:
+            logger.error(
+                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+            )
+

 class DBInstance(BaseModel):
     id: str
     endpoint: Optional[dict]
     engine: str
+    engine_version: str
     status: str
     public: bool
     encrypted: bool
@@ -218,6 +299,24 @@ class DBInstance(BaseModel):
     multi_az: bool
     parameter_groups: list[str] = []
     parameters: list[dict] = []
+    cluster_id: Optional[str]
     region: str
     tags: Optional[list] = []


+class DBCluster(BaseModel):
+    id: str
+    endpoint: Optional[str]
+    engine: str
+    status: str
+    public: bool
+    encrypted: bool
+    backup_retention_period: int = 0
+    cloudwatch_logs: Optional[list]
+    deletion_protection: bool
+    auto_minor_version_upgrade: bool
+    multi_az: bool
+    parameter_group: Optional[str]
+    region: str
+    tags: Optional[list] = []
+
@@ -236,3 +335,10 @@ class ClusterSnapshot(BaseModel):
     public: bool = False
     region: str
     tags: Optional[list] = []
+
+
+class DBEngine(BaseModel):
+    region: str
+    engine: str
+    engine_versions: list[str]
+    engine_description: str
@@ -12,6 +12,7 @@ class resourceexplorer2_indexes_found(Check):
         report.status_extended = "No Resource Explorer Indexes found"
         report.region = resource_explorer_2_client.region
         report.resource_arn = "NoResourceExplorer"
+        report.resource_id = resource_explorer_2_client.audited_account
         if resource_explorer_2_client.indexes:
             report.region = resource_explorer_2_client.indexes[0].region
             report.resource_arn = resource_explorer_2_client.indexes[0].arn

@@ -13,6 +13,7 @@ class ResourceExplorer2:
         self.service = "resource-explorer-2"
         self.session = audit_info.audit_session
         self.audit_resources = audit_info.audit_resources
+        self.audited_account = audit_info.audited_account
         self.regional_clients = generate_regional_clients(self.service, audit_info)
         # If the region is not set in the audit profile,
         # we pick the first region from the regional clients list
@@ -0,0 +1,32 @@
{
  "Provider": "aws",
  "CheckID": "route53_dangling_ip_subdomain_takeover",
  "CheckTitle": "Check if Route53 Records contains dangling IPs.",
  "CheckType": [],
  "ServiceName": "route53",
  "SubServiceName": "",
  "ResourceIdTemplate": "",
  "Severity": "high",
  "ResourceType": "AWSRoute53RecordSet",
  "Description": "Check if Route53 Records contains dangling IPs.",
  "Risk": "When an ephemeral AWS resource such as an Elastic IP (EIP) is released into the Amazon's Elastic IP pool, an attacker may acquire the EIP resource and effectively control the domain/subdomain associated with that EIP in your Route 53 DNS records.",
  "RelatedUrl": "",
  "Remediation": {
    "Code": {
      "CLI": "aws route53 change-resource-record-sets --hosted-zone-id <resource_id>",
      "NativeIaC": "",
      "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Route53/dangling-dns-records.html",
      "Terraform": ""
    },
    "Recommendation": {
      "Text": "Ensure that any dangling DNS records are deleted from your Amazon Route 53 public hosted zones in order to maintain the integrity and authenticity of your domains/subdomains and to protect against domain hijacking attacks.",
      "Url": "https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-deleting.html"
    }
  },
  "Categories": [
    "forensics-ready"
  ],
  "DependsOn": [],
  "RelatedTo": [],
  "Notes": ""
}
@@ -0,0 +1,40 @@
from ipaddress import ip_address

from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.ec2.ec2_client import ec2_client
from prowler.providers.aws.services.route53.route53_client import route53_client


class route53_dangling_ip_subdomain_takeover(Check):
    def execute(self) -> Check_Report_AWS:
        findings = []

        for record_set in route53_client.record_sets:
            # Check only A records and avoid aliases (only need to check IPs not AWS Resources)
            if record_set.type == "A" and not record_set.is_alias:
                # Gather Elastic IPs and Network Interfaces Public IPs inside the AWS Account
                public_ips = []
                public_ips.extend([eip.public_ip for eip in ec2_client.elastic_ips])
                public_ips.extend(
                    [interface.public_ip for interface in ec2_client.network_interfaces]
                )
                for record in record_set.records:
                    report = Check_Report_AWS(self.metadata())
                    report.resource_id = record_set.hosted_zone_id
                    report.resource_arn = route53_client.hosted_zones[
                        record_set.hosted_zone_id
                    ].arn
                    report.resource_tags = route53_client.hosted_zones[
                        record_set.hosted_zone_id
                    ].tags
                    report.region = record_set.region
                    report.status = "PASS"
                    report.status_extended = f"Route53 record {record} in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is not a dangling IP."
                    # If Public IP check if it is in the AWS Account
                    if not ip_address(record).is_private and record not in public_ips:
                        report.status = "FAIL"
                        report.status_extended = f"Route53 record {record} in Hosted Zone {route53_client.hosted_zones[record_set.hosted_zone_id].name} is a dangling IP which can lead to a subdomain takeover attack!"

                    findings.append(report)

        return findings
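The dangling test above skips private addresses and flags any public record the account does not own. Its behavior in isolation; the addresses are illustrative, with 8.8.8.8 standing in for an Elastic IP owned by the audited account:

from ipaddress import ip_address

public_ips = ["8.8.8.8"]  # Elastic IPs / ENI public IPs gathered from EC2
for record in ["10.0.0.5", "8.8.8.8", "8.8.4.4"]:
    if not ip_address(record).is_private and record not in public_ips:
        print(record, "-> FAIL: potential subdomain takeover")
    else:
        print(record, "-> PASS")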