Compare commits
137 Commits
| SHA1 |
|---|
| 06a91aa9ff |
| bdda377482 |
| aa11ed70bd |
| 0580dca6cf |
| 678ef0ab5a |
| 4888c27713 |
| b256c10622 |
| 878e4e0bbc |
| 6c3653c483 |
| 71ac703e6f |
| a89e3598f2 |
| 5d043cc929 |
| 921f94ebbf |
| 48c9ed8a79 |
| 12987ec9f9 |
| 40b90ed063 |
| 60314e781f |
| bc56d48595 |
| 2d71cef3d5 |
| 41f6637497 |
| c2e54bbbcc |
| df8aacd09d |
| 2dd6be59b9 |
| 9e8e3eb0e6 |
| 3728430f8c |
| ea97de7f43 |
| f254a4bc0d |
| 66acfd8691 |
| 02ca82004f |
| 60b5a79b27 |
| be1e3e942b |
| 3658e85cfc |
| 15e4d1acce |
| 44afd9ed31 |
| 4f099c5663 |
| eaec683eb9 |
| 50bcd828e9 |
| 91545e409e |
| 33031d2c96 |
| 1b42dda817 |
| f726d964a8 |
| 36aaec8a55 |
| 99164ce93e |
| 7ebc5d3c31 |
| 06ff3db8af |
| c44ea3943e |
| d036e0054b |
| f72eb7e212 |
| 62dcbc2961 |
| dddec4c688 |
| 6d00554082 |
| 65d3fcee4c |
| 16cd0e4661 |
| 6e184dae93 |
| 118f3d163d |
| 7d84d67935 |
| 1c1c58c975 |
| 31ea672c61 |
| 7016779b8e |
| 4e958fdf39 |
| c6259b6c75 |
| 021e243ada |
| acdf420941 |
| 4e84507130 |
| 2a61610fec |
| 9b127eba93 |
| 1a89d65516 |
| 84749df708 |
| 6f7cd85a18 |
| ad39061e1a |
| 615bacccaf |
| b3a2479fab |
| 871c877a33 |
| 7fd58de3bf |
| 40f24b4d70 |
| d8f80699d4 |
| f24d0efc77 |
| a18dd76a5a |
| a2362b4bbc |
| e5f1c2b19c |
| 0490ab6944 |
| 97baa8a1e6 |
| 637ebdc3db |
| 451b36093f |
| beb0457aff |
| 0335ea4e0b |
| 355abca5a3 |
| 7d69cc4cd9 |
| cdc4b362a4 |
| 6417e6bbba |
| b810d45d34 |
| f5a2695c3b |
| 977c788fff |
| 21f8b5dbad |
| 1c874d1283 |
| 8f9bdae2b7 |
| 600813fb99 |
| 5a9ccd60a0 |
| beb7a53efe |
| 8431ce42a1 |
| c5a9b63970 |
| a765c1543e |
| 484a773f5b |
| 9ecf570790 |
| f8c840f283 |
| deec9efa97 |
| 2ee62cca8e |
| 413b948ca0 |
| d548e869fa |
| 5c8919372c |
| 9baac9fd89 |
| 252b664e49 |
| 496e0f1e0a |
| 80342d612f |
| 02d7eaf268 |
| 1a8df3bf18 |
| 16f2209d3f |
| 70e22af550 |
| 44f26bc0d5 |
| a19f5d9a9a |
| b78f53a722 |
| c20f07ced4 |
| 7c3a53908b |
| ea3c71e22c |
| 40eaa79777 |
| aa8119970e |
| 55fc8cb55b |
| abf51eceee |
| 458c51dda3 |
| c8d2a44ab0 |
| 0a71628298 |
| 60e0040577 |
| 5c375d63c5 |
| 4d84529ba2 |
| 0737d9e8bb |
| 50c5294bc0 |
| f63e9e5e77 |
````diff
@@ -24,6 +24,10 @@ POSTGRES_USER=prowler
 POSTGRES_PASSWORD=postgres
 POSTGRES_DB=prowler_db
+
+# Celery-Prowler task settings
+TASK_RETRY_DELAY_SECONDS=0.1
+TASK_RETRY_ATTEMPTS=5
 
 # Valkey settings
 # If running Valkey and celery on host, use localhost, else use 'valkey'
 VALKEY_HOST=valkey
````
````diff
@@ -0,0 +1,67 @@
+name: Create Backport Label
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  create_label:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      issues: write
+    steps:
+      - name: Create backport label
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          RELEASE_TAG: ${{ github.event.release.tag_name }}
+          OWNER_REPO: ${{ github.repository }}
+        run: |
+          VERSION_ONLY=${RELEASE_TAG#v} # Remove 'v' prefix if present (e.g., v3.2.0 -> 3.2.0)
+
+          # Check if it's a minor version (X.Y.0)
+          if [[ "$VERSION_ONLY" =~ ^[0-9]+\.[0-9]+\.0$ ]]; then
+            echo "Release ${RELEASE_TAG} (version ${VERSION_ONLY}) is a minor version. Proceeding to create backport label."
+
+            TWO_DIGIT_VERSION=${VERSION_ONLY%.0} # Extract X.Y from X.Y.0 (e.g., 5.6 from 5.6.0)
+
+            FINAL_LABEL_NAME="backport-to-v${TWO_DIGIT_VERSION}"
+            FINAL_DESCRIPTION="Backport PR to the v${TWO_DIGIT_VERSION} branch"
+
+            echo "Effective label name will be: ${FINAL_LABEL_NAME}"
+            echo "Effective description will be: ${FINAL_DESCRIPTION}"
+
+            # Check if the label already exists
+            STATUS_CODE=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/${OWNER_REPO}/labels/${FINAL_LABEL_NAME}")
+
+            if [ "${STATUS_CODE}" -eq 200 ]; then
+              echo "Label '${FINAL_LABEL_NAME}' already exists."
+            elif [ "${STATUS_CODE}" -eq 404 ]; then
+              echo "Label '${FINAL_LABEL_NAME}' does not exist. Creating it..."
+              # Prepare JSON data payload
+              JSON_DATA=$(printf '{"name":"%s","description":"%s","color":"B60205"}' "${FINAL_LABEL_NAME}" "${FINAL_DESCRIPTION}")
+
+              CREATE_STATUS_CODE=$(curl -s -o /tmp/curl_create_response.json -w "%{http_code}" -X POST \
+                -H "Accept: application/vnd.github.v3+json" \
+                -H "Authorization: token ${GITHUB_TOKEN}" \
+                --data "${JSON_DATA}" \
+                "https://api.github.com/repos/${OWNER_REPO}/labels")
+
+              CREATE_RESPONSE_BODY=$(cat /tmp/curl_create_response.json)
+              rm -f /tmp/curl_create_response.json
+
+              if [ "$CREATE_STATUS_CODE" -eq 201 ]; then
+                echo "Label '${FINAL_LABEL_NAME}' created successfully."
+              else
+                echo "Error creating label '${FINAL_LABEL_NAME}'. Status: $CREATE_STATUS_CODE"
+                echo "Response: $CREATE_RESPONSE_BODY"
+                exit 1
+              fi
+            else
+              echo "Error checking for label '${FINAL_LABEL_NAME}'. HTTP Status: ${STATUS_CODE}"
+              exit 1
+            fi
+          else
+            echo "Release ${RELEASE_TAG} (version ${VERSION_ONLY}) is not a minor version. Skipping backport label creation."
+            exit 0
+          fi
````
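The `run` script above drives everything with shell parameter expansion and a regex gate. As a minimal illustration of the same version logic (my sketch, not part of the diff; the function name is invented):

```python
import re

def backport_label_for(release_tag: str) -> str | None:
    """Mirror the workflow: strip a leading 'v', accept only X.Y.0 releases,
    and derive the backport label from the X.Y prefix."""
    version = release_tag.removeprefix("v")        # v5.6.0 -> 5.6.0
    if not re.fullmatch(r"\d+\.\d+\.0", version):  # only minor releases qualify
        return None
    return f"backport-to-v{version.removesuffix('.0')}"

assert backport_label_for("v5.6.0") == "backport-to-v5.6"
assert backport_label_for("v5.6.1") is None  # patch releases are skipped
```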
````diff
@@ -12,11 +12,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          ref: ${{ github.event.pull_request.merge_commit_sha }}
 
       - name: Set short git commit SHA
         id: vars
         run: |
-          shortSha=$(git rev-parse --short ${{ github.sha }})
+          shortSha=$(git rev-parse --short ${{ github.event.pull_request.merge_commit_sha }})
           echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV
 
       - name: Trigger pull request
````
````diff
@@ -26,9 +28,10 @@ jobs:
           repository: ${{ secrets.CLOUD_DISPATCH }}
           event-type: prowler-pull-request-merged
           client-payload: '{
-            "PROWLER_COMMIT_SHA": "${{ github.sha }}",
+            "PROWLER_COMMIT_SHA": "${{ github.event.pull_request.merge_commit_sha }}",
             "PROWLER_COMMIT_SHORT_SHA": "${{ env.SHORT_SHA }}",
             "PROWLER_PR_TITLE": "${{ github.event.pull_request.title }}",
             "PROWLER_PR_LABELS": ${{ toJson(github.event.pull_request.labels.*.name) }},
-            "PROWLER_PR_BODY": ${{ toJson(github.event.pull_request.body) }}
+            "PROWLER_PR_BODY": ${{ toJson(github.event.pull_request.body) }},
+            "PROWLER_PR_URL":${{ toJson(github.event.pull_request.html_url) }}
           }'
````
````diff
@@ -25,17 +25,31 @@ jobs:
           MINOR_VERSION=${BASH_REMATCH[2]}
           FIX_VERSION=${BASH_REMATCH[3]}
 
+          # Export version components to GitHub environment
+          echo "MAJOR_VERSION=${MAJOR_VERSION}" >> "${GITHUB_ENV}"
+          echo "MINOR_VERSION=${MINOR_VERSION}" >> "${GITHUB_ENV}"
+          echo "FIX_VERSION=${FIX_VERSION}" >> "${GITHUB_ENV}"
+
           if (( MAJOR_VERSION == 5 )); then
             if (( FIX_VERSION == 0 )); then
               echo "Minor Release: $PROWLER_VERSION"
 
               # Set up next minor version for master
               BUMP_VERSION_TO=${MAJOR_VERSION}.$((MINOR_VERSION + 1)).${FIX_VERSION}
               echo "BUMP_VERSION_TO=${BUMP_VERSION_TO}" >> "${GITHUB_ENV}"
 
               TARGET_BRANCH=${BASE_BRANCH}
               echo "TARGET_BRANCH=${TARGET_BRANCH}" >> "${GITHUB_ENV}"
 
+              # Set up patch version for version branch
+              PATCH_VERSION_TO=${MAJOR_VERSION}.${MINOR_VERSION}.1
+              echo "PATCH_VERSION_TO=${PATCH_VERSION_TO}" >> "${GITHUB_ENV}"
+
+              VERSION_BRANCH=v${MAJOR_VERSION}.${MINOR_VERSION}
+              echo "VERSION_BRANCH=${VERSION_BRANCH}" >> "${GITHUB_ENV}"
+
               echo "Bumping to next minor version: ${BUMP_VERSION_TO} in branch ${TARGET_BRANCH}"
+              echo "Bumping to next patch version: ${PATCH_VERSION_TO} in branch ${VERSION_BRANCH}"
             else
               echo "Patch Release: $PROWLER_VERSION"
 
````
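To make the version arithmetic in this step concrete, here is a small Python sketch (mine, not from the diff) of the values exported for a v5 minor release:

```python
def release_targets(version: str) -> dict:
    """For an X.Y.0 release with major version 5, compute what the workflow
    exports: the next minor version for master and the first patch version
    for the vX.Y maintenance branch."""
    major, minor, fix = (int(part) for part in version.split("."))
    if major != 5 or fix != 0:
        return {}
    return {
        "BUMP_VERSION_TO": f"{major}.{minor + 1}.{fix}",  # next minor for master
        "PATCH_VERSION_TO": f"{major}.{minor}.1",         # first patch of this minor
        "VERSION_BRANCH": f"v{major}.{minor}",            # maintenance branch
    }

assert release_targets("5.6.0") == {
    "BUMP_VERSION_TO": "5.7.0",
    "PATCH_VERSION_TO": "5.6.1",
    "VERSION_BRANCH": "v5.6",
}
```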
````diff
@@ -74,7 +88,6 @@ jobs:
 
           git --no-pager diff
 
-
       - name: Create Pull Request
         uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
         with:
````
````diff
@@ -92,3 +105,41 @@ jobs:
             ### License
 
             By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
+
+      - name: Handle patch version for minor release
+        if: env.FIX_VERSION == '0'
+        run: |
+          echo "Using PROWLER_VERSION=$PROWLER_VERSION"
+          echo "Using PATCH_VERSION_TO=$PATCH_VERSION_TO"
+
+          set -e
+
+          echo "Bumping version in pyproject.toml ..."
+          sed -i "s|version = \"${PROWLER_VERSION}\"|version = \"${PATCH_VERSION_TO}\"|" pyproject.toml
+
+          echo "Bumping version in prowler/config/config.py ..."
+          sed -i "s|prowler_version = \"${PROWLER_VERSION}\"|prowler_version = \"${PATCH_VERSION_TO}\"|" prowler/config/config.py
+
+          echo "Bumping version in .env ..."
+          sed -i "s|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PROWLER_VERSION}|NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${PATCH_VERSION_TO}|" .env
+
+          git --no-pager diff
+
+      - name: Create Pull Request for patch version
+        if: env.FIX_VERSION == '0'
+        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
+        with:
+          author: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
+          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
+          base: ${{ env.VERSION_BRANCH }}
+          commit-message: "chore(release): Bump version to v${{ env.PATCH_VERSION_TO }}"
+          branch: "version-bump-to-v${{ env.PATCH_VERSION_TO }}"
+          title: "chore(release): Bump version to v${{ env.PATCH_VERSION_TO }}"
+          body: |
+            ### Description
+
+            Bump Prowler version to v${{ env.PATCH_VERSION_TO }}
+
+            ### License
+
+            By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
````
````diff
@@ -167,6 +167,21 @@
         run: |
           poetry run pytest -n auto --cov=./prowler/providers/kubernetes --cov-report=xml:kubernetes_coverage.xml tests/providers/kubernetes
 
+      # Test GitHub
+      - name: GitHub - Check if any file has changed
+        id: github-changed-files
+        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        with:
+          files: |
+            ./prowler/providers/github/**
+            ./tests/providers/github/**
+            .poetry.lock
+
+      - name: GitHub - Test
+        if: steps.github-changed-files.outputs.any_changed == 'true'
+        run: |
+          poetry run pytest -n auto --cov=./prowler/providers/github --cov-report=xml:github_coverage.xml tests/providers/github
+
       # Test NHN
       - name: NHN - Check if any file has changed
         id: nhn-changed-files
````
````diff
@@ -216,4 +231,4 @@
           CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
         with:
           flags: prowler
-          files: ./aws_coverage.xml,./azure_coverage.xml,./gcp_coverage.xml,./kubernetes_coverage.xml,./nhn_coverage.xml,./m365_coverage.xml,./lib_coverage.xml,./config_coverage.xml
+          files: ./aws_coverage.xml,./azure_coverage.xml,./gcp_coverage.xml,./kubernetes_coverage.xml,./github_coverage.xml,./nhn_coverage.xml,./m365_coverage.xml,./lib_coverage.xml,./config_coverage.xml
````
````diff
@@ -3,7 +3,7 @@
 <img align="center" src="https://github.com/prowler-cloud/prowler/blob/master/docs/img/prowler-logo-white.png#gh-dark-mode-only" width="50%" height="50%">
 </p>
 <p align="center">
-  <b><i>Prowler Open Source</b> is as dynamic and adaptable as the environment they’re meant to protect. Trusted by the leaders in security.
+  <b><i>Prowler</b> is the Open Cloud Security platform trusted by thousands to automate security and compliance in any cloud environment. With hundreds of ready-to-use checks and compliance frameworks, Prowler delivers real-time, customizable monitoring and seamless integrations, making cloud security simple, scalable, and cost-effective for organizations of any size.
 </p>
 <p align="center">
 <b>Learn more at <a href="https://prowler.com">prowler.com</i></b>
````
````diff
@@ -43,15 +43,29 @@
 
 # Description
 
-**Prowler** is an Open Source security tool to perform AWS, Azure, Google Cloud and Kubernetes security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness, and also remediations! We have Prowler CLI (Command Line Interface) that we call Prowler Open Source and a service on top of it that we call <a href="https://prowler.com">Prowler Cloud</a>.
+**Prowler** is an open-source security tool designed to assess and enforce security best practices across AWS, Azure, Google Cloud, and Kubernetes. It supports tasks such as security audits, incident response, continuous monitoring, system hardening, forensic readiness, and remediation processes.
+
+Prowler includes hundreds of built-in controls to ensure compliance with standards and frameworks, including:
+
+- **Industry Standards:** CIS, NIST 800, NIST CSF, and CISA
+- **Regulatory Compliance and Governance:** RBI, FedRAMP, and PCI-DSS
+- **Frameworks for Sensitive Data and Privacy:** GDPR, HIPAA, and FFIEC
+- **Frameworks for Organizational Governance and Quality Control:** SOC2 and GXP
+- **AWS-Specific Frameworks:** AWS Foundational Technical Review (FTR) and AWS Well-Architected Framework (Security Pillar)
+- **National Security Standards:** ENS (Spanish National Security Scheme)
+- **Custom Security Frameworks:** Tailored to your needs
+
+## Prowler CLI and Prowler Cloud
+
+Prowler offers a Command Line Interface (CLI), known as Prowler Open Source, and an additional service built on top of it, called <a href="https://prowler.com">Prowler Cloud</a>.
 
 ## Prowler App
 
-Prowler App is a web application that allows you to run Prowler in your cloud provider accounts and visualize the results in a user-friendly interface.
+Prowler App is a web-based application that simplifies running Prowler across your cloud provider accounts. It provides a user-friendly interface to visualize the results and streamline your security assessments.
 
 (image)
 
->More details at [Prowler App Documentation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#prowler-app-installation)
+>For more details, refer to the [Prowler App Documentation](https://docs.prowler.com/projects/prowler-open-source/en/latest/#prowler-app-installation)
 
 ## Prowler CLI
 
````
````diff
@@ -60,6 +74,7 @@ prowler <provider>
 ```
 (image)
 
+
 ## Prowler Dashboard
 
 ```console
````
````diff
@@ -67,26 +82,34 @@ prowler dashboard
 ```
 (image)
 
-It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks.
+# Prowler at a Glance
 
 | Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
 |---|---|---|---|---|
-| AWS | 564 | 82 | 33 | 10 |
-| GCP | 79 | 13 | 7 | 3 |
-| Azure | 140 | 18 | 8 | 3 |
-| Kubernetes | 83 | 7 | 4 | 7 |
-| M365 | 44 | 2 | 1 | 0 |
+| AWS | 567 | 82 | 36 | 10 |
+| GCP | 79 | 13 | 9 | 3 |
+| Azure | 142 | 18 | 10 | 3 |
+| Kubernetes | 83 | 7 | 5 | 7 |
+| GitHub | 16 | 2 | 1 | 0 |
+| M365 | 69 | 7 | 2 | 2 |
 | NHN (Unofficial) | 6 | 2 | 1 | 0 |
 
-> You can list the checks, services, compliance frameworks and categories with `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.
+> [!Note]
+> The numbers in the table are updated periodically.
+
+> [!Tip]
+> For the most accurate and up-to-date information about checks, services, frameworks, and categories, visit [**Prowler Hub**](https://hub.prowler.com).
+
+> [!Note]
+> Use the following commands to list Prowler's available checks, services, compliance frameworks, and categories: `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.
 
 # 💻 Installation
 
 ## Prowler App
 
-Prowler App can be installed in different ways, depending on your environment:
+Prowler App offers flexible installation methods tailored to various environments:
 
->See how to use Prowler App in the [Prowler App Usage Guide](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app/).
+>For detailed instructions on using Prowler App, refer to the [Prowler App Usage Guide](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app/).
 
 ### Docker Compose
 
````
````diff
@@ -102,8 +125,16 @@ curl -LO https://raw.githubusercontent.com/prowler-cloud/prowler/refs/heads/mast
 docker compose up -d
 ```
 
-> Containers are built for `linux/amd64`. If your workstation's architecture is different, please set `DOCKER_DEFAULT_PLATFORM=linux/amd64` in your environment or use the `--platform linux/amd64` flag in the docker command.
-> Enjoy Prowler App at http://localhost:3000 by signing up with your email and password.
+> Containers are built for `linux/amd64`.
+
+### Configuring Your Workstation for Prowler App
+
+If your workstation's architecture is incompatible, you can resolve this by:
+
+- **Setting the environment variable**: `DOCKER_DEFAULT_PLATFORM=linux/amd64`
+- **Using the following flag in your Docker command**: `--platform linux/amd64`
+
+> Once configured, access the Prowler App at http://localhost:3000. Sign up using your email and password to get started.
 
 ### From GitHub
 
````
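As a usage note on the two workarounds above: either can be combined with the Compose command shown earlier, e.g. `DOCKER_DEFAULT_PLATFORM=linux/amd64 docker compose up -d`.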
````diff
@@ -129,12 +160,12 @@ python manage.py migrate --database admin
 gunicorn -c config/guniconf.py config.wsgi:application
 ```
 > [!IMPORTANT]
-> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
+> As of Poetry v2.0.0, the `poetry shell` command has been deprecated. Use `poetry env activate` instead for environment activation.
 >
-> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
-> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment
+> If your Poetry version is below v2.0.0, continue using `poetry shell` to activate your environment.
+> For further guidance, refer to the Poetry Environment Activation Guide https://python-poetry.org/docs/managing-environments/#activating-the-environment.
 
-> Now, you can access the API documentation at http://localhost:8080/api/v1/docs.
+> After completing the setup, access the API documentation at http://localhost:8080/api/v1/docs.
 
 **Commands to run the API Worker**
 
````
````diff
@@ -172,29 +203,31 @@ npm run build
 npm start
 ```
 
-> Enjoy Prowler App at http://localhost:3000 by signing up with your email and password.
+> Once configured, access the Prowler App at http://localhost:3000. Sign up using your email and password to get started.
 
 ## Prowler CLI
 ### Pip package
-Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python > 3.9.1, < 3.13:
+Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/). Consequently, it can be installed using pip with Python >3.9.1, <3.13:
 
 ```console
 pip install prowler
 prowler -v
 ```
->More details at [https://docs.prowler.com](https://docs.prowler.com/projects/prowler-open-source/en/latest/#prowler-cli-installation)
+>For further guidance, refer to [https://docs.prowler.com](https://docs.prowler.com/projects/prowler-open-source/en/latest/#prowler-cli-installation)
 
 ### Containers
 
-The available versions of Prowler CLI are the following:
+**Available Versions of Prowler CLI**
 
-- `latest`: in sync with `master` branch (bear in mind that it is not a stable version)
-- `v4-latest`: in sync with `v4` branch (bear in mind that it is not a stable version)
-- `v3-latest`: in sync with `v3` branch (bear in mind that it is not a stable version)
-- `<x.y.z>` (release): you can find the releases [here](https://github.com/prowler-cloud/prowler/releases), those are stable releases.
-- `stable`: this tag always point to the latest release.
-- `v4-stable`: this tag always point to the latest release for v4.
-- `v3-stable`: this tag always point to the latest release for v3.
+The following versions of Prowler CLI are available, depending on your requirements:
+
+- `latest`: Synchronizes with the `master` branch. Note that this version is not stable.
+- `v4-latest`: Synchronizes with the `v4` branch. Note that this version is not stable.
+- `v3-latest`: Synchronizes with the `v3` branch. Note that this version is not stable.
+- `<x.y.z>` (release): Stable releases corresponding to specific versions. You can find the complete list of releases [here](https://github.com/prowler-cloud/prowler/releases).
+- `stable`: Always points to the latest release.
+- `v4-stable`: Always points to the latest release for v4.
+- `v3-stable`: Always points to the latest release for v3.
 
 The container images are available here:
 - Prowler CLI:
````
````diff
@@ -206,7 +239,7 @@ The container images are available here:
 
 ### From GitHub
 
-Python > 3.9.1, < 3.13 is required with pip and poetry:
+Python >3.9.1, <3.13 is required with pip and Poetry:
 
 ``` console
 git clone https://github.com/prowler-cloud/prowler
````
````diff
@@ -216,25 +249,46 @@ poetry install
 python prowler-cli.py -v
 ```
-> [!IMPORTANT]
-> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
->
-> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
-> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment
+> To clone Prowler on Windows, configure Git to support long file paths by running the following command: `git config core.longpaths true`.
 
-> If you want to clone Prowler from Windows, use `git config core.longpaths true` to allow long file paths.
-# 📐✏️ High level architecture
+> [!IMPORTANT]
+> As of Poetry v2.0.0, the `poetry shell` command has been deprecated. Use `poetry env activate` instead for environment activation.
+>
+> If your Poetry version is below v2.0.0, continue using `poetry shell` to activate your environment.
+> For further guidance, refer to the Poetry Environment Activation Guide https://python-poetry.org/docs/managing-environments/#activating-the-environment.
+
+# ✏️ High level architecture
 
 ## Prowler App
-The **Prowler App** consists of three main components:
+**Prowler App** is composed of three key components:
 
-- **Prowler UI**: A user-friendly web interface for running Prowler and viewing results, powered by Next.js.
-- **Prowler API**: The backend API that executes Prowler scans and stores the results, built with Django REST Framework.
-- **Prowler SDK**: A Python SDK that integrates with the Prowler CLI for advanced functionality.
+- **Prowler UI**: A web-based interface, built with Next.js, providing a user-friendly experience for executing Prowler scans and visualizing results.
+- **Prowler API**: A backend service, developed with Django REST Framework, responsible for running Prowler scans and storing the generated results.
+- **Prowler SDK**: A Python SDK designed to extend the functionality of the Prowler CLI for advanced capabilities.
 
 (image)
 
 ## Prowler CLI
-You can run Prowler from your workstation, a Kubernetes Job, a Google Compute Engine, an Azure VM, an EC2 instance, Fargate or any other container, CloudShell and many more.
+
+**Running Prowler**
+
+Prowler can be executed across various environments, offering flexibility to meet your needs. It can be run from:
+
+- Your own workstation
+
+- A Kubernetes Job
+
+- Google Compute Engine
+
+- Azure Virtual Machines (VMs)
+
+- Amazon EC2 instances
+
+- AWS Fargate or other container platforms
+
+- CloudShell
+
+And many more environments.
 
 (image)
````
````diff
@@ -242,23 +296,36 @@ You can run Prowler from your workstation, a Kubernetes Job, a Google Compute En
 
 ## General
 - `Allowlist` now is called `Mutelist`.
-- The `--quiet` option has been deprecated, now use the `--status` flag to select the finding's status you want to get from PASS, FAIL or MANUAL.
-- All `INFO` finding's status has changed to `MANUAL`.
-- The CSV output format is common for all the providers.
+- The `--quiet` option has been deprecated. Use the `--status` flag to filter findings based on their status: PASS, FAIL, or MANUAL.
+- All findings with an `INFO` status have been reclassified as `MANUAL`.
+- The CSV output format is standardized across all providers.
 
-We have deprecated some of our outputs formats:
-- The native JSON is replaced for the JSON [OCSF](https://schema.ocsf.io/) v1.1.0, common for all the providers.
+**Deprecated Output Formats**
+
+The following formats are now deprecated:
+- Native JSON has been replaced with JSON in [OCSF] v1.1.0 format, which is standardized across all providers (https://schema.ocsf.io/).
 
 ## AWS
-- Deprecate the AWS flag --sts-endpoint-region since we use AWS STS regional tokens.
-- To send only FAILS to AWS Security Hub, now use either `--send-sh-only-fails` or `--security-hub --status FAIL`.
+
+**AWS Flag Deprecation**
+
+The flag --sts-endpoint-region has been deprecated due to the adoption of AWS STS regional tokens.
+
+**Sending FAIL Results to AWS Security Hub**
+
+- To send only FAILS to AWS Security Hub, use one of the following options: `--send-sh-only-fails` or `--security-hub --status FAIL`.
+
 
 # 📖 Documentation
 
-Install, Usage, Tutorials and Developer Guide is at https://docs.prowler.com/
+**Documentation Resources**
+
+For installation instructions, usage details, tutorials, and the Developer Guide, visit https://docs.prowler.com/
 
 # 📃 License
 
-Prowler is licensed as Apache License 2.0 as specified in each file. You may obtain a copy of the License at
-<http://www.apache.org/licenses/LICENSE-2.0>
+**Prowler License Information**
+
+Prowler is licensed under the Apache License 2.0, as indicated in each file within the repository. Obtaining a Copy of the License
+
+A copy of the License is available at <http://www.apache.org/licenses/LICENSE-2.0>
````
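As a usage illustration of the flags this section names (the provider argument is generic): `prowler aws --status FAIL` returns only failed findings, and `prowler aws --security-hub --status FAIL` sends only those failures to AWS Security Hub.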
````diff
@@ -2,6 +2,46 @@
 
 All notable changes to the **Prowler API** are documented in this file.
 
+## [v1.9.0] (Prowler UNRELEASED)
+
+### Added
+- Support GCP Service Account key. [(#7824)](https://github.com/prowler-cloud/prowler/pull/7824)
+
+### Changed
+- Renamed field encrypted_password to password for M365 provider [(#7784)](https://github.com/prowler-cloud/prowler/pull/7784)
+
+### Fixed
+- Fixed the connection status verification before launching a scan [(#7831)](https://github.com/prowler-cloud/prowler/pull/7831)
+
+---
+
+## [v1.8.2] (Prowler v5.7.2)
+
+### Fixed
+- Fixed task lookup to use task_kwargs instead of task_args for scan report resolution. [(#7830)](https://github.com/prowler-cloud/prowler/pull/7830)
+- Fixed Kubernetes UID validation to allow valid context names [(#7871)](https://github.com/prowler-cloud/prowler/pull/7871)
+- Fixed a race condition when creating background tasks [(#7876)](https://github.com/prowler-cloud/prowler/pull/7876).
+- Fixed an error when modifying or retrieving tenants due to missing user UUID in transaction context [(#7890)](https://github.com/prowler-cloud/prowler/pull/7890).
+
+---
+
+## [v1.8.1] (Prowler v5.7.1)
+
+### Fixed
+- Added database index to improve performance on finding lookup. [(#7800)](https://github.com/prowler-cloud/prowler/pull/7800)
+
+---
+
+## [v1.8.0] (Prowler v5.7.0)
+
+### Added
+- Added huge improvements to `/findings/metadata` and resource related filters for findings [(#7690)](https://github.com/prowler-cloud/prowler/pull/7690).
+- Added improvements to `/overviews` endpoints [(#7690)](https://github.com/prowler-cloud/prowler/pull/7690).
+- Added new queue to perform backfill background tasks [(#7690)](https://github.com/prowler-cloud/prowler/pull/7690).
+- Added new endpoints to retrieve latest findings and metadata [(#7743)](https://github.com/prowler-cloud/prowler/pull/7743).
+- Added export support for Prowler ThreatScore in M365 [(7783)](https://github.com/prowler-cloud/prowler/pull/7783)
+
+---
+
 ## [v1.7.0] (Prowler v5.6.0)
 
````
````diff
@@ -28,7 +28,7 @@ start_prod_server() {
 
 start_worker() {
     echo "Starting the worker..."
-    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion -E --max-tasks-per-child 1
+    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion,backfill -E --max-tasks-per-child 1
 }
 
 start_worker_beat() {
````
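The `-Q` change subscribes the worker to the new `backfill` queue referenced in the changelog above. A minimal sketch of how work lands on a named Celery queue (the task name and arguments are illustrative, not taken from the repository):

```python
from celery import Celery

app = Celery("config")

@app.task
def backfill_finding_fields(tenant_id: str) -> None:
    """Hypothetical backfill job standing in for the real tasks."""
    ...

# Route onto the dedicated queue; only workers started with `-Q ...,backfill`
# (as in the hunk above) will consume it.
backfill_finding_fields.apply_async(args=["tenant-123"], queue="backfill")
```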
````diff
@@ -2237,14 +2237,14 @@ tornado = ["tornado (>=0.2)"]
 
 [[package]]
 name = "h11"
-version = "0.14.0"
+version = "0.16.0"
 description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 groups = ["main"]
 files = [
-    {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
-    {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+    {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
+    {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
 ]
 
 [[package]]
````
````diff
@@ -2277,19 +2277,19 @@ files = [
 
 [[package]]
 name = "httpcore"
-version = "1.0.8"
+version = "1.0.9"
 description = "A minimal low-level HTTP client."
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
 files = [
-    {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
-    {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
+    {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
+    {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
 ]
 
 [package.dependencies]
 certifi = "*"
-h11 = ">=0.13,<0.15"
+h11 = ">=0.16"
 
 [package.extras]
 asyncio = ["anyio (>=4.0,<5.0)"]
````
````diff
@@ -35,7 +35,7 @@ name = "prowler-api"
 package-mode = false
 # Needed for the SDK compatibility
 requires-python = ">=3.11,<3.13"
-version = "1.7.0"
+version = "1.9.0"
 
 [project.scripts]
 celery = "src.backend.config.settings.celery"
````
````diff
@@ -109,16 +109,6 @@ class BaseTenantViewset(BaseViewSet):
             pass # Tenant might not exist, handle gracefully
 
     def initial(self, request, *args, **kwargs):
-        if (
-            request.resolver_match.url_name != "tenant-detail"
-            and request.method != "DELETE"
-        ):
-            user_id = str(request.user.id)
-
-            with rls_transaction(value=user_id, parameter=POSTGRES_USER_VAR):
-                return super().initial(request, *args, **kwargs)
-
-        # TODO: DRY this when we have time
         if request.auth is None:
             raise NotAuthenticated
 
````
````diff
@@ -126,8 +116,8 @@ class BaseTenantViewset(BaseViewSet):
         if tenant_id is None:
             raise NotAuthenticated("Tenant ID is not present in token")
 
-        with rls_transaction(tenant_id):
-            self.request.tenant_id = tenant_id
+        user_id = str(request.user.id)
+        with rls_transaction(value=user_id, parameter=POSTGRES_USER_VAR):
             return super().initial(request, *args, **kwargs)
 
 
````
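Read together, the two hunks above remove the `tenant-detail` special case and have `BaseTenantViewset.initial` always open the row-level-security transaction keyed on the requesting user via `POSTGRES_USER_VAR` rather than on the tenant ID alone; this appears to correspond to the changelog fix for the missing user UUID in the transaction context ([#7890](https://github.com/prowler-cloud/prowler/pull/7890)).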
````diff
@@ -227,6 +227,77 @@ def register_enum(apps, schema_editor, enum_class): # noqa: F841
     register_adapter(enum_class, enum_adapter)
 
 
+def create_index_on_partitions(
+    apps,  # noqa: F841
+    schema_editor,
+    parent_table: str,
+    index_name: str,
+    columns: str,
+    method: str = "BTREE",
+    where: str = "",
+):
+    """
+    Create an index on every existing partition of `parent_table`.
+
+    Args:
+        parent_table: The name of the root table (e.g. "findings").
+        index_name: A short name for the index (will be prefixed per-partition).
+        columns: The parenthesized column list, e.g. "tenant_id, scan_id, status".
+        method: The index method—BTREE, GIN, etc. Defaults to BTREE.
+        where: Optional WHERE clause (without the leading "WHERE"), e.g. "status = 'FAIL'".
+    """
+    with connection.cursor() as cursor:
+        cursor.execute(
+            """
+            SELECT inhrelid::regclass::text
+            FROM pg_inherits
+            WHERE inhparent = %s::regclass
+            """,
+            [parent_table],
+        )
+        partitions = [row[0] for row in cursor.fetchall()]
+
+    where_sql = f" WHERE {where}" if where else ""
+    for partition in partitions:
+        idx_name = f"{partition.replace('.', '_')}_{index_name}"
+        sql = (
+            f"CREATE INDEX CONCURRENTLY IF NOT EXISTS {idx_name} "
+            f"ON {partition} USING {method} ({columns})"
+            f"{where_sql};"
+        )
+        schema_editor.execute(sql)
+
+
+def drop_index_on_partitions(
+    apps,  # noqa: F841
+    schema_editor,
+    parent_table: str,
+    index_name: str,
+):
+    """
+    Drop the per-partition indexes that were created by create_index_on_partitions.
+
+    Args:
+        parent_table: The name of the root table (e.g. "findings").
+        index_name: The same short name used when creating them.
+    """
+    with connection.cursor() as cursor:
+        cursor.execute(
+            """
+            SELECT inhrelid::regclass::text
+            FROM pg_inherits
+            WHERE inhparent = %s::regclass
+            """,
+            [parent_table],
+        )
+        partitions = [row[0] for row in cursor.fetchall()]
+
+    for partition in partitions:
+        idx_name = f"{partition.replace('.', '_')}_{index_name}"
+        sql = f"DROP INDEX CONCURRENTLY IF EXISTS {idx_name};"
+        schema_editor.execute(sql)
+
+
 # Postgres enum definition for member role
````
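A quick illustration (mine) of the statement this helper emits, using a hypothetical partition name:

```python
# "findings_2025_05" is a made-up partition name for illustration only.
partition = "findings_2025_05"
index_name = "gin_find_service_idx"

# Same f-string construction as in create_index_on_partitions above.
idx_name = f"{partition.replace('.', '_')}_{index_name}"
sql = f"CREATE INDEX CONCURRENTLY IF NOT EXISTS {idx_name} ON {partition} USING GIN (resource_services);"
print(sql)
# -> CREATE INDEX CONCURRENTLY IF NOT EXISTS findings_2025_05_gin_find_service_idx ON findings_2025_05 USING GIN (resource_services);
```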
````diff
@@ -81,6 +81,114 @@ class ChoiceInFilter(BaseInFilter, ChoiceFilter):
     pass
 
 
+class CommonFindingFilters(FilterSet):
+    # We filter providers from the scan in findings
+    provider = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
+    provider__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
+    provider_type = ChoiceFilter(
+        choices=Provider.ProviderChoices.choices, field_name="scan__provider__provider"
+    )
+    provider_type__in = ChoiceInFilter(
+        choices=Provider.ProviderChoices.choices, field_name="scan__provider__provider"
+    )
+    provider_uid = CharFilter(field_name="scan__provider__uid", lookup_expr="exact")
+    provider_uid__in = CharInFilter(field_name="scan__provider__uid", lookup_expr="in")
+    provider_uid__icontains = CharFilter(
+        field_name="scan__provider__uid", lookup_expr="icontains"
+    )
+    provider_alias = CharFilter(field_name="scan__provider__alias", lookup_expr="exact")
+    provider_alias__in = CharInFilter(
+        field_name="scan__provider__alias", lookup_expr="in"
+    )
+    provider_alias__icontains = CharFilter(
+        field_name="scan__provider__alias", lookup_expr="icontains"
+    )
+
+    updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
+
+    uid = CharFilter(field_name="uid")
+    delta = ChoiceFilter(choices=Finding.DeltaChoices.choices)
+    status = ChoiceFilter(choices=StatusChoices.choices)
+    severity = ChoiceFilter(choices=SeverityChoices)
+    impact = ChoiceFilter(choices=SeverityChoices)
+    muted = BooleanFilter(
+        help_text="If this filter is not provided, muted and non-muted findings will be returned."
+    )
+
+    resources = UUIDInFilter(field_name="resource__id", lookup_expr="in")
+
+    region = CharFilter(method="filter_resource_region")
+    region__in = CharInFilter(field_name="resource_regions", lookup_expr="overlap")
+    region__icontains = CharFilter(
+        field_name="resource_regions", lookup_expr="icontains"
+    )
+
+    service = CharFilter(method="filter_resource_service")
+    service__in = CharInFilter(field_name="resource_services", lookup_expr="overlap")
+    service__icontains = CharFilter(
+        field_name="resource_services", lookup_expr="icontains"
+    )
+
+    resource_uid = CharFilter(field_name="resources__uid")
+    resource_uid__in = CharInFilter(field_name="resources__uid", lookup_expr="in")
+    resource_uid__icontains = CharFilter(
+        field_name="resources__uid", lookup_expr="icontains"
+    )
+
+    resource_name = CharFilter(field_name="resources__name")
+    resource_name__in = CharInFilter(field_name="resources__name", lookup_expr="in")
+    resource_name__icontains = CharFilter(
+        field_name="resources__name", lookup_expr="icontains"
+    )
+
+    resource_type = CharFilter(method="filter_resource_type")
+    resource_type__in = CharInFilter(field_name="resource_types", lookup_expr="overlap")
+    resource_type__icontains = CharFilter(
+        field_name="resources__type", lookup_expr="icontains"
+    )
+
+    # Temporarily disabled until we implement tag filtering in the UI
+    # resource_tag_key = CharFilter(field_name="resources__tags__key")
+    # resource_tag_key__in = CharInFilter(
+    #     field_name="resources__tags__key", lookup_expr="in"
+    # )
+    # resource_tag_key__icontains = CharFilter(
+    #     field_name="resources__tags__key", lookup_expr="icontains"
+    # )
+    # resource_tag_value = CharFilter(field_name="resources__tags__value")
+    # resource_tag_value__in = CharInFilter(
+    #     field_name="resources__tags__value", lookup_expr="in"
+    # )
+    # resource_tag_value__icontains = CharFilter(
+    #     field_name="resources__tags__value", lookup_expr="icontains"
+    # )
+    # resource_tags = CharInFilter(
+    #     method="filter_resource_tag",
+    #     lookup_expr="in",
+    #     help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
+    #     "separated by commas.",
+    # )
+
+    def filter_resource_service(self, queryset, name, value):
+        return queryset.filter(resource_services__contains=[value])
+
+    def filter_resource_region(self, queryset, name, value):
+        return queryset.filter(resource_regions__contains=[value])
+
+    def filter_resource_type(self, queryset, name, value):
+        return queryset.filter(resource_types__contains=[value])
+
+    def filter_resource_tag(self, queryset, name, value):
+        overall_query = Q()
+        for key_value_pair in value:
+            tag_key, tag_value = key_value_pair.split(":", 1)
+            overall_query |= Q(
+                resources__tags__key__icontains=tag_key,
+                resources__tags__value__icontains=tag_value,
+            )
+        return queryset.filter(overall_query).distinct()
+
+
 class TenantFilter(FilterSet):
     inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
     updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
````
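The `region`, `service`, and `resource_type` filters above resolve against the denormalized array columns added by the migrations later in this comparison, using PostgreSQL array lookups. A minimal sketch of the equivalent ORM queries (the filter values are illustrative):

```python
from api.models import Finding

# Exact membership, as in filter_resource_service: the array contains the value.
failing_s3 = Finding.objects.filter(resource_services__contains=["s3"])

# The "__in"-style variant, as in service__in: the array overlaps any given value.
multi_region = Finding.objects.filter(
    resource_regions__overlap=["us-east-1", "eu-west-1"]
)
```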
````diff
@@ -257,94 +365,7 @@ class ResourceFilter(ProviderRelationshipFilterSet):
         return queryset.filter(tags__text_search=value)
 
 
-class FindingFilter(FilterSet):
-    # We filter providers from the scan in findings
-    provider = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
-    provider__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
-    provider_type = ChoiceFilter(
-        choices=Provider.ProviderChoices.choices, field_name="scan__provider__provider"
-    )
-    provider_type__in = ChoiceInFilter(
-        choices=Provider.ProviderChoices.choices, field_name="scan__provider__provider"
-    )
-    provider_uid = CharFilter(field_name="scan__provider__uid", lookup_expr="exact")
-    provider_uid__in = CharInFilter(field_name="scan__provider__uid", lookup_expr="in")
-    provider_uid__icontains = CharFilter(
-        field_name="scan__provider__uid", lookup_expr="icontains"
-    )
-    provider_alias = CharFilter(field_name="scan__provider__alias", lookup_expr="exact")
-    provider_alias__in = CharInFilter(
-        field_name="scan__provider__alias", lookup_expr="in"
-    )
-    provider_alias__icontains = CharFilter(
-        field_name="scan__provider__alias", lookup_expr="icontains"
-    )
-
-    updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
-
-    uid = CharFilter(field_name="uid")
-    delta = ChoiceFilter(choices=Finding.DeltaChoices.choices)
-    status = ChoiceFilter(choices=StatusChoices.choices)
-    severity = ChoiceFilter(choices=SeverityChoices)
-    impact = ChoiceFilter(choices=SeverityChoices)
-    muted = BooleanFilter(
-        help_text="If this filter is not provided, muted and non-muted findings will be returned."
-    )
-
-    resources = UUIDInFilter(field_name="resource__id", lookup_expr="in")
-
-    region = CharFilter(field_name="resources__region")
-    region__in = CharInFilter(field_name="resources__region", lookup_expr="in")
-    region__icontains = CharFilter(
-        field_name="resources__region", lookup_expr="icontains"
-    )
-
-    service = CharFilter(field_name="resources__service")
-    service__in = CharInFilter(field_name="resources__service", lookup_expr="in")
-    service__icontains = CharFilter(
-        field_name="resources__service", lookup_expr="icontains"
-    )
-
-    resource_uid = CharFilter(field_name="resources__uid")
-    resource_uid__in = CharInFilter(field_name="resources__uid", lookup_expr="in")
-    resource_uid__icontains = CharFilter(
-        field_name="resources__uid", lookup_expr="icontains"
-    )
-
-    resource_name = CharFilter(field_name="resources__name")
-    resource_name__in = CharInFilter(field_name="resources__name", lookup_expr="in")
-    resource_name__icontains = CharFilter(
-        field_name="resources__name", lookup_expr="icontains"
-    )
-
-    resource_type = CharFilter(field_name="resources__type")
-    resource_type__in = CharInFilter(field_name="resources__type", lookup_expr="in")
-    resource_type__icontains = CharFilter(
-        field_name="resources__type", lookup_expr="icontains"
-    )
-
-    # Temporarily disabled until we implement tag filtering in the UI
-    # resource_tag_key = CharFilter(field_name="resources__tags__key")
-    # resource_tag_key__in = CharInFilter(
-    #     field_name="resources__tags__key", lookup_expr="in"
-    # )
-    # resource_tag_key__icontains = CharFilter(
-    #     field_name="resources__tags__key", lookup_expr="icontains"
-    # )
-    # resource_tag_value = CharFilter(field_name="resources__tags__value")
-    # resource_tag_value__in = CharInFilter(
-    #     field_name="resources__tags__value", lookup_expr="in"
-    # )
-    # resource_tag_value__icontains = CharFilter(
-    #     field_name="resources__tags__value", lookup_expr="icontains"
-    # )
-    # resource_tags = CharInFilter(
-    #     method="filter_resource_tag",
-    #     lookup_expr="in",
-    #     help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
-    #     "separated by commas.",
-    # )
-
+class FindingFilter(CommonFindingFilters):
+    scan = UUIDFilter(method="filter_scan_id")
+    scan__in = UUIDInFilter(method="filter_scan_id_in")
 
````
````diff
@@ -385,6 +406,15 @@ class FindingFilter(FilterSet):
             },
         }
 
+    def filter_resource_type(self, queryset, name, value):
+        return queryset.filter(resource_types__contains=[value])
+
+    def filter_resource_region(self, queryset, name, value):
+        return queryset.filter(resource_regions__contains=[value])
+
+    def filter_resource_service(self, queryset, name, value):
+        return queryset.filter(resource_services__contains=[value])
+
     def filter_queryset(self, queryset):
         if not (self.data.get("scan") or self.data.get("scan__in")) and not (
             self.data.get("inserted_at")
````
````diff
@@ -503,16 +533,6 @@ class FindingFilter(FilterSet):
 
         return queryset.filter(id__lt=end)
 
-    def filter_resource_tag(self, queryset, name, value):
-        overall_query = Q()
-        for key_value_pair in value:
-            tag_key, tag_value = key_value_pair.split(":", 1)
-            overall_query |= Q(
-                resources__tags__key__icontains=tag_key,
-                resources__tags__value__icontains=tag_value,
-            )
-        return queryset.filter(overall_query).distinct()
-
     @staticmethod
     def maybe_date_to_datetime(value):
         dt = value
````
````diff
@@ -521,6 +541,31 @@ class FindingFilter(FilterSet):
         return dt
 
 
+class LatestFindingFilter(CommonFindingFilters):
+    class Meta:
+        model = Finding
+        fields = {
+            "id": ["exact", "in"],
+            "uid": ["exact", "in"],
+            "delta": ["exact", "in"],
+            "status": ["exact", "in"],
+            "severity": ["exact", "in"],
+            "impact": ["exact", "in"],
+            "check_id": ["exact", "in", "icontains"],
+        }
+        filter_overrides = {
+            FindingDeltaEnumField: {
+                "filter_class": CharFilter,
+            },
+            StatusEnumField: {
+                "filter_class": CharFilter,
+            },
+            SeverityEnumField: {
+                "filter_class": CharFilter,
+            },
+        }
+
+
 class ProviderSecretFilter(FilterSet):
     inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
     updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
````
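Since `LatestFindingFilter` is a standard django-filter `FilterSet`, it can be exercised directly; a minimal sketch (the filter values are illustrative):

```python
# A FilterSet filters a queryset from a dict of GET-style parameters.
filterset = LatestFindingFilter(
    data={"status": "FAIL", "check_id__icontains": "s3"},
    queryset=Finding.objects.all(),
)
latest_failures = filterset.qs  # lazily evaluated filtered queryset
```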
````diff
@@ -12,6 +12,7 @@ from api.models import (
     Provider,
     Resource,
     ResourceFindingMapping,
+    ResourceScanSummary,
     Scan,
     StatusChoices,
 )
````
````diff
@@ -133,6 +134,7 @@
                     region=random.choice(possible_regions),
                     service=random.choice(possible_services),
                     type=random.choice(possible_types),
+                    inserted_at="2024-10-01T00:00:00Z",
                 )
             )
 
````
````diff
@@ -181,6 +183,10 @@
                         "servicename": assigned_resource.service,
                         "resourcetype": assigned_resource.type,
                     },
+                    resource_types=[assigned_resource.type],
+                    resource_regions=[assigned_resource.region],
+                    resource_services=[assigned_resource.service],
+                    inserted_at="2024-10-01T00:00:00Z",
                 )
             )
 
````
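These denormalized `resource_types` / `resource_regions` / `resource_services` arrays on each finding are what the new array-based filters above query and what the GIN indexes in the migrations below accelerate.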
````diff
@@ -197,12 +203,22 @@
 
         # Create ResourceFindingMapping
         mappings = []
-        for index, f in enumerate(findings):
+        scan_resource_cache: set[tuple] = set()
+        for index, finding_instance in enumerate(findings):
+            resource_instance = resources[findings_resources_mapping[index]]
             mappings.append(
                 ResourceFindingMapping(
                     tenant_id=tenant_id,
-                    resource=resources[findings_resources_mapping[index]],
-                    finding=f,
+                    resource=resource_instance,
+                    finding=finding_instance,
                 )
             )
+            scan_resource_cache.add(
+                (
+                    str(resource_instance.id),
+                    resource_instance.service,
+                    resource_instance.region,
+                    resource_instance.type,
+                )
+            )
 
````
````diff
@@ -220,6 +236,38 @@
                     "Resource-finding mappings created successfully.\n\n"
                 )
             )
+
+            with rls_transaction(tenant_id):
+                scan.progress = 99
+                scan.save()
+
+            self.stdout.write(self.style.WARNING("Creating finding filter values..."))
+            resource_scan_summaries = [
+                ResourceScanSummary(
+                    tenant_id=tenant_id,
+                    scan_id=str(scan.id),
+                    resource_id=resource_id,
+                    service=service,
+                    region=region,
+                    resource_type=resource_type,
+                )
+                for resource_id, service, region, resource_type in scan_resource_cache
+            ]
+            num_batches = ceil(len(resource_scan_summaries) / batch_size)
+            with rls_transaction(tenant_id):
+                for i in tqdm(
+                    range(0, len(resource_scan_summaries), batch_size),
+                    total=num_batches,
+                ):
+                    with rls_transaction(tenant_id):
+                        ResourceScanSummary.objects.bulk_create(
+                            resource_scan_summaries[i : i + batch_size],
+                            ignore_conflicts=True,
+                        )
+
+            self.stdout.write(
+                self.style.SUCCESS("Finding filter values created successfully.\n\n")
+            )
         except Exception as e:
             self.stdout.write(self.style.ERROR(f"Failed to populate test data: {e}"))
             scan_state = "failed"
````
````diff
@@ -0,0 +1,81 @@
+# Generated by Django 5.1.7 on 2025-05-05 10:01
+
+import uuid
+
+import django.db.models.deletion
+import uuid6
+from django.db import migrations, models
+
+import api.rls
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("api", "0017_m365_provider"),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name="ResourceScanSummary",
+            fields=[
+                (
+                    "id",
+                    models.BigAutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("scan_id", models.UUIDField(db_index=True, default=uuid6.uuid7)),
+                ("resource_id", models.UUIDField(db_index=True, default=uuid.uuid4)),
+                ("service", models.CharField(max_length=100)),
+                ("region", models.CharField(max_length=100)),
+                ("resource_type", models.CharField(max_length=100)),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "resource_scan_summaries",
+                "indexes": [
+                    models.Index(
+                        fields=["tenant_id", "scan_id", "service"],
+                        name="rss_tenant_scan_svc_idx",
+                    ),
+                    models.Index(
+                        fields=["tenant_id", "scan_id", "region"],
+                        name="rss_tenant_scan_reg_idx",
+                    ),
+                    models.Index(
+                        fields=["tenant_id", "scan_id", "resource_type"],
+                        name="rss_tenant_scan_type_idx",
+                    ),
+                    models.Index(
+                        fields=["tenant_id", "scan_id", "region", "service"],
+                        name="rss_tenant_scan_reg_svc_idx",
+                    ),
+                    models.Index(
+                        fields=["tenant_id", "scan_id", "service", "resource_type"],
+                        name="rss_tenant_scan_svc_type_idx",
+                    ),
+                    models.Index(
+                        fields=["tenant_id", "scan_id", "region", "resource_type"],
+                        name="rss_tenant_scan_reg_type_idx",
+                    ),
+                ],
+                "unique_together": {("tenant_id", "scan_id", "resource_id")},
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="resourcescansummary",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_resourcescansummary",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+    ]
````
````diff
@@ -0,0 +1,42 @@
+import django.contrib.postgres.fields
+import django.contrib.postgres.indexes
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("api", "0018_resource_scan_summaries"),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="finding",
+            name="resource_regions",
+            field=django.contrib.postgres.fields.ArrayField(
+                base_field=models.CharField(max_length=100),
+                blank=True,
+                null=True,
+                size=None,
+            ),
+        ),
+        migrations.AddField(
+            model_name="finding",
+            name="resource_services",
+            field=django.contrib.postgres.fields.ArrayField(
+                base_field=models.CharField(max_length=100),
+                blank=True,
+                null=True,
+                size=None,
+            ),
+        ),
+        migrations.AddField(
+            model_name="finding",
+            name="resource_types",
+            field=django.contrib.postgres.fields.ArrayField(
+                base_field=models.CharField(max_length=100),
+                blank=True,
+                null=True,
+                size=None,
+            ),
+        ),
+    ]
````
@@ -0,0 +1,86 @@
from functools import partial

from django.db import migrations

from api.db_utils import create_index_on_partitions, drop_index_on_partitions


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0019_finding_denormalize_resource_fields"),
    ]

    operations = [
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="gin_find_service_idx",
                columns="resource_services",
                method="GIN",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="gin_find_service_idx",
            ),
        ),
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="gin_find_region_idx",
                columns="resource_regions",
                method="GIN",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="gin_find_region_idx",
            ),
        ),
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="gin_find_rtype_idx",
                columns="resource_types",
                method="GIN",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="gin_find_rtype_idx",
            ),
        ),
        migrations.RunPython(
            partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="findings_uid_idx",
            ),
            reverse_code=partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="findings_uid_idx",
                columns="uid",
                method="BTREE",
            ),
        ),
        migrations.RunPython(
            partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="findings_filter_idx",
            ),
            reverse_code=partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="findings_filter_idx",
                columns="scan_id, impact, severity, status, check_id, delta",
                method="BTREE",
            ),
        ),
    ]
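The create_index_on_partitions / drop_index_on_partitions helpers come from api.db_utils, whose implementation is not part of this diff. A minimal sketch of what they presumably do, assuming they enumerate partitions through pg_inherits and build each index concurrently (which would also explain the atomic = False above):

def create_index_on_partitions(
    apps, schema_editor, parent_table, index_name, columns, method="BTREE"
):
    # Hypothetical sketch -- the real helper lives in api.db_utils and is not
    # shown in this diff. RunPython passes (apps, schema_editor); the other
    # arguments are pre-bound with functools.partial.
    with schema_editor.connection.cursor() as cursor:
        # Enumerate every partition of the parent table.
        cursor.execute(
            "SELECT inhrelid::regclass::text FROM pg_inherits "
            "WHERE inhparent = %s::regclass;",
            [parent_table],
        )
        partitions = [row[0] for row in cursor.fetchall()]
    for partition in partitions:
        # CONCURRENTLY cannot run inside a transaction, hence atomic = False.
        schema_editor.execute(
            f"CREATE INDEX CONCURRENTLY IF NOT EXISTS {partition}_{index_name} "
            f"ON {partition} USING {method} ({columns});"
        )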
@@ -0,0 +1,37 @@
import django.contrib.postgres.indexes
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0020_findings_new_performance_indexes_partitions"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="finding",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["resource_services"], name="gin_find_service_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="finding",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["resource_regions"], name="gin_find_region_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="finding",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["resource_types"], name="gin_find_rtype_idx"
            ),
        ),
        migrations.RemoveIndex(
            model_name="finding",
            name="findings_uid_idx",
        ),
        migrations.RemoveIndex(
            model_name="finding",
            name="findings_filter_idx",
        ),
    ]
@@ -0,0 +1,38 @@
# Generated by Django 5.1.8 on 2025-05-12 10:04

from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations, models


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0021_findings_new_performance_indexes_parent"),
        ("django_celery_beat", "0019_alter_periodictasks_options"),
    ]

    operations = [
        AddIndexConcurrently(
            model_name="scan",
            index=models.Index(
                condition=models.Q(("state", "completed")),
                fields=["tenant_id", "provider_id", "state", "-inserted_at"],
                name="scans_prov_state_ins_desc_idx",
            ),
        ),
        AddIndexConcurrently(
            model_name="scansummary",
            index=models.Index(
                fields=["tenant_id", "scan_id", "service"],
                name="ss_tenant_scan_service_idx",
            ),
        ),
        AddIndexConcurrently(
            model_name="scansummary",
            index=models.Index(
                fields=["tenant_id", "scan_id", "severity"],
                name="ss_tenant_scan_severity_idx",
            ),
        ),
    ]
@@ -0,0 +1,28 @@
# Generated by Django 5.1.8 on 2025-05-12 10:18

from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations, models


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0022_scan_summaries_performance_indexes"),
    ]

    operations = [
        AddIndexConcurrently(
            model_name="resource",
            index=models.Index(
                fields=["tenant_id", "id"], name="resources_tenant_id_idx"
            ),
        ),
        AddIndexConcurrently(
            model_name="resource",
            index=models.Index(
                fields=["tenant_id", "provider_id"],
                name="resources_tenant_provider_idx",
            ),
        ),
    ]
@@ -0,0 +1,29 @@
from functools import partial

from django.db import migrations

from api.db_utils import create_index_on_partitions, drop_index_on_partitions


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0023_resources_lookup_optimization"),
    ]

    operations = [
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="find_tenant_uid_inserted_idx",
                columns="tenant_id, uid, inserted_at DESC",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="find_tenant_uid_inserted_idx",
            ),
        )
    ]
@@ -0,0 +1,17 @@
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0024_findings_uid_index_partitions"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="finding",
            index=models.Index(
                fields=["tenant_id", "uid", "-inserted_at"],
                name="find_tenant_uid_inserted_idx",
            ),
        ),
    ]
@@ -0,0 +1,14 @@
from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0025_findings_uid_index_parent"),
    ]

    operations = [
        migrations.RunSQL(
            "ALTER TYPE provider_secret_type ADD VALUE IF NOT EXISTS 'service_account';",
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]
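ALTER TYPE ... ADD VALUE extends the Postgres enum behind the provider secret type column in place, which is why the reverse is a noop. A quick sanity check, illustrative only and not part of the migration, that the database enum now carries the new member:

from django.db import connection

# Illustrative check: confirm the enum value landed; the type name matches
# the RunSQL statement above.
with connection.cursor() as cursor:
    cursor.execute("SELECT unnest(enum_range(NULL::provider_secret_type))::text;")
    values = {row[0] for row in cursor.fetchall()}

assert "service_account" in values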
@@ -1,10 +1,13 @@
import json
import re
import time
from uuid import UUID, uuid4

from config.env import env
from cryptography.fernet import Fernet
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVector, SearchVectorField
from django.core.validators import MinLengthValidator

@@ -217,9 +220,13 @@ class Provider(RowLevelSecurityProtectedModel):

    @staticmethod
    def validate_m365_uid(value):
        if not re.match(r"^[a-zA-Z0-9-]+\.onmicrosoft\.com$", value):
        if not re.match(
            r"""^(?!-)[A-Za-z0-9](?:[A-Za-z0-9-]{0,61}[A-Za-z0-9])?(?:\.(?!-)[A-Za-z0-9]"""
            r"""(?:[A-Za-z0-9-]{0,61}[A-Za-z0-9])?)*\.[A-Za-z]{2,}$""",
            value,
        ):
            raise ModelValidationError(
                detail="M365 tenant ID must be a valid domain.",
                detail="M365 domain ID must be a valid domain.",
                code="m365-uid",
                pointer="/data/attributes/uid",
            )

@@ -237,7 +244,7 @@ class Provider(RowLevelSecurityProtectedModel):
    @staticmethod
    def validate_kubernetes_uid(value):
        if not re.match(
            r"^[a-z0-9][A-Za-z0-9_.:\/-]{1,250}$",
            r"^[a-zA-Z0-9][a-zA-Z0-9._@:\/-]{1,250}$",
            value,
        ):
            raise ModelValidationError(

@@ -347,6 +354,42 @@ class ProviderGroupMembership(RowLevelSecurityProtectedModel):
        resource_name = "provider_groups-provider"


class TaskManager(models.Manager):
    def get_with_retry(
        self,
        id: str,
        max_retries: int = None,
        delay_seconds: float = None,
    ):
        """
        Retry fetching a Task by ID in case it hasn't been created yet.

        Args:
            id (str): The Celery task ID (expected to match Task model PK).
            max_retries (int, optional): Number of retry attempts. Defaults to env TASK_RETRY_ATTEMPTS or 5.
            delay_seconds (float, optional): Delay between retries in seconds. Defaults to env TASK_RETRY_DELAY_SECONDS or 0.1.

        Returns:
            Task: The retrieved Task instance.

        Raises:
            Task.DoesNotExist: If the task is not found after all retries.
        """
        max_retries = max_retries or env.int("TASK_RETRY_ATTEMPTS", default=5)
        delay_seconds = delay_seconds or env.float(
            "TASK_RETRY_DELAY_SECONDS", default=0.1
        )

        for _attempt in range(max_retries):
            try:
                return self.get(id=id)
            except self.model.DoesNotExist:
                time.sleep(delay_seconds)
        raise self.model.DoesNotExist(
            f"Task with ID {id} not found after {max_retries} retries."
        )


class Task(RowLevelSecurityProtectedModel):
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
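get_with_retry exists because .delay() can hand back a task ID before the worker has committed the corresponding Task row; polling briefly closes that race. The views below adopt it like this:

# Usage as adopted by the views later in this diff; the retry bounds fall
# back to the TASK_RETRY_ATTEMPTS / TASK_RETRY_DELAY_SECONDS env settings.
task = check_provider_connection_task.delay(
    provider_id=pk, tenant_id=self.request.tenant_id
)
prowler_task = Task.objects.get_with_retry(id=task.id)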
@@ -359,6 +402,8 @@ class Task(RowLevelSecurityProtectedModel):
        blank=True,
    )

    objects = TaskManager()

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "tasks"

@@ -425,6 +470,7 @@ class Scan(RowLevelSecurityProtectedModel):
        PeriodicTask, on_delete=models.CASCADE, null=True, blank=True
    )
    output_location = models.CharField(blank=True, null=True, max_length=200)

    # TODO: mutelist foreign key

    class Meta(RowLevelSecurityProtectedModel.Meta):

@@ -447,6 +493,11 @@ class Scan(RowLevelSecurityProtectedModel):
                fields=["tenant_id", "provider_id", "state", "inserted_at"],
                name="scans_prov_state_insert_idx",
            ),
            models.Index(
                fields=["tenant_id", "provider_id", "state", "-inserted_at"],
                condition=Q(state=StateChoices.COMPLETED),
                name="scans_prov_state_ins_desc_idx",
            ),
        ]

    class JSONAPIMeta:

@@ -573,6 +624,11 @@ class Resource(RowLevelSecurityProtectedModel):
                name="resource_tenant_metadata_idx",
            ),
            GinIndex(fields=["text_search"], name="gin_resources_search_idx"),
            models.Index(fields=["tenant_id", "id"], name="resources_tenant_id_idx"),
            models.Index(
                fields=["tenant_id", "provider_id"],
                name="resources_tenant_provider_idx",
            ),
        ]

        constraints = [

@@ -673,6 +729,21 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
    muted = models.BooleanField(default=False, null=False)
    compliance = models.JSONField(default=dict, null=True, blank=True)

    # Denormalize resource data for performance
    resource_regions = ArrayField(
        models.CharField(max_length=100), blank=True, null=True
    )
    resource_services = ArrayField(
        models.CharField(max_length=100),
        blank=True,
        null=True,
    )
    resource_types = ArrayField(
        models.CharField(max_length=100),
        blank=True,
        null=True,
    )

    # Relationships
    scan = models.ForeignKey(to=Scan, related_name="findings", on_delete=models.CASCADE)

@@ -713,18 +784,6 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
        ]

        indexes = [
            models.Index(fields=["uid"], name="findings_uid_idx"),
            models.Index(
                fields=[
                    "scan_id",
                    "impact",
                    "severity",
                    "status",
                    "check_id",
                    "delta",
                ],
                name="findings_filter_idx",
            ),
            models.Index(fields=["tenant_id", "id"], name="findings_tenant_and_id_idx"),
            GinIndex(fields=["text_search"], name="gin_findings_search_idx"),
            models.Index(fields=["tenant_id", "scan_id"], name="find_tenant_scan_idx"),

@@ -736,19 +795,42 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
                condition=Q(delta="new"),
                name="find_delta_new_idx",
            ),
            models.Index(
                fields=["tenant_id", "uid", "-inserted_at"],
                name="find_tenant_uid_inserted_idx",
            ),
            GinIndex(fields=["resource_services"], name="gin_find_service_idx"),
            GinIndex(fields=["resource_regions"], name="gin_find_region_idx"),
            GinIndex(fields=["resource_types"], name="gin_find_rtype_idx"),
        ]

    class JSONAPIMeta:
        resource_name = "findings"

    def add_resources(self, resources: list[Resource] | None):
        # Add new relationships with the tenant_id field
        if not resources:
            return

        self.resource_regions = self.resource_regions or []
        self.resource_services = self.resource_services or []
        self.resource_types = self.resource_types or []

        # Deduplication
        regions = set(self.resource_regions)
        services = set(self.resource_services)
        types = set(self.resource_types)

        for resource in resources:
            ResourceFindingMapping.objects.update_or_create(
                resource=resource, finding=self, tenant_id=self.tenant_id
            )
            regions.add(resource.region)
            services.add(resource.service)
            types.add(resource.type)

        # Save the instance
        self.resource_regions = list(regions)
        self.resource_services = list(services)
        self.resource_types = list(types)
        self.save()
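The three GIN indexes above exist for Postgres array operators on the denormalized columns; a sketch, not taken from the PR, of the filter shapes they accelerate:

# Illustrative queries only; tenant_id is assumed to be in scope.
# __overlap compiles to the && operator, __contains to @>, both GIN-backed,
# so these filters avoid a JOIN through the resources table entirely.
s3_or_ec2 = Finding.all_objects.filter(
    tenant_id=tenant_id, resource_services__overlap=["s3", "ec2"]
)
eu_west = Finding.all_objects.filter(
    tenant_id=tenant_id, resource_regions__contains=["eu-west-1"]
)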
@@ -808,6 +890,7 @@ class ProviderSecret(RowLevelSecurityProtectedModel):
    class TypeChoices(models.TextChoices):
        STATIC = "static", _("Key-value pairs")
        ROLE = "role", _("Role assumption")
        SERVICE_ACCOUNT = "service_account", _("GCP Service Account Key")

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)

@@ -1150,7 +1233,15 @@ class ScanSummary(RowLevelSecurityProtectedModel):
            models.Index(
                fields=["tenant_id", "scan_id"],
                name="scan_summaries_tenant_scan_idx",
            )
            ),
            models.Index(
                fields=["tenant_id", "scan_id", "service"],
                name="ss_tenant_scan_service_idx",
            ),
            models.Index(
                fields=["tenant_id", "scan_id", "severity"],
                name="ss_tenant_scan_severity_idx",
            ),
        ]

    class JSONAPIMeta:
@@ -1232,3 +1323,52 @@ class IntegrationProviderRelationship(RowLevelSecurityProtectedModel):
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]


class ResourceScanSummary(RowLevelSecurityProtectedModel):
    scan_id = models.UUIDField(default=uuid7, db_index=True)
    resource_id = models.UUIDField(default=uuid4, db_index=True)
    service = models.CharField(max_length=100)
    region = models.CharField(max_length=100)
    resource_type = models.CharField(max_length=100)

    class Meta:
        db_table = "resource_scan_summaries"
        unique_together = (("tenant_id", "scan_id", "resource_id"),)

        indexes = [
            # Single-dimension lookups:
            models.Index(
                fields=["tenant_id", "scan_id", "service"],
                name="rss_tenant_scan_svc_idx",
            ),
            models.Index(
                fields=["tenant_id", "scan_id", "region"],
                name="rss_tenant_scan_reg_idx",
            ),
            models.Index(
                fields=["tenant_id", "scan_id", "resource_type"],
                name="rss_tenant_scan_type_idx",
            ),
            # Two-dimension cross-filters:
            models.Index(
                fields=["tenant_id", "scan_id", "region", "service"],
                name="rss_tenant_scan_reg_svc_idx",
            ),
            models.Index(
                fields=["tenant_id", "scan_id", "service", "resource_type"],
                name="rss_tenant_scan_svc_type_idx",
            ),
            models.Index(
                fields=["tenant_id", "scan_id", "region", "resource_type"],
                name="rss_tenant_scan_reg_type_idx",
            ),
        ]

        constraints = [
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]
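How ResourceScanSummary rows get populated is handled by backfill_scan_resource_summaries_task, which is referenced but not defined in this diff. A hedged sketch of the shape such a backfill could take:

# Hypothetical backfill sketch -- the real task body is not shown here.
ResourceScanSummary.objects.bulk_create(
    [
        ResourceScanSummary(
            tenant_id=tenant_id,
            scan_id=scan_id,
            resource_id=resource.id,
            service=resource.service,
            region=resource.region,
            resource_type=resource.type,
        )
        for resource in Resource.all_objects.filter(
            tenant_id=tenant_id, findings__scan_id=scan_id
        ).distinct()
    ],
    ignore_conflicts=True,  # reruns are safe thanks to the unique_together
)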
@@ -1,6 +1,9 @@
import uuid
from unittest import mock

import pytest

from api.models import Resource, ResourceTag
from api.models import Resource, ResourceTag, Task


@pytest.mark.django_db

@@ -120,3 +123,35 @@ class TestResourceModel:
        #     compliance={},
        # )
        # assert Finding.objects.filter(uid=long_uid).exists()


@pytest.mark.django_db
class TestTaskManager:
    def test_get_with_retry_success(self):
        task_id = uuid.uuid4()
        call_counter = {"count": 0}

        def side_effect(*args, **kwargs):
            if call_counter["count"] < 2:
                call_counter["count"] += 1
                raise Task.DoesNotExist()
            return Task(id=task_id)

        with mock.patch.object(Task.objects, "get", side_effect=side_effect):
            task = Task.objects.get_with_retry(
                task_id, max_retries=5, delay_seconds=0.01
            )

        assert task.id == task_id
        assert call_counter["count"] == 2

    def test_get_with_retry_fail(self):
        non_existent_id = uuid.uuid4()

        with mock.patch.object(Task.objects, "get", side_effect=Task.DoesNotExist):
            with pytest.raises(Task.DoesNotExist) as excinfo:
                Task.objects.get_with_retry(
                    non_existent_id, max_retries=3, delay_seconds=0.01
                )

        assert str(non_existent_id) in str(excinfo.value)
@@ -2,7 +2,9 @@ import glob
import io
import json
import os
import tempfile
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import ANY, MagicMock, Mock, patch

import jwt

@@ -11,6 +13,7 @@ from botocore.exceptions import ClientError, NoCredentialsError
from conftest import API_JSON_CONTENT_TYPE, TEST_PASSWORD, TEST_USER
from django.conf import settings
from django.urls import reverse
from django_celery_results.models import TaskResult
from rest_framework import status

from api.compliance import get_compliance_frameworks
@@ -911,11 +914,41 @@ class TestProviderViewSet:
                    "uid": "gke_aaaa-dev_europe-test1_dev-aaaa-test-cluster-long-name-123456789",
                    "alias": "GKE",
                },
                {
                    "provider": "kubernetes",
                    "uid": "gke_project/cluster-name",
                    "alias": "GKE",
                },
                {
                    "provider": "kubernetes",
                    "uid": "admin@k8s-demo",
                    "alias": "test",
                },
                {
                    "provider": "azure",
                    "uid": "8851db6b-42e5-4533-aa9e-30a32d67e875",
                    "alias": "test",
                },
                {
                    "provider": "m365",
                    "uid": "TestingPro.onmicrosoft.com",
                    "alias": "test",
                },
                {
                    "provider": "m365",
                    "uid": "subdomain.domain.es",
                    "alias": "test",
                },
                {
                    "provider": "m365",
                    "uid": "microsoft.net",
                    "alias": "test",
                },
                {
                    "provider": "m365",
                    "uid": "subdomain1.subdomain2.subdomain3.subdomain4.domain.net",
                    "alias": "test",
                },
            ]
        ),
    )

@@ -984,6 +1017,51 @@ class TestProviderViewSet:
                "invalid_choice",
                "provider",
            ),
            (
                {
                    "provider": "m365",
                    "uid": "https://test.com",
                    "alias": "test",
                },
                "m365-uid",
                "uid",
            ),
            (
                {
                    "provider": "m365",
                    "uid": "thisisnotadomain",
                    "alias": "test",
                },
                "m365-uid",
                "uid",
            ),
            (
                {
                    "provider": "m365",
                    "uid": "http://test.com",
                    "alias": "test",
                },
                "m365-uid",
                "uid",
            ),
            (
                {
                    "provider": "m365",
                    "uid": f"{'a' * 64}.domain.com",
                    "alias": "test",
                },
                "m365-uid",
                "uid",
            ),
            (
                {
                    "provider": "m365",
                    "uid": f"subdomain.{'a' * 64}.com",
                    "alias": "test",
                },
                "m365-uid",
                "uid",
            ),
        ]
    ),
)
@@ -1611,6 +1689,26 @@ class TestProviderSecretViewSet:
                    "refresh_token": "refresh-token",
                },
            ),
            # GCP with Service Account Key secret
            (
                Provider.ProviderChoices.GCP.value,
                ProviderSecret.TypeChoices.SERVICE_ACCOUNT,
                {
                    "service_account_key": {
                        "type": "service_account",
                        "project_id": "project-id",
                        "private_key_id": "private-key-id",
                        "private_key": "private-key",
                        "client_email": "client-email",
                        "client_id": "client-id",
                        "auth_uri": "auth-uri",
                        "token_uri": "token-uri",
                        "auth_provider_x509_cert_url": "auth-provider-x509-cert-url",
                        "client_x509_cert_url": "client-x509-cert-url",
                        "universe_domain": "universe-domain",
                    },
                },
            ),
            # Kubernetes with STATIC secret
            (
                Provider.ProviderChoices.KUBERNETES.value,
@@ -2236,7 +2334,10 @@ class TestScanViewSet:
        url = reverse("scan-report", kwargs={"pk": scan.id})
        response = authenticated_client.get(url)
        assert response.status_code == status.HTTP_404_NOT_FOUND
        assert response.json()["errors"]["detail"] == "The scan has no reports."
        assert (
            response.json()["errors"]["detail"]
            == "The scan has no reports, or the report generation task has not started yet."
        )

    def test_report_s3_no_credentials(
        self, authenticated_client, scans_fixture, monkeypatch

@@ -2304,7 +2405,7 @@ class TestScanViewSet:
    ):
        """
        When output_location is a local path and glob.glob returns an empty list,
        the view should return HTTP 404 with detail "The scan has no reports."
        the view should return HTTP 404 with detail "The scan has no reports, or the report generation task has not started yet."
        """
        scan = scans_fixture[0]
        scan.output_location = "/tmp/nonexistent_report_pattern.zip"

@@ -2316,37 +2417,39 @@ class TestScanViewSet:
        response = authenticated_client.get(url)

        assert response.status_code == 404
        assert response.json()["errors"]["detail"] == "The scan has no reports."

    def test_report_local_file(
        self, authenticated_client, scans_fixture, tmp_path, monkeypatch
    ):
        """
        When output_location is a local file path, the view should read the file from disk
        and return it with proper headers.
        """
        scan = scans_fixture[0]
        file_content = b"local zip file content"
        file_path = tmp_path / "report.zip"
        file_path.write_bytes(file_content)

        scan.output_location = str(file_path)
        scan.state = StateChoices.COMPLETED
        scan.save()

        monkeypatch.setattr(
            glob,
            "glob",
            lambda pattern: [str(file_path)] if pattern == str(file_path) else [],
        assert (
            response.json()["errors"]["detail"]
            == "The scan has no reports, or the report generation task has not started yet."
        )

        url = reverse("scan-report", kwargs={"pk": scan.id})
        response = authenticated_client.get(url)
        assert response.status_code == 200
        assert response.content == file_content
        content_disposition = response.get("Content-Disposition")
        assert content_disposition.startswith('attachment; filename="')
        assert f'filename="{file_path.name}"' in content_disposition

    def test_report_local_file(self, authenticated_client, scans_fixture, monkeypatch):
        scan = scans_fixture[0]
        with tempfile.TemporaryDirectory() as tmp:
            tmp_path = Path(tmp)
            base_tmp = tmp_path / "report_local_file"
            base_tmp.mkdir(parents=True, exist_ok=True)

            file_content = b"local zip file content"
            file_path = base_tmp / "report.zip"
            file_path.write_bytes(file_content)

            scan.output_location = str(file_path)
            scan.state = StateChoices.COMPLETED
            scan.save()

            monkeypatch.setattr(
                glob,
                "glob",
                lambda pattern: [str(file_path)] if pattern == str(file_path) else [],
            )

            url = reverse("scan-report", kwargs={"pk": scan.id})
            response = authenticated_client.get(url)
            assert response.status_code == 200
            assert response.content == file_content
            content_disposition = response.get("Content-Disposition")
            assert content_disposition.startswith('attachment; filename="')
            assert f'filename="{file_path.name}"' in content_disposition

    def test_compliance_invalid_framework(self, authenticated_client, scans_fixture):
        scan = scans_fixture[0]
@@ -2392,7 +2495,10 @@ class TestScanViewSet:
        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": framework})
        resp = authenticated_client.get(url)
        assert resp.status_code == status.HTTP_404_NOT_FOUND
        assert resp.json()["errors"]["detail"] == "The scan has no reports."
        assert (
            resp.json()["errors"]["detail"]
            == "The scan has no reports, or the report generation task has not started yet."
        )

    def test_compliance_s3_no_credentials(
        self, authenticated_client, scans_fixture, monkeypatch
@@ -2481,31 +2587,36 @@ class TestScanViewSet:
        )

    def test_compliance_local_file(
        self, authenticated_client, scans_fixture, tmp_path, monkeypatch
        self, authenticated_client, scans_fixture, monkeypatch
    ):
        scan = scans_fixture[0]
        scan.state = StateChoices.COMPLETED
        base = tmp_path / "reports"
        comp_dir = base / "compliance"
        comp_dir.mkdir(parents=True)
        fname = comp_dir / "scan_cis.csv"
        fname.write_bytes(b"ignored")

        scan.output_location = str(base / "scan.zip")
        scan.save()
        with tempfile.TemporaryDirectory() as tmp:
            tmp_path = Path(tmp)
            base = tmp_path / "reports"
            comp_dir = base / "compliance"
            comp_dir.mkdir(parents=True, exist_ok=True)
            fname = comp_dir / "scan_cis.csv"
            fname.write_bytes(b"ignored")

        monkeypatch.setattr(
            glob,
            "glob",
            lambda p: [str(fname)] if p.endswith("*_cis_1.4_aws.csv") else [],
        )
            scan.output_location = str(base / "scan.zip")
            scan.save()

        url = reverse("scan-compliance", kwargs={"pk": scan.id, "name": "cis_1.4_aws"})
        resp = authenticated_client.get(url)
        assert resp.status_code == status.HTTP_200_OK
        cd = resp["Content-Disposition"]
        assert cd.startswith('attachment; filename="')
        assert cd.endswith(f'filename="{fname.name}"')
            monkeypatch.setattr(
                glob,
                "glob",
                lambda p: [str(fname)] if p.endswith("*_cis_1.4_aws.csv") else [],
            )

            url = reverse(
                "scan-compliance", kwargs={"pk": scan.id, "name": "cis_1.4_aws"}
            )
            resp = authenticated_client.get(url)
            assert resp.status_code == status.HTTP_200_OK
            cd = resp["Content-Disposition"]
            assert cd.startswith('attachment; filename="')
            assert cd.endswith(f'filename="{fname.name}"')

    @patch("api.v1.views.Task.objects.get")
    @patch("api.v1.views.TaskSerializer")
@@ -2529,6 +2640,36 @@ class TestScanViewSet:

        assert response.status_code == status.HTTP_404_NOT_FOUND

    @patch("api.v1.views.TaskSerializer")
    def test__get_task_status_finds_task_using_kwargs(
        self, mock_task_serializer, authenticated_client, scans_fixture
    ):
        scan = scans_fixture[0]
        scan.state = StateChoices.COMPLETED
        scan.output_location = "dummy"
        scan.save()

        task_result = TaskResult.objects.create(
            task_name="scan-report",
            task_kwargs={"scan_id": str(scan.id)},
        )

        task = Task.objects.create(
            tenant_id=scan.tenant_id,
            task_runner_task=task_result,
        )

        mock_task_serializer.return_value.data = {
            "id": str(task.id),
            "state": StateChoices.EXECUTING,
        }

        url = reverse("scan-report", kwargs={"pk": scan.id})
        response = authenticated_client.get(url)

        assert response.status_code == status.HTTP_202_ACCEPTED
        assert response.data["id"] == str(task.id)

    @patch("api.v1.views.get_s3_client")
    @patch("api.v1.views.sentry_sdk.capture_exception")
    def test_compliance_list_objects_client_error(
@@ -2579,7 +2720,10 @@ class TestScanViewSet:
        response = authenticated_client.get(url)

        assert response.status_code == status.HTTP_404_NOT_FOUND
        assert response.json()["errors"]["detail"] == "The scan has no reports."
        assert (
            response.json()["errors"]["detail"]
            == "The scan has no reports, or the report generation task has not started yet."
        )

    @patch("api.v1.views.get_s3_client")
    def test_report_s3_client_error_other(
@@ -3096,7 +3240,9 @@ class TestFindingViewSet:
        )
        assert response.status_code == status.HTTP_404_NOT_FOUND

    def test_findings_metadata_retrieve(self, authenticated_client, findings_fixture):
    def test_findings_metadata_retrieve(
        self, authenticated_client, findings_fixture, backfill_scan_metadata_fixture
    ):
        finding_1, *_ = findings_fixture
        response = authenticated_client.get(
            reverse("finding-metadata"),

@@ -3119,14 +3265,14 @@ class TestFindingViewSet:
        )
        # assert data["data"]["attributes"]["tags"] == expected_tags

    def test_findings_metadata_severity_retrieve(
        self, authenticated_client, findings_fixture
    def test_findings_metadata_resource_filter_retrieve(
        self, authenticated_client, findings_fixture, backfill_scan_metadata_fixture
    ):
        finding_1, *_ = findings_fixture
        response = authenticated_client.get(
            reverse("finding-metadata"),
            {
                "filter[severity__in]": ["low", "medium"],
                "filter[region]": "eu-west-1",
                "filter[inserted_at]": finding_1.inserted_at.strftime("%Y-%m-%d"),
            },
        )

@@ -3177,6 +3323,29 @@ class TestFindingViewSet:
            ]
        }

    def test_findings_latest(self, authenticated_client, latest_scan_finding):
        response = authenticated_client.get(
            reverse("finding-latest"),
        )
        assert response.status_code == status.HTTP_200_OK
        # The latest scan only has one finding, in comparison with `GET /findings`
        assert len(response.json()["data"]) == 1
        assert (
            response.json()["data"][0]["attributes"]["status"]
            == latest_scan_finding.status
        )

    def test_findings_metadata_latest(self, authenticated_client, latest_scan_finding):
        response = authenticated_client.get(
            reverse("finding-metadata_latest"),
        )
        assert response.status_code == status.HTTP_200_OK
        attributes = response.json()["data"]["attributes"]

        assert attributes["services"] == latest_scan_finding.resource_services
        assert attributes["regions"] == latest_scan_finding.resource_regions
        assert attributes["resource_types"] == latest_scan_finding.resource_types


@pytest.mark.django_db
class TestJWTFields:
@@ -4814,9 +4983,8 @@ class TestOverviewViewSet:
        assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 2
        assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 1
        assert response.json()["data"][0]["attributes"]["findings"]["muted"] == 1
        assert response.json()["data"][0]["attributes"]["resources"]["total"] == len(
            resources_fixture
        )
        # Since we rely on completed scans, there are only 2 resources now
        assert response.json()["data"][0]["attributes"]["resources"]["total"] == 2

    def test_overview_services_list_no_required_filters(
        self, authenticated_client, scan_summaries_fixture
@@ -1,11 +1,14 @@
from datetime import datetime, timezone

from allauth.socialaccount.providers.oauth2.client import OAuth2Client
from django.contrib.postgres.aggregates import ArrayAgg
from django.db.models import Subquery
from rest_framework.exceptions import NotFound, ValidationError

from api.db_router import MainRouter
from api.exceptions import InvitationTokenExpiredException
from api.models import Invitation, Provider
from api.models import Invitation, Provider, Resource
from api.v1.serializers import FindingMetadataSerializer
from prowler.providers.aws.aws_provider import AwsProvider
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.common.models import Connection

@@ -205,3 +208,33 @@ def validate_invitation(
    )

    return invitation


# ToRemove after removing the fallback mechanism in /findings/metadata
def get_findings_metadata_no_aggregations(tenant_id: str, filtered_queryset):
    filtered_ids = filtered_queryset.order_by().values("id")

    relevant_resources = Resource.all_objects.filter(
        tenant_id=tenant_id, findings__id__in=Subquery(filtered_ids)
    ).only("service", "region", "type")

    aggregation = relevant_resources.aggregate(
        services=ArrayAgg("service", flat=True),
        regions=ArrayAgg("region", flat=True),
        resource_types=ArrayAgg("type", flat=True),
    )

    services = sorted(set(aggregation["services"] or []))
    regions = sorted({region for region in aggregation["regions"] or [] if region})
    resource_types = sorted(set(aggregation["resource_types"] or []))

    result = {
        "services": services,
        "regions": regions,
        "resource_types": resource_types,
    }

    serializer = FindingMetadataSerializer(data=result)
    serializer.is_valid(raise_exception=True)

    return serializer.data
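The helper is the fallback path for both findings metadata endpoints: when ResourceScanSummary rows are missing, the views call it with the already-filtered findings queryset while a backfill task repopulates the summaries. Its call shape, mirroring the view code further down:

# Mirrors the call sites in the FindingViewSet actions shown below.
return Response(
    get_findings_metadata_no_aggregations(
        tenant_id, self.filter_queryset(self.get_queryset())
    )
)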
@@ -0,0 +1,33 @@
from rest_framework.response import Response


class PaginateByPkMixin:
    """
    Mixin to paginate on a list of PKs (cheaper than heavy JOINs),
    re-fetch the full objects with the desired select/prefetch,
    re-sort them to preserve DB ordering, then serialize + return.
    """

    def paginate_by_pk(
        self,
        request,  # noqa: F841
        base_queryset,
        manager,
        select_related: list[str] | None = None,
        prefetch_related: list[str] | None = None,
    ) -> Response:
        pk_list = base_queryset.values_list("id", flat=True)
        page = self.paginate_queryset(pk_list)
        if page is None:
            return Response(self.get_serializer(base_queryset, many=True).data)

        queryset = manager.filter(id__in=page)
        if select_related:
            queryset = queryset.select_related(*select_related)
        if prefetch_related:
            queryset = queryset.prefetch_related(*prefetch_related)

        queryset = sorted(queryset, key=lambda obj: page.index(obj.id))

        serialized = self.get_serializer(queryset, many=True).data
        return self.get_paginated_response(serialized)
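Usage mirrors how FindingViewSet adopts the mixin later in this diff: paginate over bare primary keys first, then hydrate only the current page with its select_related/prefetch_related:

class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
    def list(self, request, *args, **kwargs):
        filtered_queryset = self.filter_queryset(self.get_queryset())
        return self.paginate_by_pk(
            request,
            filtered_queryset,
            manager=Finding.all_objects,
            select_related=["scan"],
            prefetch_related=["resources"],
        )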
@@ -119,9 +119,9 @@ from rest_framework_json_api import serializers
                "type": "email",
                "description": "User microsoft email address.",
            },
            "encrypted_password": {
            "password": {
                "type": "string",
                "description": "User encrypted password.",
                "description": "User password.",
            },
        },
        "required": [

@@ -129,7 +129,7 @@ from rest_framework_json_api import serializers
            "client_secret",
            "tenant_id",
            "user",
            "encrypted_password",
            "password",
        ],
    },
    {

@@ -154,6 +154,17 @@ from rest_framework_json_api import serializers
        },
        "required": ["client_id", "client_secret", "refresh_token"],
    },
    {
        "type": "object",
        "title": "GCP Service Account Key",
        "properties": {
            "service_account_key": {
                "type": "object",
                "description": "The service account key for GCP.",
            }
        },
        "required": ["service_account_key"],
    },
    {
        "type": "object",
        "title": "Kubernetes Static Credentials",
@@ -1159,6 +1159,8 @@ class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
            )
        elif secret_type == ProviderSecret.TypeChoices.ROLE:
            serializer = AWSRoleAssumptionProviderSecret(data=secret)
        elif secret_type == ProviderSecret.TypeChoices.SERVICE_ACCOUNT:
            serializer = GCPServiceAccountProviderSecret(data=secret)
        else:
            raise serializers.ValidationError(
                {"secret_type": f"Secret type not supported: {secret_type}"}

@@ -1197,7 +1199,7 @@ class M365ProviderSecret(serializers.Serializer):
    client_secret = serializers.CharField()
    tenant_id = serializers.CharField()
    user = serializers.EmailField()
    encrypted_password = serializers.CharField()
    password = serializers.CharField()

    class Meta:
        resource_name = "provider-secrets"

@@ -1212,6 +1214,13 @@ class GCPProviderSecret(serializers.Serializer):
        resource_name = "provider-secrets"


class GCPServiceAccountProviderSecret(serializers.Serializer):
    service_account_key = serializers.JSONField()

    class Meta:
        resource_name = "provider-secrets"


class KubernetesProviderSecret(serializers.Serializer):
    kubeconfig_content = serializers.CharField()
@@ -1,5 +1,6 @@
import glob
import os
from datetime import datetime, timedelta, timezone

import sentry_sdk
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter

@@ -20,6 +21,7 @@ from django.db.models import Count, Exists, F, OuterRef, Prefetch, Q, Subquery,
from django.db.models.functions import Coalesce
from django.http import HttpResponse
from django.urls import reverse
from django.utils.dateparse import parse_date
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
from django_celery_beat.models import PeriodicTask

@@ -48,6 +50,7 @@ from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
from tasks.beat import schedule_provider_scan
from tasks.jobs.export import get_s3_client
from tasks.tasks import (
    backfill_scan_resource_summaries_task,
    check_provider_connection_task,
    delete_provider_task,
    delete_tenant_task,

@@ -62,6 +65,7 @@ from api.filters import (
    FindingFilter,
    IntegrationFilter,
    InvitationFilter,
    LatestFindingFilter,
    MembershipFilter,
    ProviderFilter,
    ProviderGroupFilter,

@@ -87,6 +91,7 @@ from api.models import (
    ProviderSecret,
    Resource,
    ResourceFindingMapping,
    ResourceScanSummary,
    Role,
    RoleProviderGroupRelationship,
    Scan,

@@ -100,7 +105,13 @@ from api.models import (
from api.pagination import ComplianceOverviewPagination
from api.rbac.permissions import Permissions, get_providers, get_role
from api.rls import Tenant
from api.utils import CustomOAuth2Client, validate_invitation
from api.utils import (
    CustomOAuth2Client,
    get_findings_metadata_no_aggregations,
    validate_invitation,
)
from api.uuid_utils import datetime_to_uuid7, uuid7_start
from api.v1.mixins import PaginateByPkMixin
from api.v1.serializers import (
    ComplianceOverviewFullSerializer,
    ComplianceOverviewMetadataSerializer,
@@ -249,7 +260,7 @@ class SchemaView(SpectacularAPIView):

    def get(self, request, *args, **kwargs):
        spectacular_settings.TITLE = "Prowler API"
        spectacular_settings.VERSION = "1.7.0"
        spectacular_settings.VERSION = "1.9.0"
        spectacular_settings.DESCRIPTION = (
            "Prowler API specification.\n\nThis file is auto-generated."
        )
@@ -1075,7 +1086,7 @@ class ProviderViewSet(BaseRLSViewSet):
        task = check_provider_connection_task.delay(
            provider_id=pk, tenant_id=self.request.tenant_id
        )
        prowler_task = Task.objects.get(id=task.id)
        prowler_task = Task.objects.get_with_retry(id=task.id)
        serializer = TaskSerializer(prowler_task)
        return Response(
            data=serializer.data,

@@ -1098,7 +1109,7 @@ class ProviderViewSet(BaseRLSViewSet):
        task = delete_provider_task.delay(
            provider_id=pk, tenant_id=self.request.tenant_id
        )
        prowler_task = Task.objects.get(id=task.id)
        prowler_task = Task.objects.get_with_retry(id=task.id)
        serializer = TaskSerializer(prowler_task)
        return Response(
            data=serializer.data,

@@ -1149,7 +1160,9 @@ class ProviderViewSet(BaseRLSViewSet):
            200: OpenApiResponse(description="Report obtained successfully"),
            202: OpenApiResponse(description="The task is in progress"),
            403: OpenApiResponse(description="There is a problem with credentials"),
            404: OpenApiResponse(description="The scan has no reports"),
            404: OpenApiResponse(
                description="The scan has no reports, or the report generation task has not started yet"
            ),
        },
    ),
    compliance=extend_schema(
@@ -1270,7 +1283,7 @@ class ScanViewSet(BaseRLSViewSet):
        try:
            task = Task.objects.get(
                task_runner_task__task_name="scan-report",
                task_runner_task__task_args__contains=str(scan_instance.id),
                task_runner_task__task_kwargs__contains=str(scan_instance.id),
            )
        except Task.DoesNotExist:
            return None
@@ -1352,7 +1365,9 @@ class ScanViewSet(BaseRLSViewSet):
            code = e.response.get("Error", {}).get("Code")
            if code == "NoSuchKey":
                return Response(
                    {"detail": "The scan has no reports."},
                    {
                        "detail": "The scan has no reports, or the report generation task has not started yet."
                    },
                    status=status.HTTP_404_NOT_FOUND,
                )
            return Response(

@@ -1365,7 +1380,9 @@ class ScanViewSet(BaseRLSViewSet):
        files = glob.glob(path_pattern)
        if not files:
            return Response(
                {"detail": "The scan has no reports."},
                {
                    "detail": "The scan has no reports, or the report generation task has not started yet."
                },
                status=status.HTTP_404_NOT_FOUND,
            )
        filepath = files[0]

@@ -1391,7 +1408,10 @@ class ScanViewSet(BaseRLSViewSet):

        if not scan.output_location:
            return Response(
                {"detail": "The scan has no reports."}, status=status.HTTP_404_NOT_FOUND
                {
                    "detail": "The scan has no reports, or the report generation task has not started yet."
                },
                status=status.HTTP_404_NOT_FOUND,
            )

        if scan.output_location.startswith("s3://"):

@@ -1429,7 +1449,10 @@ class ScanViewSet(BaseRLSViewSet):

        if not scan.output_location:
            return Response(
                {"detail": "The scan has no reports."}, status=status.HTTP_404_NOT_FOUND
                {
                    "detail": "The scan has no reports, or the report generation task has not started yet."
                },
                status=status.HTTP_404_NOT_FOUND,
            )

        if scan.output_location.startswith("s3://"):

@@ -1466,10 +1489,10 @@ class ScanViewSet(BaseRLSViewSet):
            },
        )

        prowler_task = Task.objects.get_with_retry(id=task.id)
        scan.task_id = task.id
        scan.save(update_fields=["task_id"])

        prowler_task = Task.objects.get(id=task.id)
        self.response_serializer_class = TaskSerializer
        output_serializer = self.get_serializer(prowler_task)
@@ -1662,10 +1685,24 @@ class ResourceViewSet(BaseRLSViewSet):
        ],
        filters=True,
    ),
    latest=extend_schema(
        tags=["Finding"],
        summary="List the latest findings",
        description="Retrieve a list of the latest findings from the latest scans for each provider with options for "
        "filtering by various criteria.",
        filters=True,
    ),
    metadata_latest=extend_schema(
        tags=["Finding"],
        summary="Retrieve metadata values from the latest findings",
        description="Fetch unique metadata values from a set of findings from the latest scans for each provider. "
        "This is useful for dynamic filtering.",
        filters=True,
    ),
)
@method_decorator(CACHE_DECORATOR, name="list")
@method_decorator(CACHE_DECORATOR, name="retrieve")
class FindingViewSet(BaseRLSViewSet):
class FindingViewSet(PaginateByPkMixin, BaseRLSViewSet):
    queryset = Finding.all_objects.all()
    serializer_class = FindingSerializer
    filterset_class = FindingFilter

@@ -1697,11 +1734,16 @@ class FindingViewSet(BaseRLSViewSet):
    def get_serializer_class(self):
        if self.action == "findings_services_regions":
            return FindingDynamicFilterSerializer
        elif self.action == "metadata":
        elif self.action in ["metadata", "metadata_latest"]:
            return FindingMetadataSerializer

        return super().get_serializer_class()

    def get_filterset_class(self):
        if self.action in ["latest", "metadata_latest"]:
            return LatestFindingFilter
        return FindingFilter

    def get_queryset(self):
        tenant_id = self.request.tenant_id
        user_roles = get_role(self.request.user)
@@ -1741,21 +1783,14 @@ class FindingViewSet(BaseRLSViewSet):
        return super().filter_queryset(queryset)

    def list(self, request, *args, **kwargs):
        base_qs = self.filter_queryset(self.get_queryset())
        paginated_ids = self.paginate_queryset(base_qs.values_list("id", flat=True))
        if paginated_ids is not None:
            ids = list(paginated_ids)
            findings = (
                Finding.all_objects.filter(tenant_id=self.request.tenant_id, id__in=ids)
                .select_related("scan")
                .prefetch_related("resources")
            )
            # Re-sort in Python to preserve ordering:
            findings = sorted(findings, key=lambda x: ids.index(x.id))
            serializer = self.get_serializer(findings, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(base_qs, many=True)
        return Response(serializer.data)
        filtered_queryset = self.filter_queryset(self.get_queryset())
        return self.paginate_by_pk(
            request,
            filtered_queryset,
            manager=Finding.all_objects,
            select_related=["scan"],
            prefetch_related=["resources"],
        )

    @action(detail=False, methods=["get"], url_name="findings_services_regions")
    def findings_services_regions(self, request):
@@ -1780,25 +1815,103 @@ class FindingViewSet(BaseRLSViewSet):

    @action(detail=False, methods=["get"], url_name="metadata")
    def metadata(self, request):
        tenant_id = self.request.tenant_id
        queryset = self.get_queryset()
        filtered_queryset = self.filter_queryset(queryset)
        # Force filter validation
        filtered_queryset = self.filter_queryset(self.get_queryset())

        filtered_ids = filtered_queryset.order_by().values("id")
        tenant_id = request.tenant_id
        query_params = request.query_params

        relevant_resources = Resource.all_objects.filter(
            tenant_id=tenant_id, findings__id__in=Subquery(filtered_ids)
        ).only("service", "region", "type")
        queryset = ResourceScanSummary.objects.filter(tenant_id=tenant_id)
        scan_based_filters = {}

        aggregation = relevant_resources.aggregate(
            services=ArrayAgg("service", flat=True),
            regions=ArrayAgg("region", flat=True),
            resource_types=ArrayAgg("type", flat=True),
        if scans := query_params.get("filter[scan__in]") or query_params.get(
            "filter[scan]"
        ):
            queryset = queryset.filter(scan_id__in=scans.split(","))
            scan_based_filters = {"id__in": scans.split(",")}
        else:
            exact = query_params.get("filter[inserted_at]")
            gte = query_params.get("filter[inserted_at__gte]")
            lte = query_params.get("filter[inserted_at__lte]")

            date_filters = {}
            if exact:
                date = parse_date(exact)
                datetime_start = datetime.combine(
                    date, datetime.min.time(), tzinfo=timezone.utc
                )
                datetime_end = datetime_start + timedelta(days=1)
                date_filters["scan_id__gte"] = uuid7_start(
                    datetime_to_uuid7(datetime_start)
                )
                date_filters["scan_id__lt"] = uuid7_start(
                    datetime_to_uuid7(datetime_end)
                )
            else:
                if gte:
                    date_start = parse_date(gte)
                    datetime_start = datetime.combine(
                        date_start, datetime.min.time(), tzinfo=timezone.utc
                    )
                    date_filters["scan_id__gte"] = uuid7_start(
                        datetime_to_uuid7(datetime_start)
                    )
                if lte:
                    date_end = parse_date(lte)
                    datetime_end = datetime.combine(
                        date_end + timedelta(days=1),
                        datetime.min.time(),
                        tzinfo=timezone.utc,
                    )
                    date_filters["scan_id__lt"] = uuid7_start(
                        datetime_to_uuid7(datetime_end)
                    )

            if date_filters:
                queryset = queryset.filter(**date_filters)
                scan_based_filters = {
                    key.lstrip("scan_"): value for key, value in date_filters.items()
                }

        # ToRemove: Temporary fallback mechanism
        if not queryset.exists():
            scan_ids = Scan.objects.filter(
                tenant_id=tenant_id, **scan_based_filters
            ).values_list("id", flat=True)
            for scan_id in scan_ids:
                backfill_scan_resource_summaries_task.apply_async(
                    kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
                )
            return Response(
                get_findings_metadata_no_aggregations(tenant_id, filtered_queryset)
            )

        if service_filter := query_params.get("filter[service]") or query_params.get(
            "filter[service__in]"
        ):
            queryset = queryset.filter(service__in=service_filter.split(","))
        if region_filter := query_params.get("filter[region]") or query_params.get(
            "filter[region__in]"
        ):
            queryset = queryset.filter(region__in=region_filter.split(","))
        if resource_type_filter := query_params.get(
            "filter[resource_type]"
        ) or query_params.get("filter[resource_type__in]"):
            queryset = queryset.filter(
                resource_type__in=resource_type_filter.split(",")
            )

        services = list(
            queryset.values_list("service", flat=True).distinct().order_by("service")
        )
        regions = list(
            queryset.values_list("region", flat=True).distinct().order_by("region")
        )
        resource_types = list(
            queryset.values_list("resource_type", flat=True)
            .distinct()
            .order_by("resource_type")
        )

        services = sorted(set(aggregation["services"] or []))
        regions = sorted({region for region in aggregation["regions"] or [] if region})
        resource_types = sorted(set(aggregation["resource_types"] or []))

        result = {
            "services": services,
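The date handling above works because scan IDs are UUIDv7, whose leading bits encode the creation timestamp, so a calendar-day filter can be rewritten as a lexicographic scan_id range that the (tenant_id, scan_id, ...) indexes serve directly. A sketch using the helpers named in the diff (their implementations live in api.uuid_utils and are not shown here):

from datetime import datetime, timedelta, timezone

day = datetime(2025, 5, 12, tzinfo=timezone.utc)
lower = uuid7_start(datetime_to_uuid7(day))                      # first UUIDv7 of the day
upper = uuid7_start(datetime_to_uuid7(day + timedelta(days=1)))  # exclusive upper bound

# tenant_id assumed in scope; equivalent to filter[inserted_at]=2025-05-12.
summaries = ResourceScanSummary.objects.filter(
    tenant_id=tenant_id, scan_id__gte=lower, scan_id__lt=upper
)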
@@ -1808,7 +1921,108 @@ class FindingViewSet(BaseRLSViewSet):

        serializer = self.get_serializer(data=result)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.data)

    @action(detail=False, methods=["get"], url_name="latest")
    def latest(self, request):
        tenant_id = request.tenant_id
        filtered_queryset = self.filter_queryset(self.get_queryset())

        latest_scan_ids = (
            Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
            .values_list("id", flat=True)
        )
        filtered_queryset = filtered_queryset.filter(
            tenant_id=tenant_id, scan_id__in=latest_scan_ids
        )

        return self.paginate_by_pk(
            request,
            filtered_queryset,
            manager=Finding.all_objects,
            select_related=["scan"],
            prefetch_related=["resources"],
        )

    @action(
        detail=False,
        methods=["get"],
        url_name="metadata_latest",
        url_path="metadata/latest",
    )
    def metadata_latest(self, request):
        tenant_id = request.tenant_id
        query_params = request.query_params

        latest_scans_queryset = (
            Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
        )
        latest_scans_ids = list(latest_scans_queryset.values_list("id", flat=True))

        queryset = ResourceScanSummary.objects.filter(
            tenant_id=tenant_id,
            scan_id__in=latest_scans_queryset.values_list("id", flat=True),
        )
        # ToRemove: Temporary fallback mechanism
        present_ids = set(
            ResourceScanSummary.objects.filter(
                tenant_id=tenant_id, scan_id__in=latest_scans_ids
            )
            .values_list("scan_id", flat=True)
            .distinct()
        )
        missing_scan_ids = [sid for sid in latest_scans_ids if sid not in present_ids]
        if missing_scan_ids:
            for scan_id in missing_scan_ids:
                backfill_scan_resource_summaries_task.apply_async(
                    kwargs={"tenant_id": tenant_id, "scan_id": scan_id}
                )
            return Response(
                get_findings_metadata_no_aggregations(
                    tenant_id, self.filter_queryset(self.get_queryset())
                )
            )

        if service_filter := query_params.get("filter[service]") or query_params.get(
            "filter[service__in]"
        ):
            queryset = queryset.filter(service__in=service_filter.split(","))
        if region_filter := query_params.get("filter[region]") or query_params.get(
            "filter[region__in]"
        ):
            queryset = queryset.filter(region__in=region_filter.split(","))
        if resource_type_filter := query_params.get(
            "filter[resource_type]"
        ) or query_params.get("filter[resource_type__in]"):
            queryset = queryset.filter(
                resource_type__in=resource_type_filter.split(",")
            )

        services = list(
            queryset.values_list("service", flat=True).distinct().order_by("service")
        )
        regions = list(
            queryset.values_list("region", flat=True).distinct().order_by("region")
        )
        resource_types = list(
            queryset.values_list("resource_type", flat=True)
            .distinct()
            .order_by("resource_type")
        )

        result = {
            "services": services,
            "regions": regions,
            "resource_types": resource_types,
        }

        serializer = self.get_serializer(data=result)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data)


@extend_schema_view(
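Both latest endpoints lean on PostgreSQL's DISTINCT ON to pick the newest completed scan per provider. A rough sketch of the SQL the queryset compiles to (illustrative, not taken from the PR):

# Postgres requires the ORDER BY to start with the DISTINCT ON expressions,
# which is why the queryset orders by provider_id before -inserted_at:
#
#   SELECT DISTINCT ON ("provider_id") "id"
#   FROM "scans"
#   WHERE "tenant_id" = %s AND "state" = 'completed'
#   ORDER BY "provider_id" ASC, "inserted_at" DESC;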
@@ -2375,8 +2589,8 @@ class OverviewViewSet(BaseRLSViewSet):

        def _get_filtered_queryset(model):
            if role.unlimited_visibility:
                return model.objects.filter(tenant_id=self.request.tenant_id)
            return model.objects.filter(
                return model.all_objects.filter(tenant_id=self.request.tenant_id)
            return model.all_objects.filter(
                tenant_id=self.request.tenant_id, scan__provider__in=providers
            )
@@ -2420,18 +2634,20 @@ class OverviewViewSet(BaseRLSViewSet):
        tenant_id = self.request.tenant_id

        latest_scan_ids = (
            Scan.objects.filter(
                tenant_id=tenant_id,
                state=StateChoices.COMPLETED,
            )
            Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
            .values_list("id", flat=True)
        )

        findings_aggregated = (
            ScanSummary.objects.filter(tenant_id=tenant_id, scan_id__in=latest_scan_ids)
            .values("scan__provider__provider")
            ScanSummary.all_objects.filter(
                tenant_id=tenant_id, scan_id__in=latest_scan_ids
            )
            .values(
                "scan__provider_id",
                provider=F("scan__provider__provider"),
            )
            .annotate(
                findings_passed=Coalesce(Sum("_pass"), 0),
                findings_failed=Coalesce(Sum("fail"), 0),
@@ -2441,22 +2657,20 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
)
|
||||
|
||||
resources_aggregated = (
|
||||
Resource.objects.filter(tenant_id=tenant_id)
|
||||
.values("provider__provider")
|
||||
Resource.all_objects.filter(tenant_id=tenant_id)
|
||||
.values("provider_id")
|
||||
.annotate(total_resources=Count("id"))
|
||||
)
|
||||
resources_dict = {
|
||||
row["provider__provider"]: row["total_resources"]
|
||||
for row in resources_aggregated
|
||||
resource_map = {
|
||||
row["provider_id"]: row["total_resources"] for row in resources_aggregated
|
||||
}
|
||||
|
||||
overview = []
|
||||
for row in findings_aggregated:
|
||||
provider_type = row["scan__provider__provider"]
|
||||
overview.append(
|
||||
{
|
||||
"provider": provider_type,
|
||||
"total_resources": resources_dict.get(provider_type, 0),
|
||||
"provider": row["provider"],
|
||||
"total_resources": resource_map.get(row["scan__provider_id"], 0),
|
||||
"total_findings": row["total_findings"],
|
||||
"findings_passed": row["findings_passed"],
|
||||
"findings_failed": row["findings_failed"],
|
||||
@@ -2464,8 +2678,10 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
}
|
||||
)
|
||||
|
||||
serializer = OverviewProviderSerializer(overview, many=True)
|
||||
return Response(serializer.data, status=status.HTTP_200_OK)
|
||||
return Response(
|
||||
OverviewProviderSerializer(overview, many=True).data,
|
||||
status=status.HTTP_200_OK,
|
||||
)
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="findings")
|
||||
def findings(self, request):
|
||||
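The hunk above swaps a per-row correlated `Subquery` for a single latest-scan-per-provider lookup. A minimal fragment of that pattern, with explanatory comments (names follow the diff; `.distinct(*fields)` maps to PostgreSQL's `DISTINCT ON`, so this assumes a PostgreSQL backend):

```python
# Resolve the newest completed scan per provider once, then filter with a
# plain IN clause instead of annotating every summary row with a subquery.
latest_scan_ids = (
    Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
    .order_by("provider_id", "-inserted_at")  # newest scan first within each provider
    .distinct("provider_id")                  # keep one row per provider
    .values_list("id", flat=True)
)
```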
@@ -2473,22 +2689,16 @@ class OverviewViewSet(BaseRLSViewSet):
        queryset = self.get_queryset()
        filtered_queryset = self.filter_queryset(queryset)

        latest_scan_subquery = (
            Scan.objects.filter(
                tenant_id=tenant_id,
                state=StateChoices.COMPLETED,
                provider_id=OuterRef("scan__provider_id"),
            )
            .order_by("-inserted_at")
            .values("id")[:1]
        latest_scan_ids = (
            Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
            .values_list("id", flat=True)
        )

        annotated_queryset = filtered_queryset.annotate(
            latest_scan_id=Subquery(latest_scan_subquery)
        filtered_queryset = filtered_queryset.filter(
            tenant_id=tenant_id, scan_id__in=latest_scan_ids
        )

        filtered_queryset = annotated_queryset.filter(scan_id=F("latest_scan_id"))

        aggregated_totals = filtered_queryset.aggregate(
            _pass=Sum("_pass") or 0,
            fail=Sum("fail") or 0,
@@ -2518,22 +2728,16 @@ class OverviewViewSet(BaseRLSViewSet):
        queryset = self.get_queryset()
        filtered_queryset = self.filter_queryset(queryset)

        latest_scan_subquery = (
            Scan.objects.filter(
                tenant_id=tenant_id,
                state=StateChoices.COMPLETED,
                provider_id=OuterRef("scan__provider_id"),
            )
            .order_by("-inserted_at")
            .values("id")[:1]
        latest_scan_ids = (
            Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
            .values_list("id", flat=True)
        )

        annotated_queryset = filtered_queryset.annotate(
            latest_scan_id=Subquery(latest_scan_subquery)
        filtered_queryset = filtered_queryset.filter(
            tenant_id=tenant_id, scan_id__in=latest_scan_ids
        )

        filtered_queryset = annotated_queryset.filter(scan_id=F("latest_scan_id"))

        severity_counts = (
            filtered_queryset.values("severity")
            .annotate(count=Sum("total"))
@@ -2554,22 +2758,16 @@ class OverviewViewSet(BaseRLSViewSet):
        queryset = self.get_queryset()
        filtered_queryset = self.filter_queryset(queryset)

        latest_scan_subquery = (
            Scan.objects.filter(
                tenant_id=tenant_id,
                state=StateChoices.COMPLETED,
                provider_id=OuterRef("scan__provider_id"),
            )
            .order_by("-inserted_at")
            .values("id")[:1]
        latest_scan_ids = (
            Scan.all_objects.filter(tenant_id=tenant_id, state=StateChoices.COMPLETED)
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
            .values_list("id", flat=True)
        )

        annotated_queryset = filtered_queryset.annotate(
            latest_scan_id=Subquery(latest_scan_subquery)
        filtered_queryset = filtered_queryset.filter(
            tenant_id=tenant_id, scan_id__in=latest_scan_ids
        )

        filtered_queryset = annotated_queryset.filter(scan_id=F("latest_scan_id"))

        services_data = (
            filtered_queryset.values("service")
            .annotate(_pass=Sum("_pass"))
@@ -2625,7 +2823,7 @@ class ScheduleViewSet(BaseRLSViewSet):
        with transaction.atomic():
            task = schedule_provider_scan(provider_instance)

            prowler_task = Task.objects.get(id=task.id)
            prowler_task = Task.objects.get_with_retry(id=task.id)
            self.response_serializer_class = TaskSerializer
            output_serializer = self.get_serializer(prowler_task)
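A plain `Task.objects.get(id=task.id)` can race the Celery result backend writing the task row, which is presumably why the diff switches to `get_with_retry`. The manager method itself is not shown in this diff, so the following is only a hypothetical sketch of the retry-on-`DoesNotExist` idea (the attempt count and delay stand in for whatever settings the real implementation uses):

```python
import time

def get_with_retry(manager, attempts=5, delay=0.1, **lookup):
    # Hypothetical helper: retry a .get() until the row exists or attempts run out.
    for attempt in range(attempts):
        try:
            return manager.get(**lookup)
        except manager.model.DoesNotExist:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)  # give the result backend time to commit the row
```
|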
@@ -111,6 +111,7 @@ SPECTACULAR_SETTINGS = {
    "PREPROCESSING_HOOKS": [
        "drf_spectacular_jsonapi.hooks.fix_nested_path_parameters",
    ],
    "TITLE": "API Reference - Prowler",
}

WSGI_APPLICATION = "config.wsgi.application"
|
@@ -10,6 +10,7 @@ from django.urls import reverse
from django_celery_results.models import TaskResult
from rest_framework import status
from rest_framework.test import APIClient
from tasks.jobs.backfill import backfill_resource_scan_summaries

from api.db_utils import rls_transaction
from api.models import (
@@ -920,6 +921,55 @@ def integrations_fixture(providers_fixture):
    return integration1, integration2


@pytest.fixture
def backfill_scan_metadata_fixture(scans_fixture, findings_fixture):
    for scan_instance in scans_fixture:
        tenant_id = scan_instance.tenant_id
        scan_id = scan_instance.id
        backfill_resource_scan_summaries(tenant_id=tenant_id, scan_id=scan_id)


@pytest.fixture(scope="function")
def latest_scan_finding(authenticated_client, providers_fixture, resources_fixture):
    provider = providers_fixture[0]
    tenant_id = str(providers_fixture[0].tenant_id)
    resource = resources_fixture[0]
    scan = Scan.objects.create(
        name="latest completed scan",
        provider=provider,
        trigger=Scan.TriggerChoices.MANUAL,
        state=StateChoices.COMPLETED,
        tenant_id=tenant_id,
    )
    finding = Finding.objects.create(
        tenant_id=tenant_id,
        uid="test_finding_uid_1",
        scan=scan,
        delta="new",
        status=Status.FAIL,
        status_extended="test status extended ",
        impact=Severity.critical,
        impact_extended="test impact extended one",
        severity=Severity.critical,
        raw_result={
            "status": Status.FAIL,
            "impact": Severity.critical,
            "severity": Severity.critical,
        },
        tags={"test": "dev-qa"},
        check_id="test_check_id",
        check_metadata={
            "CheckId": "test_check_id",
            "Description": "test description apple sauce",
        },
        first_seen_at="2024-01-02T00:00:00Z",
    )

    finding.add_resources([resource])
    backfill_resource_scan_summaries(tenant_id, str(scan.id))
    return finding


def get_authorization_header(access_token: str) -> dict:
    return {"Authorization": f"Bearer {access_token}"}
|
@@ -0,0 +1,61 @@
from api.db_utils import rls_transaction
from api.models import (
    Resource,
    ResourceFindingMapping,
    ResourceScanSummary,
    Scan,
    StateChoices,
)


def backfill_resource_scan_summaries(tenant_id: str, scan_id: str):
    with rls_transaction(tenant_id):
        if ResourceScanSummary.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        ).exists():
            return {"status": "already backfilled"}

    with rls_transaction(tenant_id):
        if not Scan.objects.filter(
            tenant_id=tenant_id,
            id=scan_id,
            state__in=(StateChoices.COMPLETED, StateChoices.FAILED),
        ).exists():
            return {"status": "scan is not completed"}

        resource_ids_qs = (
            ResourceFindingMapping.objects.filter(
                tenant_id=tenant_id, finding__scan_id=scan_id
            )
            .values_list("resource_id", flat=True)
            .distinct()
        )

        resource_ids = list(resource_ids_qs)

        if not resource_ids:
            return {"status": "no resources to backfill"}

        resources_qs = Resource.objects.filter(
            tenant_id=tenant_id, id__in=resource_ids
        ).only("id", "service", "region", "type")

        summaries = []
        for resource in resources_qs.iterator():
            summaries.append(
                ResourceScanSummary(
                    tenant_id=tenant_id,
                    scan_id=scan_id,
                    resource_id=str(resource.id),
                    service=resource.service,
                    region=resource.region,
                    resource_type=resource.type,
                )
            )

        for i in range(0, len(summaries), 500):
            ResourceScanSummary.objects.bulk_create(
                summaries[i : i + 500], ignore_conflicts=True
            )

        return {"status": "backfilled", "inserted": len(summaries)}
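The job is idempotent: the early `already backfilled` return plus `ignore_conflicts=True` make re-runs safe, and the 500-row slices keep each `bulk_create` statement bounded. A small usage sketch of invoking it directly (for example from a Django shell; the UUIDs are placeholders):

```python
from tasks.jobs.backfill import backfill_resource_scan_summaries

result = backfill_resource_scan_summaries(
    tenant_id="00000000-0000-0000-0000-000000000000",  # placeholder tenant UUID
    scan_id="11111111-1111-1111-1111-111111111111",    # placeholder scan UUID
)
if result["status"] == "backfilled":
    print(f"Inserted {result['inserted']} resource scan summaries")
```
|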
@@ -45,6 +45,9 @@ from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azur
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
    ProwlerThreatScoreGCP,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_m365 import (
    ProwlerThreatScoreM365,
)
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.html.html import HTML
from prowler.lib.outputs.ocsf.ocsf import OCSF
@@ -85,6 +88,7 @@ COMPLIANCE_CLASS_MAP = {
    ],
    "m365": [
        (lambda name: name.startswith("cis_"), M365CIS),
        (lambda name: name == "prowler_threatscore_m365", ProwlerThreatScoreM365),
    ],
}
|
@@ -19,6 +19,7 @@ from api.models import (
    Finding,
    Provider,
    Resource,
    ResourceScanSummary,
    ResourceTag,
    Scan,
    ScanSummary,
@@ -121,7 +122,9 @@ def perform_prowler_scan(
    check_status_by_region = {}
    exception = None
    unique_resources = set()
    scan_resource_cache: set[tuple[str, str, str, str]] = set()
    start_time = time.time()
    exc = None

    with rls_transaction(tenant_id):
        provider_instance = Provider.objects.get(pk=provider_id)
@@ -137,7 +140,7 @@ def perform_prowler_scan(
            provider_instance.connected = True
        except Exception as e:
            provider_instance.connected = False
            raise ValueError(
            exc = ValueError(
                f"Provider {provider_instance.provider} is not connected: {e}"
            )
        finally:
@@ -146,6 +149,11 @@ def perform_prowler_scan(
            )
            provider_instance.save()

    # If the provider is not connected, raise an exception outside the transaction.
    # If raised within the transaction, the transaction will be rolled back and the provider will not be marked as not connected.
    if exc:
        raise exc

    prowler_scan = ProwlerScan(provider=prowler_provider, checks=checks_to_execute)

    resource_cache = {}
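The comment above describes the fix: capture the error inside the atomic block, persist the provider state, and re-raise only after the transaction has committed, so the rollback no longer undoes `provider_instance.save()`. A stripped-down, self-contained sketch of that pattern (the stub classes are illustrative; the RLS transaction wrapper is elided):

```python
class ProviderStub:
    connected = False

    def save(self):  # stand-in for provider_instance.save()
        print(f"saved: connected={self.connected}")

def check_connection():
    raise RuntimeError("credentials rejected")  # simulate a failed check

provider_instance = ProviderStub()
exc = None
try:
    check_connection()
    provider_instance.connected = True
except Exception as e:
    provider_instance.connected = False
    exc = ValueError(f"Provider is not connected: {e}")
finally:
    provider_instance.save()  # persists even when the check fails
# Outside the (hypothetical) transaction: raising here cannot roll back the save.
if exc:
    raise exc
```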
@@ -295,6 +303,16 @@ def perform_prowler_scan(
                        continue
                    region_dict[finding.check_id] = finding.status.value

                    # Update scan resource summaries
                    scan_resource_cache.add(
                        (
                            str(resource_instance.id),
                            resource_instance.service,
                            resource_instance.region,
                            resource_instance.type,
                        )
                    )

            # Update scan progress
            with rls_transaction(tenant_id):
                scan_instance.progress = progress
@@ -314,66 +332,90 @@ def perform_prowler_scan(
        scan_instance.unique_resource_count = len(unique_resources)
        scan_instance.save()

    if exception is None:
        try:
            regions = prowler_provider.get_regions()
        except AttributeError:
            regions = set()

        compliance_template = PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE[
            provider_instance.provider
        ]
        compliance_overview_by_region = {
            region: deepcopy(compliance_template) for region in regions
        }

        for region, check_status in check_status_by_region.items():
            compliance_data = compliance_overview_by_region.setdefault(
                region, deepcopy(compliance_template)
            )
            for check_name, status in check_status.items():
                generate_scan_compliance(
                    compliance_data,
                    provider_instance.provider,
                    check_name,
                    status,
                )

        # Prepare compliance overview objects
        compliance_overview_objects = []
        for region, compliance_data in compliance_overview_by_region.items():
            for compliance_id, compliance in compliance_data.items():
                compliance_overview_objects.append(
                    ComplianceOverview(
                        tenant_id=tenant_id,
                        scan=scan_instance,
                        region=region,
                        compliance_id=compliance_id,
                        framework=compliance["framework"],
                        version=compliance["version"],
                        description=compliance["description"],
                        requirements=compliance["requirements"],
                        requirements_passed=compliance["requirements_status"]["passed"],
                        requirements_failed=compliance["requirements_status"]["failed"],
                        requirements_manual=compliance["requirements_status"]["manual"],
                        total_requirements=compliance["total_requirements"],
                    )
                )
        try:
            with rls_transaction(tenant_id):
                ComplianceOverview.objects.bulk_create(
                    compliance_overview_objects, batch_size=100
                )
        except Exception as overview_exception:
            import sentry_sdk

            sentry_sdk.capture_exception(overview_exception)
            logger.error(
                f"Error storing compliance overview for scan {scan_id}: {overview_exception}"
            )
    if exception is not None:
        raise exception

    try:
        regions = prowler_provider.get_regions()
    except AttributeError:
        regions = set()

    compliance_template = PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE[
        provider_instance.provider
    ]
    compliance_overview_by_region = {
        region: deepcopy(compliance_template) for region in regions
    }

    for region, check_status in check_status_by_region.items():
        compliance_data = compliance_overview_by_region.setdefault(
            region, deepcopy(compliance_template)
        )
        for check_name, status in check_status.items():
            generate_scan_compliance(
                compliance_data,
                provider_instance.provider,
                check_name,
                status,
            )

    # Prepare compliance overview objects
    compliance_overview_objects = []
    for region, compliance_data in compliance_overview_by_region.items():
        for compliance_id, compliance in compliance_data.items():
            compliance_overview_objects.append(
                ComplianceOverview(
                    tenant_id=tenant_id,
                    scan=scan_instance,
                    region=region,
                    compliance_id=compliance_id,
                    framework=compliance["framework"],
                    version=compliance["version"],
                    description=compliance["description"],
                    requirements=compliance["requirements"],
                    requirements_passed=compliance["requirements_status"]["passed"],
                    requirements_failed=compliance["requirements_status"]["failed"],
                    requirements_manual=compliance["requirements_status"]["manual"],
                    total_requirements=compliance["total_requirements"],
                )
            )
    try:
        with rls_transaction(tenant_id):
            ComplianceOverview.objects.bulk_create(
                compliance_overview_objects, batch_size=500
            )
    except Exception as overview_exception:
        import sentry_sdk

        sentry_sdk.capture_exception(overview_exception)
        logger.error(
            f"Error storing compliance overview for scan {scan_id}: {overview_exception}"
        )

    try:
        resource_scan_summaries = [
            ResourceScanSummary(
                tenant_id=tenant_id,
                scan_id=scan_id,
                resource_id=resource_id,
                service=service,
                region=region,
                resource_type=resource_type,
            )
            for resource_id, service, region, resource_type in scan_resource_cache
        ]
        with rls_transaction(tenant_id):
            ResourceScanSummary.objects.bulk_create(
                resource_scan_summaries, batch_size=500, ignore_conflicts=True
            )
    except Exception as filter_exception:
        import sentry_sdk

        sentry_sdk.capture_exception(filter_exception)
        logger.error(
            f"Error storing filter values for scan {scan_id}: {filter_exception}"
        )

    serializer = ScanTaskSerializer(instance=scan_instance)
    return serializer.data
|
@@ -7,6 +7,7 @@ from celery.utils.log import get_task_logger
from config.celery import RLSTask
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIRECTORY
from django_celery_beat.models import PeriodicTask
from tasks.jobs.backfill import backfill_resource_scan_summaries
from tasks.jobs.connection import check_provider_connection
from tasks.jobs.deletion import delete_provider, delete_tenant
from tasks.jobs.export import (
@@ -358,3 +359,15 @@ def generate_outputs(scan_id: str, provider_id: str, tenant_id: str):
    Scan.all_objects.filter(id=scan_id).update(output_location=final_location)
    logger.info(f"Scan outputs at {final_location}")
    return {"upload": did_upload}


@shared_task(name="backfill-scan-resource-summaries", queue="backfill")
def backfill_scan_resource_summaries_task(tenant_id: str, scan_id: str):
    """
    Tries to backfill the resource scan summaries table for a given scan.

    Args:
        tenant_id (str): The tenant identifier.
        scan_id (str): The scan identifier.
    """
    return backfill_resource_scan_summaries(tenant_id=tenant_id, scan_id=scan_id)
|
@@ -0,0 +1,79 @@
from uuid import uuid4

import pytest
from tasks.jobs.backfill import backfill_resource_scan_summaries

from api.models import ResourceScanSummary, Scan, StateChoices


@pytest.mark.django_db
class TestBackfillResourceScanSummaries:
    @pytest.fixture(scope="function")
    def resource_scan_summary_data(self, scans_fixture):
        scan = scans_fixture[0]
        return ResourceScanSummary.objects.create(
            tenant_id=scan.tenant_id,
            scan_id=scan.id,
            resource_id=str(uuid4()),
            service="aws",
            region="us-east-1",
            resource_type="instance",
        )

    @pytest.fixture(scope="function")
    def get_not_completed_scans(self, providers_fixture):
        provider_id = providers_fixture[0].id
        tenant_id = providers_fixture[0].tenant_id
        scan_1 = Scan.objects.create(
            tenant_id=tenant_id,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.EXECUTING,
            provider_id=provider_id,
        )
        scan_2 = Scan.objects.create(
            tenant_id=tenant_id,
            trigger=Scan.TriggerChoices.MANUAL,
            state=StateChoices.AVAILABLE,
            provider_id=provider_id,
        )
        return scan_1, scan_2

    def test_already_backfilled(self, resource_scan_summary_data):
        tenant_id = resource_scan_summary_data.tenant_id
        scan_id = resource_scan_summary_data.scan_id

        result = backfill_resource_scan_summaries(tenant_id, scan_id)

        assert result == {"status": "already backfilled"}

    def test_not_completed_scan(self, get_not_completed_scans):
        for scan_instance in get_not_completed_scans:
            tenant_id = scan_instance.tenant_id
            scan_id = scan_instance.id
            result = backfill_resource_scan_summaries(tenant_id, scan_id)

            assert result == {"status": "scan is not completed"}

    def test_successful_backfill_inserts_one_summary(
        self, resources_fixture, findings_fixture
    ):
        tenant_id = findings_fixture[0].tenant_id
        scan_id = findings_fixture[0].scan_id

        # This scan affects the first two resources
        resources = resources_fixture[:2]

        result = backfill_resource_scan_summaries(tenant_id, scan_id)
        assert result == {"status": "backfilled", "inserted": len(resources)}

        # Verify correct values
        summaries = ResourceScanSummary.objects.filter(
            tenant_id=tenant_id, scan_id=scan_id
        )
        assert summaries.count() == len(resources)
        for resource in resources:
            summary = summaries.get(resource_id=resource.id)
            assert summary.resource_id == resource.id
            assert summary.service == resource.service
            assert summary.region == resource.region
            assert summary.resource_type == resource.type
|
@@ -1,5 +1,6 @@
import os
import zipfile
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest
@@ -14,8 +15,9 @@ from tasks.jobs.export import (

@pytest.mark.django_db
class TestOutputs:
    def test_compress_output_files_creates_zip(self, tmp_path):
        output_dir = tmp_path / "output"
    def test_compress_output_files_creates_zip(self, tmpdir):
        base_tmp = Path(str(tmpdir.mkdir("compress_output")))
        output_dir = base_tmp / "output"
        output_dir.mkdir()
        file_path = output_dir / "result.csv"
        file_path.write_text("data")
@@ -54,16 +56,16 @@ class TestOutputs:

    @patch("tasks.jobs.export.get_s3_client")
    @patch("tasks.jobs.export.base")
    def test_upload_to_s3_success(self, mock_base, mock_get_client, tmp_path):
    def test_upload_to_s3_success(self, mock_base, mock_get_client, tmpdir):
        mock_base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET = "test-bucket"

        zip_path = tmp_path / "outputs.zip"
        base_tmp = Path(str(tmpdir.mkdir("upload_success")))
        zip_path = base_tmp / "outputs.zip"
        zip_path.write_bytes(b"dummy")

        compliance_dir = tmp_path / "compliance"
        compliance_dir = base_tmp / "compliance"
        compliance_dir.mkdir()
        compliance_file = compliance_dir / "report.csv"
        compliance_file.write_text("ok")
        (compliance_dir / "report.csv").write_text("ok")

        client_mock = MagicMock()
        mock_get_client.return_value = client_mock
@@ -72,7 +74,6 @@ class TestOutputs:

        expected_uri = "s3://test-bucket/tenant-id/scan-id/outputs.zip"
        assert result == expected_uri

        assert client_mock.upload_file.call_count == 2

    @patch("tasks.jobs.export.get_s3_client")
@@ -84,12 +85,14 @@ class TestOutputs:

    @patch("tasks.jobs.export.get_s3_client")
    @patch("tasks.jobs.export.base")
    def test_upload_to_s3_skips_non_files(self, mock_base, mock_get_client, tmp_path):
    def test_upload_to_s3_skips_non_files(self, mock_base, mock_get_client, tmpdir):
        mock_base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET = "test-bucket"
        zip_path = tmp_path / "results.zip"
        base_tmp = Path(str(tmpdir.mkdir("upload_skips_non_files")))

        zip_path = base_tmp / "results.zip"
        zip_path.write_bytes(b"zip")

        compliance_dir = tmp_path / "compliance"
        compliance_dir = base_tmp / "compliance"
        compliance_dir.mkdir()
        (compliance_dir / "subdir").mkdir()

@@ -100,7 +103,6 @@ class TestOutputs:

        expected_uri = "s3://test-bucket/tenant/scan/results.zip"
        assert result == expected_uri

        client_mock.upload_file.assert_called_once()

    @patch(
@@ -110,23 +112,26 @@ class TestOutputs:
    @patch("tasks.jobs.export.base")
    @patch("tasks.jobs.export.logger.error")
    def test_upload_to_s3_failure_logs_error(
        self, mock_logger, mock_base, mock_get_client, tmp_path
        self, mock_logger, mock_base, mock_get_client, tmpdir
    ):
        mock_base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET = "bucket"
        zip_path = tmp_path / "zipfile.zip"

        base_tmp = Path(str(tmpdir.mkdir("upload_failure_logs")))
        zip_path = base_tmp / "zipfile.zip"
        zip_path.write_bytes(b"zip")

        compliance_dir = tmp_path / "compliance"
        compliance_dir = base_tmp / "compliance"
        compliance_dir.mkdir()
        (compliance_dir / "report.csv").write_text("csv")

        _upload_to_s3("tenant", str(zip_path), "scan")
        mock_logger.assert_called()

    def test_generate_output_directory_creates_paths(self, tmp_path):
    def test_generate_output_directory_creates_paths(self, tmpdir):
        from prowler.config.config import output_file_timestamp

        base_dir = str(tmp_path)
        base_tmp = Path(str(tmpdir.mkdir("generate_output")))
        base_dir = str(base_tmp)
        tenant_id = "t1"
        scan_id = "s1"
        provider = "aws"
|
@@ -1,5 +1,6 @@
import json
import uuid
from datetime import datetime
from unittest.mock import MagicMock, patch

import pytest
@@ -206,6 +207,10 @@ class TestPerformScan:
        scan.refresh_from_db()
        assert scan.state == StateChoices.FAILED

        provider.refresh_from_db()
        assert provider.connected is False
        assert isinstance(provider.connection_last_checked_at, datetime)

    @pytest.mark.parametrize(
        "last_status, new_status, expected_delta",
        [

|
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
import argparse
import os
import re
import subprocess
import sys
@@ -122,8 +123,7 @@ def main() -> None:
    args = parser.parse_args()

    metrics_dir = Path(args.metrics_dir)
    if not metrics_dir.is_dir():
        sys.exit(f"Metrics directory not found: {metrics_dir}")
    os.makedirs(metrics_dir, exist_ok=True)

    metrics_data: dict[str, pd.DataFrame] = {}
    for csv_file in sorted(metrics_dir.glob("*.csv")):
|
@@ -1,2 +1,3 @@
locust==2.34.1
matplotlib==3.10.1
pandas==2.2.3
|
@@ -180,13 +180,27 @@ class APIUser(APIUserBase):
        )

        endpoint = (
            f"/findings?filter[{filter_name}]={filter_value}"
            f"/findings/metadata?filter[{filter_name}]={filter_value}"
            f"&filter[inserted_at]={TARGET_INSERTED_AT}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task
    @task(3)
    def findings_metadata_resource_filter_scan_large(self):
        name = "/findings/metadata?filter[resource_filter]&filter[scan_id] - 500k"
        filter_name, filter_value = get_next_resource_filter(
            self.available_resource_filters
        )

        endpoint = (
            f"/findings/metadata?filter[{filter_name}]={filter_value}"
            f"&filter[scan]={self.l_scan_id}"
            f"&{get_sort_value(FINDINGS_UI_SORT_VALUES)}"
        )
        self.client.get(endpoint, headers=get_auth_headers(self.token), name=name)

    @task(2)
    def findings_resource_filter_large_scan_include(self):
        name = "/findings?filter[resource_filter][scan]&include - 500k"
        filter_name, filter_value = get_next_resource_filter(
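For context on the `@task(3)` change: in Locust the decorator argument weights task selection, so a weight-3 task is picked roughly three times as often as a weight-1 task within the same user class. A minimal sketch (class and endpoint names are illustrative, not from the load-test file):

```python
from locust import HttpUser, task

class ExampleUser(HttpUser):
    @task(3)  # selected about three times as often as the weight-1 task below
    def metadata(self):
        self.client.get("/findings/metadata")

    @task  # default weight of 1
    def findings(self):
        self.client.get("/findings")
```
|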
@@ -161,7 +161,7 @@ def update_nav_bar(pathname):
        html.Span(
            [
                html.Img(src="assets/favicon.ico", className="w-5"),
                "Subscribe to prowler SaaS",
                "Subscribe to Prowler Cloud",
            ],
            className="flex items-center gap-x-3 text-white",
        ),

@@ -2569,6 +2569,356 @@ def get_section_containers_3_levels(data, section_1, section_2, section_3):
    return html.Div(section_containers, className="compliance-data-layout")


def get_section_containers_threatscore(data, section_1, section_2, section_3):
    data["STATUS"] = data["STATUS"].apply(map_status_to_icon)
    findings_counts_marco = (
        data.groupby([section_1, "STATUS"]).size().unstack(fill_value=0)
    )
    section_containers = []
    data[section_1] = data[section_1].astype(str)
    data[section_2] = data[section_2].astype(str)
    data[section_3] = data[section_3].astype(str)

    data.sort_values(
        by=section_3,
        key=lambda x: x.map(extract_numeric_values),
        ascending=True,
        inplace=True,
    )

    for marco in data[section_1].unique():
        success_marco = findings_counts_marco.loc[marco].get(pass_emoji, 0)
        failed_marco = findings_counts_marco.loc[marco].get(fail_emoji, 0)

        fig_name = go.Figure(
            [
                go.Bar(
                    name="Failed",
                    x=[failed_marco],
                    y=[""],
                    orientation="h",
                    marker=dict(color="#e77676"),
                    width=[0.8],
                ),
                go.Bar(
                    name="Success",
                    x=[success_marco],
                    y=[""],
                    orientation="h",
                    marker=dict(color="#45cc6e"),
                    width=[0.8],
                ),
            ]
        )
        fig_name.update_layout(
            barmode="stack",
            margin=dict(l=10, r=10, t=10, b=10),
            paper_bgcolor="rgba(0,0,0,0)",
            plot_bgcolor="rgba(0,0,0,0)",
            showlegend=False,
            width=350,
            height=30,
            xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
            yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
            annotations=[
                dict(
                    x=success_marco + failed_marco,
                    y=0,
                    xref="x",
                    yref="y",
                    text=str(success_marco),
                    showarrow=False,
                    font=dict(color="#45cc6e", size=14),
                    xanchor="left",
                    yanchor="middle",
                ),
                dict(
                    x=0,
                    y=0,
                    xref="x",
                    yref="y",
                    text=str(failed_marco),
                    showarrow=False,
                    font=dict(color="#e77676", size=14),
                    xanchor="right",
                    yanchor="middle",
                ),
            ],
        )
        fig_name.add_annotation(
            x=failed_marco,
            y=0.3,
            text="|",
            showarrow=False,
            font=dict(size=20),
            xanchor="center",
            yanchor="middle",
        )

        graph_div = html.Div(
            dcc.Graph(
                figure=fig_name, config={"staticPlot": True}, className="info-bar"
            ),
            className="graph-section",
        )
        direct_internal_items = []

        for categoria in data[data[section_1] == marco][section_2].unique():
            specific_data = data[
                (data[section_1] == marco) & (data[section_2] == categoria)
            ]
            findings_counts_categoria = (
                specific_data.groupby([section_2, "STATUS"])
                .size()
                .unstack(fill_value=0)
            )
            success_categoria = findings_counts_categoria.loc[categoria].get(
                pass_emoji, 0
            )
            failed_categoria = findings_counts_categoria.loc[categoria].get(
                fail_emoji, 0
            )

            fig_section = go.Figure(
                [
                    go.Bar(
                        name="Failed",
                        x=[failed_categoria],
                        y=[""],
                        orientation="h",
                        marker=dict(color="#e77676"),
                        width=[0.8],
                    ),
                    go.Bar(
                        name="Success",
                        x=[success_categoria],
                        y=[""],
                        orientation="h",
                        marker=dict(color="#45cc6e"),
                        width=[0.8],
                    ),
                ]
            )
            fig_section.update_layout(
                barmode="stack",
                margin=dict(l=10, r=10, t=10, b=10),
                paper_bgcolor="rgba(0,0,0,0)",
                plot_bgcolor="rgba(0,0,0,0)",
                showlegend=False,
                width=350,
                height=30,
                xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                annotations=[
                    dict(
                        x=success_categoria + failed_categoria,
                        y=0,
                        xref="x",
                        yref="y",
                        text=str(success_categoria),
                        showarrow=False,
                        font=dict(color="#45cc6e", size=14),
                        xanchor="left",
                        yanchor="middle",
                    ),
                    dict(
                        x=0,
                        y=0,
                        xref="x",
                        yref="y",
                        text=str(failed_categoria),
                        showarrow=False,
                        font=dict(color="#e77676", size=14),
                        xanchor="right",
                        yanchor="middle",
                    ),
                ],
            )
            fig_section.add_annotation(
                x=failed_categoria,
                y=0.3,
                text="|",
                showarrow=False,
                font=dict(size=20),
                xanchor="center",
                yanchor="middle",
            )

            graph_div_section = html.Div(
                dcc.Graph(
                    figure=fig_section,
                    config={"staticPlot": True},
                    className="info-bar-child",
                ),
                className="graph-section-req",
            )
            direct_internal_items_idgrupocontrol = []

            for idgrupocontrol in specific_data[section_3].unique():
                specific_data2 = specific_data[
                    specific_data[section_3] == idgrupocontrol
                ]
                findings_counts_idgrupocontrol = (
                    specific_data2.groupby([section_3, "STATUS"])
                    .size()
                    .unstack(fill_value=0)
                )
                success_idgrupocontrol = findings_counts_idgrupocontrol.loc[
                    idgrupocontrol
                ].get(pass_emoji, 0)
                failed_idgrupocontrol = findings_counts_idgrupocontrol.loc[
                    idgrupocontrol
                ].get(fail_emoji, 0)

                fig_idgrupocontrol = go.Figure(
                    [
                        go.Bar(
                            name="Failed",
                            x=[failed_idgrupocontrol],
                            y=[""],
                            orientation="h",
                            marker=dict(color="#e77676"),
                            width=[0.8],
                        ),
                        go.Bar(
                            name="Success",
                            x=[success_idgrupocontrol],
                            y=[""],
                            orientation="h",
                            marker=dict(color="#45cc6e"),
                            width=[0.8],
                        ),
                    ]
                )
                fig_idgrupocontrol.update_layout(
                    barmode="stack",
                    margin=dict(l=10, r=10, t=10, b=10),
                    paper_bgcolor="rgba(0,0,0,0)",
                    plot_bgcolor="rgba(0,0,0,0)",
                    showlegend=False,
                    width=350,
                    height=30,
                    xaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                    yaxis=dict(showticklabels=False, showgrid=False, zeroline=False),
                    annotations=[
                        dict(
                            x=success_idgrupocontrol + failed_idgrupocontrol,
                            y=0,
                            xref="x",
                            yref="y",
                            text=str(success_idgrupocontrol),
                            showarrow=False,
                            font=dict(color="#45cc6e", size=14),
                            xanchor="left",
                            yanchor="middle",
                        ),
                        dict(
                            x=0,
                            y=0,
                            xref="x",
                            yref="y",
                            text=str(failed_idgrupocontrol),
                            showarrow=False,
                            font=dict(color="#e77676", size=14),
                            xanchor="right",
                            yanchor="middle",
                        ),
                    ],
                )
                fig_idgrupocontrol.add_annotation(
                    x=failed_idgrupocontrol,
                    y=0.3,
                    text="|",
                    showarrow=False,
                    font=dict(size=20),
                    xanchor="center",
                    yanchor="middle",
                )

                graph_div_idgrupocontrol = html.Div(
                    dcc.Graph(
                        figure=fig_idgrupocontrol,
                        config={"staticPlot": True},
                        className="info-bar-child",
                    ),
                    className="graph-section-req",
                )

                data_table = dash_table.DataTable(
                    data=specific_data2.to_dict("records"),
                    columns=[
                        {"name": i, "id": i}
                        for i in [
                            "CHECKID",
                            "STATUS",
                            "REGION",
                            "ACCOUNTID",
                            "RESOURCEID",
                        ]
                    ],
                    style_table={"overflowX": "auto"},
                    style_as_list_view=True,
                    style_cell={"textAlign": "left", "padding": "5px"},
                )

                title_internal = f"{idgrupocontrol} - {specific_data2['REQUIREMENTS_DESCRIPTION'].iloc[0]}"

                # Cut the title if it's too long
                title_internal = (
                    title_internal[:130] + " ..."
                    if len(title_internal) > 130
                    else title_internal
                )

                internal_accordion_item_2 = dbc.AccordionItem(
                    title=title_internal,
                    children=[
                        graph_div_idgrupocontrol,
                        html.Div([data_table], className="inner-accordion-content"),
                    ],
                )
                direct_internal_items_idgrupocontrol.append(
                    html.Div(
                        [
                            graph_div_idgrupocontrol,
                            dbc.Accordion(
                                [internal_accordion_item_2],
                                start_collapsed=True,
                                flush=True,
                            ),
                        ],
                        className="accordion-inner--child",
                    )
                )

            internal_accordion_item = dbc.AccordionItem(
                title=categoria,
                children=direct_internal_items_idgrupocontrol,
            )
            internal_section_container = html.Div(
                [
                    graph_div_section,
                    dbc.Accordion(
                        [internal_accordion_item], start_collapsed=True, flush=True
                    ),
                ],
                className="accordion-inner--child",
            )
            direct_internal_items.append(internal_section_container)

        accordion_item = dbc.AccordionItem(title=marco, children=direct_internal_items)
        section_container = html.Div(
            [
                graph_div,
                dbc.Accordion([accordion_item], start_collapsed=True, flush=True),
            ],
            className="accordion-inner",
        )
        section_containers.append(section_container)

    return html.Div(section_containers, className="compliance-data-layout")


# This function extracts and compares up to two numeric values, ensuring correct sorting for version-like strings.
def extract_numeric_values(value):
    numbers = re.findall(r"\d+", str(value))
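As the comment above notes, `extract_numeric_values` builds a numeric sort key so that version-like strings order correctly. A quick illustration of the intended behavior (assumed from the comment, since the function body is cut off by the hunk boundary):

```python
import re

def numeric_key(value):
    # Assumed behavior: compare by the extracted numbers, not lexicographically.
    return [int(n) for n in re.findall(r"\d+", str(value))]

print(sorted(["1.10", "1.2", "1.1"], key=numeric_key))  # ['1.1', '1.2', '1.10']
```
|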
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
|
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
|
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
|
@@ -0,0 +1,43 @@
import warnings

from dashboard.common_methods import get_section_containers_3_levels

warnings.filterwarnings("ignore")


def get_table(data):
    data["REQUIREMENTS_DESCRIPTION"] = (
        data["REQUIREMENTS_ID"] + " - " + data["REQUIREMENTS_DESCRIPTION"]
    )

    data["REQUIREMENTS_DESCRIPTION"] = data["REQUIREMENTS_DESCRIPTION"].apply(
        lambda x: x[:150] + "..." if len(str(x)) > 150 else x
    )

    data["REQUIREMENTS_ATTRIBUTES_SECTION"] = data[
        "REQUIREMENTS_ATTRIBUTES_SECTION"
    ].apply(lambda x: x[:80] + "..." if len(str(x)) > 80 else x)

    data["REQUIREMENTS_ATTRIBUTES_SUBSECTION"] = data[
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION"
    ].apply(lambda x: x[:150] + "..." if len(str(x)) > 150 else x)

    aux = data[
        [
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]

    return get_section_containers_3_levels(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_DESCRIPTION",
    )
|
@@ -0,0 +1,43 @@
import warnings

from dashboard.common_methods import get_section_containers_3_levels

warnings.filterwarnings("ignore")


def get_table(data):
    data["REQUIREMENTS_DESCRIPTION"] = (
        data["REQUIREMENTS_ID"] + " - " + data["REQUIREMENTS_DESCRIPTION"]
    )

    data["REQUIREMENTS_DESCRIPTION"] = data["REQUIREMENTS_DESCRIPTION"].apply(
        lambda x: x[:150] + "..." if len(str(x)) > 150 else x
    )

    data["REQUIREMENTS_ATTRIBUTES_SECTION"] = data[
        "REQUIREMENTS_ATTRIBUTES_SECTION"
    ].apply(lambda x: x[:80] + "..." if len(str(x)) > 80 else x)

    data["REQUIREMENTS_ATTRIBUTES_SUBSECTION"] = data[
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION"
    ].apply(lambda x: x[:150] + "..." if len(str(x)) > 150 else x)

    aux = data[
        [
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]

    return get_section_containers_3_levels(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_DESCRIPTION",
    )
|
@@ -1,6 +1,6 @@
import warnings

from dashboard.common_methods import get_section_containers_cis
from dashboard.common_methods import get_section_containers_threatscore

warnings.filterwarnings("ignore")

@@ -11,6 +11,7 @@ def get_table(data):
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
@@ -19,6 +20,9 @@ def get_table(data):
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    return get_section_containers_threatscore(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_ID",
    )
|
@@ -1,6 +1,6 @@
import warnings

from dashboard.common_methods import get_section_containers_cis
from dashboard.common_methods import get_section_containers_threatscore

warnings.filterwarnings("ignore")

@@ -11,6 +11,7 @@ def get_table(data):
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
@@ -19,6 +20,9 @@ def get_table(data):
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    return get_section_containers_threatscore(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_ID",
    )
|
@@ -1,6 +1,6 @@
import warnings

from dashboard.common_methods import get_section_containers_cis
from dashboard.common_methods import get_section_containers_threatscore

warnings.filterwarnings("ignore")

@@ -11,6 +11,7 @@ def get_table(data):
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
@@ -19,6 +20,9 @@ def get_table(data):
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    return get_section_containers_threatscore(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_ID",
    )
|
@@ -0,0 +1,28 @@
import warnings

from dashboard.common_methods import get_section_containers_threatscore

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_threatscore(
        aux,
        "REQUIREMENTS_ATTRIBUTES_SECTION",
        "REQUIREMENTS_ATTRIBUTES_SUBSECTION",
        "REQUIREMENTS_ID",
    )
|
@@ -90,12 +90,28 @@ def create_layout_overview(
            ),
            html.Div(
                [
                    (
                        html.Label(
                            "Table Rows:",
                            className="text-prowler-stone-900 font-bold text-sm",
                            style={"margin-right": "10px"},
                        )
                    html.Label(
                        "Search:",
                        className="text-prowler-stone-900 font-bold text-sm",
                        style={"margin-right": "10px"},
                    ),
                    dcc.Input(
                        id="search-input",
                        type="text",
                        placeholder="Search by check title, service, region...",
                        debounce=True,
                        style={
                            "padding": "4px 8px",
                            "border": "1px solid #ccc",
                            "borderRadius": "4px",
                            "marginRight": "20px",
                            "width": "250px",
                        },
                    ),
                    html.Label(
                        "Table Rows:",
                        className="text-prowler-stone-900 font-bold text-sm",
                        style={"margin-right": "10px"},
                    ),
                    table_row_dropdown,
                    download_button_csv,
@@ -132,7 +148,7 @@ def create_layout_compliance(
            html.A(
                [
                    html.Img(src="assets/favicon.ico", className="w-5 mr-3"),
                    html.Span("Subscribe to prowler SaaS"),
                    html.Span("Subscribe to Prowler Cloud"),
                ],
                href="https://prowler.pro/",
                target="_blank",
|
@@ -651,58 +651,114 @@ def get_table(current_compliance, table):


def get_threatscore_mean_by_pillar(df):
    modified_df = df[df["STATUS"] == "FAIL"]
    score_per_pillar = {}
    max_score_per_pillar = {}

    modified_df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
        modified_df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
    )
    for _, row in df.iterrows():
        pillar = (
            row["REQUIREMENTS_ATTRIBUTES_SECTION"].split(" - ")[0]
            if isinstance(row["REQUIREMENTS_ATTRIBUTES_SECTION"], str)
            else "Unknown"
        )

    pillar_means = (
        modified_df.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
            "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
        ]
        .mean()
        .round(2)
    )
        if pillar not in score_per_pillar:
            score_per_pillar[pillar] = 0
            max_score_per_pillar[pillar] = 0

        level_of_risk = pd.to_numeric(
            row["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
        )
        level_of_risk = 1 if pd.isna(level_of_risk) else level_of_risk

        weight = 1
        if "REQUIREMENTS_ATTRIBUTES_WEIGHT" in row and not pd.isna(
            row["REQUIREMENTS_ATTRIBUTES_WEIGHT"]
        ):
            weight = pd.to_numeric(
                row["REQUIREMENTS_ATTRIBUTES_WEIGHT"], errors="coerce"
            )
            weight = 1 if pd.isna(weight) else weight

        max_score_per_pillar[pillar] += level_of_risk * weight

        if row["STATUS"] == "PASS":
            score_per_pillar[pillar] += level_of_risk * weight

    output = []
    for pillar, mean in pillar_means.items():
        output.append(f"{pillar} - [{mean}]")
    for pillar in max_score_per_pillar:
        risk_score = 0
        if max_score_per_pillar[pillar] > 0:
            risk_score = (score_per_pillar[pillar] / max_score_per_pillar[pillar]) * 100

        output.append(f"{pillar} - [{risk_score:.1f}%]")

    for value in output:
        if value.split(" - ")[0] in df["REQUIREMENTS_ATTRIBUTES_SECTION"].values:
        base_pillar = value.split(" - ")[0]
        if base_pillar in df["REQUIREMENTS_ATTRIBUTES_SECTION"].values:
            df.loc[
                df["REQUIREMENTS_ATTRIBUTES_SECTION"] == value.split(" - ")[0],
                df["REQUIREMENTS_ATTRIBUTES_SECTION"] == base_pillar,
                "REQUIREMENTS_ATTRIBUTES_SECTION",
            ] = value

    return df
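The per-pillar score introduced above is the passed risk points over the maximum possible: every requirement adds `level_of_risk * weight` to the maximum, and only `PASS` rows add to the numerator. A worked example with made-up rows:

```python
rows = [("PASS", 3, 2), ("FAIL", 5, 1), ("PASS", 1, 1)]  # (status, level_of_risk, weight)
score = sum(l * w for s, l, w in rows if s == "PASS")    # 3*2 + 1*1 = 7
max_score = sum(l * w for _, l, w in rows)               # 6 + 5 + 1 = 12
risk_score = (score / max_score) * 100                   # 7/12 * 100 ~ 58.3%
```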


def get_table_prowler_threatscore(df):
    df = df[df["STATUS"] == "FAIL"]
    score_per_pillar = {}
    max_score_per_pillar = {}
    pillars = {}

    # Delete " - " from the column REQUIREMENTS_ATTRIBUTES_SECTION
    df["REQUIREMENTS_ATTRIBUTES_SECTION"] = (
        df["REQUIREMENTS_ATTRIBUTES_SECTION"].str.split(" - ").str[0]
    )
    df_copy = df.copy()

    df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"] = pd.to_numeric(
        df["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
    )

    score_df = (
        df.groupby("REQUIREMENTS_ATTRIBUTES_SECTION")[
            "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"
        ]
        .mean()
        .reset_index()
        .rename(
            columns={
                "REQUIREMENTS_ATTRIBUTES_SECTION": "Pillar",
                "REQUIREMENTS_ATTRIBUTES_LEVELOFRISK": "Score",
            }
    for _, row in df_copy.iterrows():
        pillar = (
            row["REQUIREMENTS_ATTRIBUTES_SECTION"].split(" - ")[0]
            if isinstance(row["REQUIREMENTS_ATTRIBUTES_SECTION"], str)
            else "Unknown"
        )
    )

        if pillar not in pillars:
            pillars[pillar] = {"FAIL": 0, "PASS": 0, "MUTED": 0}
            score_per_pillar[pillar] = 0
            max_score_per_pillar[pillar] = 0

        level_of_risk = pd.to_numeric(
            row["REQUIREMENTS_ATTRIBUTES_LEVELOFRISK"], errors="coerce"
        )
        level_of_risk = 1 if pd.isna(level_of_risk) else level_of_risk

        weight = 1
        if "REQUIREMENTS_ATTRIBUTES_WEIGHT" in row and not pd.isna(
            row["REQUIREMENTS_ATTRIBUTES_WEIGHT"]
        ):
            weight = pd.to_numeric(
                row["REQUIREMENTS_ATTRIBUTES_WEIGHT"], errors="coerce"
            )
            weight = 1 if pd.isna(weight) else weight

        max_score_per_pillar[pillar] += level_of_risk * weight

        if row["STATUS"] == "PASS":
            pillars[pillar]["PASS"] += 1
            score_per_pillar[pillar] += level_of_risk * weight
        elif row["STATUS"] == "FAIL":
            pillars[pillar]["FAIL"] += 1

        if "MUTED" in row and row["MUTED"] == "True":
            pillars[pillar]["MUTED"] += 1

    result_df = []

    for pillar in pillars.keys():
        risk_score = 0
        if max_score_per_pillar[pillar] > 0:
            risk_score = (score_per_pillar[pillar] / max_score_per_pillar[pillar]) * 100

        result_df.append({"Pillar": pillar, "Score": risk_score})

    score_df = pd.DataFrame(result_df)

    score_df = score_df.sort_values("Score", ascending=True)

    fig = px.bar(
        score_df,
@@ -710,22 +766,25 @@ def get_table_prowler_threatscore(df):
        y="Score",
        color="Score",
        color_continuous_scale=[
            "#45cc6e",
            "#f4d44d",
            "#e77676",
        ],  # green -> yellow -> red
        hover_data={"Score": True, "Pillar": True},
        labels={"Score": "Average Risk Score", "Pillar": "Section"},
            "#f4d44d",
            "#45cc6e",
        ],
        labels={"Score": "Risk Score (%)", "Pillar": "Section"},
        height=400,
        text="Score",
    )

    fig.update_traces(texttemplate="%{text:.1f}%", textposition="outside")

    fig.update_layout(
        xaxis_title="Pillar",
        yaxis_title="Level of Risk",
        yaxis_title="Risk Score (%)",
        margin=dict(l=20, r=20, t=30, b=20),
        plot_bgcolor="rgba(0,0,0,0)",
        paper_bgcolor="rgba(0,0,0,0)",
        coloraxis_colorbar=dict(title="Risk"),
        coloraxis_colorbar=dict(title="Risk %"),
        yaxis=dict(range=[0, 110]),
    )

    return dcc.Graph(
|
@@ -518,6 +518,7 @@ else:
    Input("service-filter", "value"),
    Input("table-rows", "value"),
    Input("status-filter", "value"),
    Input("search-input", "value"),
    Input("aws_card", "n_clicks"),
    Input("azure_card", "n_clicks"),
    Input("gcp_card", "n_clicks"),
@@ -540,6 +541,7 @@ def filter_data(
    service_values,
    table_row_values,
    status_values,
    search_value,
    aws_clicks,
    azure_clicks,
    gcp_clicks,
@@ -1144,6 +1146,15 @@
    }

    index_count = 0
    if search_value:
        search_value = search_value.lower()
        filtered_data = filtered_data[
            filtered_data["CHECK_TITLE"].str.lower().str.contains(search_value)
            | filtered_data["SERVICE_NAME"].str.lower().str.contains(search_value)
            | filtered_data["REGION"].str.lower().str.contains(search_value)
            | filtered_data["STATUS"].str.lower().str.contains(search_value)
        ]

    full_filtered_data = filtered_data.copy()
    filtered_data = filtered_data.head(table_row_values)
    # Sort the filtered_data
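The search filter above is a plain case-insensitive substring match OR-ed across four columns. A minimal standalone sketch of the same idea (the toy frame is illustrative; passing `na=False` to `str.contains` would additionally guard against missing values, which the diff does not do):

```python
import pandas as pd

df = pd.DataFrame({
    "CHECK_TITLE": ["S3 bucket public", "IAM root key"],
    "SERVICE_NAME": ["s3", "iam"],
})
term = "s3"
mask = (
    df["CHECK_TITLE"].str.lower().str.contains(term)
    | df["SERVICE_NAME"].str.lower().str.contains(term)
)
print(df[mask])  # rows matching either column
```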
@@ -1342,13 +1353,13 @@ def filter_data(
        "m365", m365_provider_logo, "Accounts", full_filtered_data
    )

    # Subscribe to prowler SaaS card
    # Subscribe to Prowler Cloud card
    subscribe_card = [
        html.Div(
            html.A(
                [
                    html.Img(src="assets/favicon.ico", className="w-5 mr-3"),
                    html.Span("Subscribe to prowler SaaS"),
                    html.Span("Subscribe to Prowler Cloud"),
                ],
                href="https://prowler.pro/",
                target="_blank",
|
@@ -70,7 +70,7 @@ The other three cases does not need additional configuration, `--az-cli-auth` an
Prowler for Azure needs two types of permission scopes to be set:

- **Microsoft Entra ID permissions**: used to retrieve metadata from the identity assumed by Prowler and to run specific Entra checks (not mandatory to execute the tool). The permissions required by the tool are the following:
    - `Directory.Read.All`
    - `Domain.Read.All`
    - `Policy.Read.All`
    - `UserAuthenticationMethod.Read.All` (used only for the Entra checks related to multifactor authentication)
- **Subscription scope permissions**: required to launch the checks against your resources and mandatory to run the tool. The following built-in RBAC roles must be added per subscription to the entity that is going to be assumed by the tool:
@@ -81,6 +81,9 @@ Prowler for Azure needs two types of permission scopes to be set:

To assign the permissions, follow the instructions in the [Microsoft Entra ID permissions](../tutorials/azure/create-prowler-service-principal.md#assigning-the-proper-permissions) section and the [Azure subscriptions permissions](../tutorials/azure/subscriptions.md#assign-the-appropriate-permissions-to-the-identity-that-is-going-to-be-assumed-by-prowler) section, respectively.

???+ warning
    Some permissions in `ProwlerRole` are considered **write** permissions, so if you have a `ReadOnly` lock attached to some resources you may get an error and will not get a finding for that check.

#### Checks that require ProwlerRole

The following checks require the `ProwlerRole` permissions to be executed. If you want to run them, make sure you have assigned the role to the identity that is going to be assumed by Prowler:
@@ -153,77 +156,31 @@ With this credentials you will only be able to run the checks that work through

Authentication flag: `--env-auth`

This authentication method follows the same approach as the service principal method but introduces two additional environment variables for user credentials: `M365_USER` and `M365_ENCRYPTED_PASSWORD`.
This authentication method follows the same approach as the service principal method but introduces two additional environment variables for user credentials: `M365_USER` and `M365_PASSWORD`.

```console
export AZURE_CLIENT_ID="XXXXXXXXX"
export AZURE_CLIENT_SECRET="XXXXXXXXX"
export AZURE_TENANT_ID="XXXXXXXXX"
export M365_USER="your_email@example.com"
export M365_ENCRYPTED_PASSWORD="6500780061006d0070006c006500700061007300730077006f0072006400" # replace this with yours
export M365_PASSWORD="examplepassword"
```

These two new environment variables are **required** to execute the PowerShell modules needed to retrieve information from M365 services. Prowler uses Service Principal authentication to access Microsoft Graph and user credentials to authenticate to the Microsoft PowerShell modules.
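
With those variables exported, running the provider with environment-based authentication is a one-liner (a minimal sketch using the `--env-auth` flag introduced above):

```console
prowler m365 --env-auth
```
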
- `M365_USER` should be your Microsoft account email using the default domain. This means it must look like `example@YourCompany.onmicrosoft.com`.
- `M365_USER` should be your Microsoft account email using the **assigned domain in the tenant**. This means it must look like `example@YourCompany.onmicrosoft.com` or `example@YourCompany.com`, but it must be the exact domain assigned to that user in the tenant.

To ensure that you are using the default domain you can see how to verify it [here](../tutorials/microsoft365/getting-started-m365.md#step-1-obtain-your-domain).
???+ warning
    Using a tenant domain other than the one assigned, even if it belongs to the same tenant, will cause Prowler to fail, as Microsoft authentication will not succeed.

If you don't have a user created with that domain, Prowler will not work, as it will not be able to ensure that both the app and the user belong to the same tenant. To proceed, you can either create a new user with that domain or modify the domain of an existing user.
Ensure you are using the right domain for the user you are trying to authenticate with.



- `M365_ENCRYPTED_PASSWORD` must be an encrypted SecureString. To convert your password into a valid encrypted string, you need to use PowerShell.

???+ warning
    Passwords encrypted using `ConvertTo-SecureString` can only be decrypted in the same OS/user context. If you generate an encrypted password on macOS or Linux (both UNIX), it will fail on Windows and vice versa. Since Prowler Cloud runs on UNIX, a password generated on Windows won't work, so you'll need to generate a new one using any UNIX distro (example above).

If you are working on Windows and will use your encrypted password on a different system (for example, executing Prowler on macOS or adding your password to Prowler Cloud), you will need to generate a "UNIX-compatible" version of your encrypted password. This can be done using WSL, which is easy to install on Windows.

=== "UNIX"

    Open a PowerShell terminal with a [supported version](requirements.md#supported-powershell-versions) and then run the following command:

    ```console
    $securePassword = ConvertTo-SecureString "examplepassword" -AsPlainText -Force
    $encryptedPassword = $securePassword | ConvertFrom-SecureString
    Write-Output $encryptedPassword
    6500780061006d0070006c006500700061007300730077006f0072006400
    ```

    If everything is done correctly, you will see the encrypted string that you need to set as the `M365_ENCRYPTED_PASSWORD` environment variable.

=== "Windows"

    To install WSL and PowerShell on it to generate that password (you can use a different distro, but this one is known to work):

    ```console
    wsl --install -d Ubuntu-22.04
    ```

    Then, open the Ubuntu terminal and run the following commands:

    ```console
    sudo apt update && sudo apt install -y wget apt-transport-https software-properties-common
    wget -q "https://packages.microsoft.com/config/ubuntu/$(lsb_release -rs)/packages-microsoft-prod.deb"
    sudo dpkg -i packages-microsoft-prod.deb
    sudo apt update
    sudo apt install -y powershell
    pwsh
    ```

    Once this is done, a prompt running the latest version of PowerShell will open, where you can generate your encrypted password:

    ```console
    $securePassword = ConvertTo-SecureString "examplepassword" -AsPlainText -Force
    $encryptedPassword = $securePassword | ConvertFrom-SecureString
    Write-Output $encryptedPassword
    6500780061006d0070006c006500700061007300730077006f0072006400
    ```

    If everything is done correctly, you will see the encrypted string that you need to set as the `M365_ENCRYPTED_PASSWORD` environment variable.
- `M365_PASSWORD` must be the user password.

???+ note
    Previously an encrypted password was required; now the plain user password is requested directly. Prowler will handle the password encryption for you.


### Interactive Browser authentication
@@ -242,11 +199,11 @@ Since this is a delegated permission authentication method, necessary permission
Prowler for M365 requires two types of permission scopes to be set (if you want to run the full provider, including PowerShell checks). Both must be configured using Microsoft Entra ID:

- **Service Principal Application Permissions**: These are set at the **application** level and are used to retrieve data from the identity being assessed:
    - `Directory.Read.All`: Required for all services.
    - `Domain.Read.All`: Required for all services.
    - `Policy.Read.All`: Required for all services.
    - `User.Read` (IMPORTANT: this must be set as **delegated**): Required for the sign-in.
    - `Sites.Read.All`: Required for SharePoint service.
    - `SharePointTenantSettings.Read.All`: Required for SharePoint service.
    - `AuditLog.Read.All`: Required for Entra service.

- **PowerShell Modules Permissions**: These are set at the `M365_USER` level, so the user used to run Prowler must have one of the following roles:
    - `Global Reader` (recommended): this allows you to read everything needed.
@@ -466,3 +423,19 @@ The required modules are:

- [ExchangeOnlineManagement](https://www.powershellgallery.com/packages/ExchangeOnlineManagement/3.6.0): Minimum version 3.6.0. Required for several checks across Exchange, Defender, and Purview.
- [MicrosoftTeams](https://www.powershellgallery.com/packages/MicrosoftTeams/6.6.0): Minimum version 6.6.0. Required for all Teams checks.

## GitHub
### Authentication

Prowler supports multiple methods to [authenticate with GitHub](https://docs.github.com/en/rest/authentication/authenticating-to-the-rest-api). These include:

- **Personal Access Token (PAT)**
- **OAuth App Token**
- **GitHub App Credentials**

This flexibility allows you to scan and analyze your GitHub account, including repositories, organizations, and applications, using the method that best suits your use case.

The provided credentials must have the appropriate permissions to perform all the required checks.

???+ note
    GitHub App Credentials support fewer checks than other authentication methods.

@@ -565,6 +565,7 @@ kubectl logs prowler-XXXXX --namespace prowler-ns
???+ note
    By default, `prowler` will scan all namespaces in your active Kubernetes context. Use the flag `--context` to specify the context to be scanned and `--namespaces` to specify the namespaces to be scanned.
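
For example, to scan a single context and a couple of namespaces (a sketch using the flags named in the note above; the context and namespace names are placeholders):

```console
prowler kubernetes --context my-context --namespaces kube-system default
```
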
#### Microsoft 365

With M365 you need to specify which auth method is going to be used:
@@ -587,5 +588,29 @@ prowler m365 --browser-auth --tenant-id "XXXXXXXX"

See more details about M365 Authentication in [Requirements](getting-started/requirements.md#microsoft-365).

#### GitHub

Prowler allows you to scan your GitHub account, including your repositories, organizations, or applications.

There are several supported login methods:

```console
# Personal Access Token (PAT):
prowler github --personal-access-token pat

# OAuth App Token:
prowler github --oauth-app-token oauth_token

# GitHub App Credentials:
prowler github --github-app-id app_id --github-app-key app_key
```

???+ note
    If no login method is explicitly provided, Prowler will automatically attempt to authenticate using environment variables in the following order of precedence:

    1. `GITHUB_PERSONAL_ACCESS_TOKEN`
    2. `OAUTH_APP_TOKEN`
    3. `GITHUB_APP_ID` and `GITHUB_APP_KEY`

## Prowler v2 Documentation
For **Prowler v2 Documentation**, please check it out [here](https://github.com/prowler-cloud/prowler/blob/8818f47333a0c1c1a457453c87af0ea5b89a385f/README.md).

@@ -40,7 +40,7 @@ az ad sp create-for-rbac --name "ProwlerApp"

To allow Prowler to retrieve metadata from the assumed identity and run specific Entra checks, the following permissions need to be assigned:

- `Directory.Read.All`
- `Domain.Read.All`
- `Policy.Read.All`
- `UserAuthenticationMethod.Read.All` (used only for the Entra checks related to multifactor authentication)

@@ -58,7 +58,7 @@ To assign the permissions you can make it from the Azure Portal or using the Azu
5. Then click on "+ Add a permission" and select "Microsoft Graph"
6. Once in the "Microsoft Graph" view, select "Application permissions"
7. Finally, search for "Directory", "Policy", and "UserAuthenticationMethod" and select the following permissions:
    - `Directory.Read.All`
    - `Domain.Read.All`
    - `Policy.Read.All`
    - `UserAuthenticationMethod.Read.All`
8. Click on "Add permissions" to apply the new permissions.

@@ -90,7 +90,7 @@ A Service Principal is required to grant Prowler the necessary privileges.

Assign the following Microsoft Graph permissions:

- Directory.Read.All
- Domain.Read.All

- Policy.Read.All

@@ -107,11 +107,11 @@ Assign the following Microsoft Graph permissions:

3. Search and select:

    - `Directory.Read.All`
    - `Domain.Read.All`
    - `Policy.Read.All`
    - `UserAuthenticationMethod.Read.All`



4. Click `Add permissions`, then grant admin consent

@@ -23,55 +23,7 @@ In order to see which compliance frameworks are cover by Prowler, you can use op
prowler <provider> --list-compliance
```

### AWS

- `aws_account_security_onboarding_aws`
- `aws_audit_manager_control_tower_guardrails_aws`
- `aws_foundational_security_best_practices_aws`
- `aws_foundational_technical_review_aws`
- `aws_well_architected_framework_reliability_pillar_aws`
- `aws_well_architected_framework_security_pillar_aws`
- `cis_1.4_aws`
- `cis_1.5_aws`
- `cis_2.0_aws`
- `cis_3.0_aws`
- `cisa_aws`
- `ens_rd2022_aws`
- `fedramp_low_revision_4_aws`
- `fedramp_moderate_revision_4_aws`
- `ffiec_aws`
- `gdpr_aws`
- `gxp_21_cfr_part_11_aws`
- `gxp_eu_annex_11_aws`
- `hipaa_aws`
- `iso27001_2013_aws`
- `kisa_isms_p_2023_aws`
- `kisa_isms_p_2023_korean_aws`
- `mitre_attack_aws`
- `nist_800_171_revision_2_aws`
- `nist_800_53_revision_4_aws`
- `nist_800_53_revision_5_aws`
- `nist_csf_1.1_aws`
- `pci_3.2.1_aws`
- `rbi_cyber_security_framework_aws`
- `soc2_aws`

### Azure

- `cis_2.0_azure`
- `cis_2.1_azure`
- `ens_rd2022_azure`
- `mitre_attack_azure`

### GCP

- `cis_2.0_gcp`
- `ens_rd2022_gcp`
- `mitre_attack_gcp`

### Kubernetes

- `cis_1.8_kubernetes`
The full and updated list of supported compliance frameworks for each provider is available at [Prowler Hub](https://hub.prowler.com/compliance).

## List Requirements of Compliance Frameworks
For each compliance framework, you can use option `--list-compliance-requirements` to list its requirements.
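
For example, to list the requirements of the AWS CIS 3.0 framework shown above (a sketch; any framework name returned by `--list-compliance` works):

```console
prowler aws --list-compliance-requirements cis_3.0_aws
```
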
@@ -109,6 +109,15 @@ The following list includes all the Microsoft 365 checks with configurable varia
| `exchange_organization_mailtips_enabled` | `recommended_mailtips_large_audience_threshold` | Integer |


## GitHub

### Configurable Checks
The following list includes all the GitHub checks with configurable variables that can be changed in the configuration YAML file:

| Check Name | Value | Type |
|--------------------------------------------|---------------------------------------------|---------|
| `repository_inactive_not_archived` | `inactive_not_archived_days_threshold` | Integer |

## Config YAML File Structure

???+ note
@@ -525,5 +534,10 @@ m365:
  # m365.exchange_organization_mailtips_enabled
  recommended_mailtips_large_audience_threshold: 25 # maximum number of recipients

# GitHub Configuration
github:
  # github.repository_inactive_not_archived
  inactive_not_archived_days_threshold: 180


```

@@ -0,0 +1,44 @@
# GitHub Authentication

Prowler supports multiple methods to [authenticate with GitHub](https://docs.github.com/en/rest/authentication/authenticating-to-the-rest-api). These include:

- **Personal Access Token (PAT)**
- **OAuth App Token**
- **GitHub App Credentials**

This flexibility allows you to scan and analyze your GitHub account, including repositories, organizations, and applications, using the method that best suits your use case.

## Supported Login Methods

Here are the available login methods and their respective flags:

### Personal Access Token (PAT)
Use this method by providing your personal access token directly.

```console
prowler github --personal-access-token pat
```

### OAuth App Token
Authenticate using an OAuth app token.

```console
prowler github --oauth-app-token oauth_token
```

### GitHub App Credentials
Use GitHub App credentials by specifying the App ID and the private key.

```console
prowler github --github-app-id app_id --github-app-key app_key
```

### Automatic Login Method Detection
If no login method is explicitly provided, Prowler will automatically attempt to authenticate using environment variables in the following order of precedence:

1. `GITHUB_PERSONAL_ACCESS_TOKEN`
2. `OAUTH_APP_TOKEN`
3. `GITHUB_APP_ID` and `GITHUB_APP_KEY`

???+ note
    If you don't plan to specify a login method explicitly, ensure the corresponding environment variables are set before running Prowler so that automatic detection can work, as shown below.
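
For instance, relying on the first variable in that precedence order (a sketch; the token value is a placeholder):

```console
export GITHUB_PERSONAL_ACCESS_TOKEN="ghp_xxxxxxxxxxxxxxxx"
prowler github
```
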
@@ -20,3 +20,19 @@ kubectl logs prowler-XXXXX --namespace prowler-ns

???+ note
    By default, `prowler` will scan all namespaces in your active Kubernetes context. Use the [`--namespace`](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/kubernetes/namespace/) flag to specify the namespace(s) to be scanned.

???+ tip "Identifying the cluster in reports"
    When running in in-cluster mode, the Kubernetes API does not expose the actual cluster name by default.

    To uniquely identify the cluster in logs and reports, you can:

    - Use the `--cluster-name` flag to manually set the cluster name:
      ```bash
      prowler -p kubernetes --cluster-name production-cluster
      ```
    - Or set the `CLUSTER_NAME` environment variable:
      ```yaml
      env:
        - name: CLUSTER_NAME
          value: production-cluster
      ```

@@ -21,3 +21,23 @@ To specify the namespace(s) to be scanned, use the `--namespace` flag followed b
```console
prowler --namespace namespace1 namespace2
```

## Proxy and TLS Verification

If your Kubernetes cluster is only accessible via an internal proxy, Prowler will respect the `HTTPS_PROXY` or `https_proxy` environment variable:

```console
export HTTPS_PROXY=http://my.internal.proxy:8888
prowler kubernetes ...
```

If you need to skip TLS verification for internal proxies, you can set the `K8S_SKIP_TLS_VERIFY` environment variable:

```console
export K8S_SKIP_TLS_VERIFY=true
prowler kubernetes ...
```

This will allow Prowler to connect to the cluster even if the proxy uses a self-signed certificate.

These environment variables are supported both when using an external `kubeconfig` and in in-cluster mode.

@@ -4,9 +4,9 @@ Set up your M365 account to enable security scanning using Prowler Cloud/App.

## Requirements

To configure your M365 account, you'll need:
To configure your M365 account, you'll need to:

1. Obtain your `Default Domain` from the Entra ID portal.
1. Obtain a domain from the Entra ID portal.

2. Access Prowler Cloud/App and add a new cloud provider `Microsoft 365`.

@@ -18,8 +18,6 @@ To configure your M365 account, you’ll need:

3.3 Assign the required roles to your user.

3.4 Retrieve your encrypted password.

4. Add the credentials to Prowler Cloud/App.

## Step 1: Obtain your Domain
@@ -32,9 +30,7 @@ Go to the Entra ID portal, then you can search for `Domain` or go to Identity >

![Entra ID portal](./img/entra-id-portal.png)

Once you are there, just look for the `Default Domain`; this should be something similar to `YourCompany.onmicrosoft.com`. To ensure that you are picking the correct domain, just click on it and verify that the type is `Initial` and that you can't delete it.

![Default domain](./img/default-domain.png)
Once you are there, just select the domain you want to use.

---

@@ -78,11 +74,11 @@ A Service Principal is required to grant Prowler the necessary privileges.

![App registration](./img/app-registration.png)

4. Go to `Certificates & secrets` > `+ New client secret`
4. Go to `Certificates & secrets` > `Client secrets` > `+ New client secret`

![Client secret](./img/client-secret.png)

5. Fill in the required fields and click `Add`, then copy the generated value (that value will be `AZURE_CLIENT_SECRET`)
5. Fill in the required fields and click `Add`, then copy the generated `value` (that value will be `AZURE_CLIENT_SECRET`)

![Secret value](./img/secret-value.png)

@@ -99,12 +95,11 @@ With this done you will have all the needed keys, summarized in the following ta
### Grant required API permissions

Assign the following Microsoft Graph permissions:

- `Directory.Read.All`: Required for all services.
- `AuditLog.Read.All`: Required for Entra service.
- `Domain.Read.All`: Required for all services.
- `Policy.Read.All`: Required for all services.
- `User.Read` (IMPORTANT: this is set as **delegated**): Required for the sign-in.
- `Sites.Read.All`: Required for SharePoint service.
- `SharePointTenantSettings.Read.All`: Required for SharePoint service.
- `User.Read` (IMPORTANT: this is set as **delegated**): Required for the sign-in.

Follow these steps to assign the permissions:

@@ -117,12 +112,12 @@ Follow these steps to assign the permissions:
![API permissions](./img/api-permissions.png)

3. Search for and select every permission below, and once all are selected click on `Add permissions`:

    - `Directory.Read.All`
    - `AuditLog.Read.All`: Required for Entra service.
    - `Domain.Read.All`
    - `Policy.Read.All`
    - `Sites.Read.All`
    - `SharePointTenantSettings.Read.All`


![Select permissions](./img/select-permissions.png)

4. Click `Add permissions`, then grant admin consent
@@ -174,25 +169,20 @@ Follow these steps to assign the role:

---

### Get your encrypted password

For this step you will need to use PowerShell. Here you will create your encrypted password based on the password of the user that you are going to use. For more information about how to generate this password, go [here](../../getting-started/requirements.md#service-principal-and-user-credentials-authentication-recommended) and follow the steps needed to obtain `M365_ENCRYPTED_PASSWORD`.

---

## Step 4: Add credentials to Prowler Cloud/App

1. Go to your App Registration overview and copy the `Client ID` and `Tenant ID`

![App registration overview](./img/app-registration-overview.png)


2. Go to Prowler Cloud/App and paste:

    - `Client ID`
    - `Tenant ID`
    - `AZURE_CLIENT_SECRET` from earlier
    - `M365_USER` your user using the default domain, more info [here](../../getting-started/requirements.md#service-principal-and-user-credentials-authentication-recommended)
    - `M365_ENCRYPTED_PASSWORD` generated before
    - `M365_USER` the user using the correct assigned domain, more info [here](../../getting-started/requirements.md#service-principal-and-user-credentials-authentication-recommended)
    - `M365_PASSWORD` the password of the user

![Add credentials](./img/add-credentials.png)


@@ -38,11 +38,11 @@ prowler <provider> --list-checks
```
- Execute specific check(s):
```console
prowler <provider> -c/--checks s3_bucket_public_access
prowler <provider> -c/--checks s3_bucket_public_access iam_root_mfa_enabled
```
- Exclude specific check(s):
```console
prowler <provider> -e/--excluded-checks ec2 rds
prowler <provider> -e/--excluded-checks s3_bucket_public_access iam_root_mfa_enabled
```
- Execute checks that appear in a JSON file:
```json

@@ -194,15 +194,45 @@ To view all `new` findings that have not been seen prior to this scan, click the

## **Step 9: Download the Outputs**

Once the scan is complete, you can download the output files generated by Prowler as a single `zip` file. This archive contains the CSV, JSON-OSCF, and HTML reports detailing the findings.
Once a scan is complete, navigate to the Scan Jobs section to download the output files generated by Prowler:

To download these files, click the **Download** button. This button becomes available only after the scan has finished.
<img src="../../img/scan_jobs_section.png" alt="Scan Jobs section" width="700"/>

These outputs are bundled into a single .zip archive containing:

- CSV report

- JSON-OSCF formatted results

- HTML report

- A folder with individual compliance reports

???+ note "Note"
    The Download button only becomes active after a scan completes successfully.

<img src="../../img/download_output.png" alt="Download output" width="700"/>

This action downloads a `zip` file containing an `output` folder, which includes the files mentioned above: CSV, JSON-OSCF, and HTML reports.
The `zip` file unpacks into a folder named like `prowler-output-<provider_id>-<timestamp>`, which includes all of the above outputs. In the example below, you can see the `.csv`, `.json`, and `.html` reports alongside a subfolder for detailed compliance checks.

<img src="../../img/output_folder.png" alt="Output folder" width="700"/>
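
As a quick sketch of what unpacking looks like from a terminal (the archive name below is illustrative; yours will embed your real provider ID and scan timestamp):

```console
unzip prowler-output-123456789012-20250101120000.zip
ls prowler-output-123456789012-20250101120000/
```
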
???+ note "API Note"
    To learn more about the API endpoint the UI uses to download ZIP exports, see: [Prowler API Reference - Download Scan Output](https://api.prowler.com/api/v1/docs#tag/Scan/operation/scans_report_retrieve)
    For more information about the API endpoint used by the UI to download the ZIP archive, refer to: [Prowler API Reference - Download Scan Output](https://api.prowler.com/api/v1/docs#tag/Scan/operation/scans_report_retrieve)

## **Step 10: Download specified compliance report**

Once your scan has finished, you don't need to grab the entire ZIP; just pull down the specific compliance report you want:

- Navigate to the **Compliance** section of the UI.

<img src="../../img/compliance_section.png" alt="Compliance section" width="700"/>

- Find the Framework report you need.

- Click its **Download** icon to retrieve that report's CSV file with all the detailed findings.

<img src="../../img/compliance_download.png" alt="Download compliance output" width="700"/>

???+ note "API Note"
    To fetch a single compliance report via the API, see the Retrieve compliance report as CSV endpoint in the Prowler API Reference: [Prowler API Reference - Retrieve compliance report as CSV](https://api.prowler.com/api/v1/docs#tag/Scan/operation/scans_compliance_retrieve)

@@ -884,7 +884,6 @@ description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
markers = "platform_python_implementation != \"PyPy\""
files = [
    {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
    {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
@@ -954,6 +953,7 @@
    {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
    {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
]
markers = {dev = "platform_python_implementation != \"PyPy\""}

[package.dependencies]
pycparser = "*"
@@ -1907,14 +1907,14 @@ typing-extensions = {version = ">=4,<5", markers = "python_version < \"3.10\""}

[[package]]
name = "h11"
version = "0.14.0"
version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
groups = ["main"]
files = [
    {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
    {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
    {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
    {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]

[[package]]
@@ -1947,19 +1947,19 @@ files = [

[[package]]
name = "httpcore"
version = "1.0.8"
version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
    {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
    {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
    {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
    {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]

[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
h11 = ">=0.16"

[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
@@ -3753,11 +3753,11 @@ description = "C parser in Python"
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
markers = "platform_python_implementation != \"PyPy\""
files = [
    {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"},
    {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"},
]
markers = {dev = "platform_python_implementation != \"PyPy\""}

[[package]]
name = "pydantic"
@@ -3838,6 +3838,26 @@ files = [
    {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
]

[[package]]
name = "pygithub"
version = "2.5.0"
description = "Use the full Github API v3"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
    {file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
    {file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
]

[package.dependencies]
Deprecated = "*"
pyjwt = {version = ">=2.4.0", extras = ["crypto"]}
pynacl = ">=1.4.0"
requests = ">=2.14.0"
typing-extensions = ">=4.0.0"
urllib3 = ">=1.26.0"

[[package]]
name = "pygments"
version = "2.19.1"
@@ -3924,6 +3944,33 @@ pyyaml = "*"

[package.extras]
extra = ["pygments (>=2.19.1)"]

[[package]]
name = "pynacl"
version = "1.5.0"
description = "Python binding to the Networking and Cryptography (NaCl) library"
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
    {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"},
    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"},
    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"},
    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"},
    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"},
    {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"},
    {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"},
    {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"},
    {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"},
    {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
]

[package.dependencies]
cffi = ">=1.4.1"

[package.extras]
docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]

[[package]]
name = "pyparsing"
version = "3.2.3"
@@ -5409,4 +5456,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">3.9.1,<3.13"
content-hash = "cc15c8ee6b064b3fda85177c0f1c24a57880c401682fe62daefc2d0f4043150a"
content-hash = "f504af1d00a1da9dd65269509daf32f919c13be6c46f25cd9ef9bfba6b9c9a07"

@@ -2,6 +2,67 @@

All notable changes to the **Prowler SDK** are documented in this file.

## [v5.8.0] (Prowler v5.8.0)

### Added
- Add CIS 1.11 compliance framework for Kubernetes. [(#7790)](https://github.com/prowler-cloud/prowler/pull/7790)
- Support `HTTPS_PROXY` and `K8S_SKIP_TLS_VERIFY` in Kubernetes. [(#7720)](https://github.com/prowler-cloud/prowler/pull/7720)
- Add Weight for Prowler ThreatScore scoring. [(#7795)](https://github.com/prowler-cloud/prowler/pull/7795)
- Add new check `entra_users_mfa_capable` for M365 provider. [(#7734)](https://github.com/prowler-cloud/prowler/pull/7734)
- Add new check `admincenter_organization_customer_lockbox_enabled` for M365 provider. [(#7732)](https://github.com/prowler-cloud/prowler/pull/7732)
- Add new check `admincenter_external_calendar_sharing_disabled` for M365 provider. [(#7733)](https://github.com/prowler-cloud/prowler/pull/7733)
- Add a level for Prowler ThreatScore in the accordion in Dashboard. [(#7739)](https://github.com/prowler-cloud/prowler/pull/7739)
- Add CIS 4.0 compliance framework for GCP. [(#7785)](https://github.com/prowler-cloud/prowler/pull/7785)
- Add `repository_has_codeowners_file` check for GitHub provider. [(#7752)](https://github.com/prowler-cloud/prowler/pull/7752)
- Add `repository_default_branch_requires_signed_commits` check for GitHub provider. [(#7777)](https://github.com/prowler-cloud/prowler/pull/7777)
- Add `repository_inactive_not_archived` check for GitHub provider. [(#7786)](https://github.com/prowler-cloud/prowler/pull/7786)
- Add `repository_dependency_scanning_enabled` check for GitHub provider. [(#7771)](https://github.com/prowler-cloud/prowler/pull/7771)
- Add `repository_secret_scanning_enabled` check for GitHub provider. [(#7759)](https://github.com/prowler-cloud/prowler/pull/7759)
- Add `repository_default_branch_requires_codeowners_review` check for GitHub provider. [(#7753)](https://github.com/prowler-cloud/prowler/pull/7753)
- Add NIS 2 compliance framework for AWS. [(#7839)](https://github.com/prowler-cloud/prowler/pull/7839)
- Add NIS 2 compliance framework for Azure. [(#7857)](https://github.com/prowler-cloud/prowler/pull/7857)
- Add search bar in Dashboard Overview page. [(#7804)](https://github.com/prowler-cloud/prowler/pull/7804)

### Fixed
- Fix `m365_powershell test_credentials` to use sanitized credentials. [(#7761)](https://github.com/prowler-cloud/prowler/pull/7761)
- Fix `admincenter_users_admins_reduced_license_footprint` check logic to pass when admin user has no license. [(#7779)](https://github.com/prowler-cloud/prowler/pull/7779)
- Fix `m365_powershell` to close the PowerShell sessions in msgraph services. [(#7816)](https://github.com/prowler-cloud/prowler/pull/7816)
- Fix `defender_ensure_notify_alerts_severity_is_high` check to accept high or lower severity. [(#7862)](https://github.com/prowler-cloud/prowler/pull/7862)
- Replace `Directory.Read.All` permission with `Domain.Read.All`, which is more restrictive. [(#7888)](https://github.com/prowler-cloud/prowler/pull/7888)
- Split calls to list Azure Functions attributes. [(#7778)](https://github.com/prowler-cloud/prowler/pull/7778)

---

## [v5.7.0] (Prowler v5.7.0)

### Added
- Update the supported compliance list for each provider in the docs. [(#7694)](https://github.com/prowler-cloud/prowler/pull/7694)
- Allow setting cluster name in in-cluster mode in Kubernetes. [(#7695)](https://github.com/prowler-cloud/prowler/pull/7695)
- Add Prowler ThreatScore for M365 provider. [(#7692)](https://github.com/prowler-cloud/prowler/pull/7692)
- Add GitHub provider. [(#5787)](https://github.com/prowler-cloud/prowler/pull/5787)
- Add `repository_default_branch_requires_multiple_approvals` check for GitHub provider. [(#6160)](https://github.com/prowler-cloud/prowler/pull/6160)
- Add `repository_default_branch_protection_enabled` check for GitHub provider. [(#6161)](https://github.com/prowler-cloud/prowler/pull/6161)
- Add `repository_default_branch_requires_linear_history` check for GitHub provider. [(#6162)](https://github.com/prowler-cloud/prowler/pull/6162)
- Add `repository_default_branch_disallows_force_push` check for GitHub provider. [(#6197)](https://github.com/prowler-cloud/prowler/pull/6197)
- Add `repository_default_branch_deletion_disabled` check for GitHub provider. [(#6200)](https://github.com/prowler-cloud/prowler/pull/6200)
- Add `repository_default_branch_status_checks_required` check for GitHub provider. [(#6204)](https://github.com/prowler-cloud/prowler/pull/6204)
- Add `repository_default_branch_protection_applies_to_admins` check for GitHub provider. [(#6205)](https://github.com/prowler-cloud/prowler/pull/6205)
- Add `repository_branch_delete_on_merge_enabled` check for GitHub provider. [(#6209)](https://github.com/prowler-cloud/prowler/pull/6209)
- Add `repository_default_branch_requires_conversation_resolution` check for GitHub provider. [(#6208)](https://github.com/prowler-cloud/prowler/pull/6208)
- Add `organization_members_mfa_required` check for GitHub provider. [(#6304)](https://github.com/prowler-cloud/prowler/pull/6304)
- Add GitHub provider documentation and CIS v1.0.0 compliance. [(#6116)](https://github.com/prowler-cloud/prowler/pull/6116)
- Add CIS 5.0 compliance framework for AWS. [(#7766)](https://github.com/prowler-cloud/prowler/pull/7766)

### Fixed
- Update CIS 4.0 for M365 provider. [(#7699)](https://github.com/prowler-cloud/prowler/pull/7699)
- Update and upgrade CIS for all the providers. [(#7738)](https://github.com/prowler-cloud/prowler/pull/7738)
- Cover policies with conditions with SNS endpoint in `sns_topics_not_publicly_accessible`. [(#7750)](https://github.com/prowler-cloud/prowler/pull/7750)
- Change severity logic for `ec2_securitygroup_allow_ingress_from_internet_to_all_ports` check. [(#7764)](https://github.com/prowler-cloud/prowler/pull/7764)
- Automatically encrypt password in Microsoft365 provider. [(#7784)](https://github.com/prowler-cloud/prowler/pull/7784)
- Split calls to list Azure Functions attributes. [(#7778)](https://github.com/prowler-cloud/prowler/pull/7778)

---

## [v5.6.0] (Prowler v5.6.0)

### Added
@@ -51,6 +112,8 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Modified check `exchange_mailbox_properties_auditing_enabled` to make it configurable. [(#7662)](https://github.com/prowler-cloud/prowler/pull/7662)
- Add snapshots to m365 documentation. [(#7673)](https://github.com/prowler-cloud/prowler/pull/7673)
- Add support for static credentials for sending findings to Amazon S3 and AWS Security Hub. [(#7322)](https://github.com/prowler-cloud/prowler/pull/7322)
- Add Prowler ThreatScore for M365 provider. [(#7692)](https://github.com/prowler-cloud/prowler/pull/7692)
- Add Microsoft User and User Credential auth to reports. [(#7681)](https://github.com/prowler-cloud/prowler/pull/7681)

### Fixed


@@ -50,6 +50,7 @@ from prowler.lib.outputs.compliance.aws_well_architected.aws_well_architected im
from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
from prowler.lib.outputs.compliance.cis.cis_github import GithubCIS
from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
from prowler.lib.outputs.compliance.compliance import display_compliance_table
@@ -79,6 +80,9 @@ from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_azur
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_gcp import (
    ProwlerThreatScoreGCP,
)
from prowler.lib.outputs.compliance.prowler_threatscore.prowler_threatscore_m365 import (
    ProwlerThreatScoreM365,
)
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.finding import Finding
from prowler.lib.outputs.html.html import HTML
@@ -93,6 +97,7 @@ from prowler.providers.azure.models import AzureOutputOptions
from prowler.providers.common.provider import Provider
from prowler.providers.common.quick_inventory import run_provider_quick_inventory
from prowler.providers.gcp.models import GCPOutputOptions
from prowler.providers.github.models import GithubOutputOptions
from prowler.providers.kubernetes.models import KubernetesOutputOptions
from prowler.providers.m365.models import M365OutputOptions
from prowler.providers.nhn.models import NHNOutputOptions
@@ -277,6 +282,10 @@ def prowler():
        output_options = KubernetesOutputOptions(
            args, bulk_checks_metadata, global_provider.identity
        )
    elif provider == "github":
        output_options = GithubOutputOptions(
            args, bulk_checks_metadata, global_provider.identity
        )
    elif provider == "m365":
        output_options = M365OutputOptions(
            args, bulk_checks_metadata, global_provider.identity
@@ -726,6 +735,18 @@ def prowler():
                    )
                    generated_outputs["compliance"].append(cis)
                    cis.batch_write_data_to_file()
                elif compliance_name == "prowler_threatscore_m365":
                    filename = (
                        f"{output_options.output_directory}/compliance/"
                        f"{output_options.output_filename}_{compliance_name}.csv"
                    )
                    prowler_threatscore = ProwlerThreatScoreM365(
                        findings=finding_outputs,
                        compliance=bulk_compliance_frameworks[compliance_name],
                        file_path=filename,
                    )
                    generated_outputs["compliance"].append(prowler_threatscore)
                    prowler_threatscore.batch_write_data_to_file()
                else:
                    filename = (
                        f"{output_options.output_directory}/compliance/"
@@ -767,6 +788,35 @@ def prowler():
                    generated_outputs["compliance"].append(generic_compliance)
                    generic_compliance.batch_write_data_to_file()

        elif provider == "github":
            for compliance_name in input_compliance_frameworks:
                if compliance_name.startswith("cis_"):
                    # Generate CIS Finding Object
                    filename = (
                        f"{output_options.output_directory}/compliance/"
                        f"{output_options.output_filename}_{compliance_name}.csv"
                    )
                    cis = GithubCIS(
                        findings=finding_outputs,
                        compliance=bulk_compliance_frameworks[compliance_name],
                        file_path=filename,
                    )
                    generated_outputs["compliance"].append(cis)
                    cis.batch_write_data_to_file()
                else:
                    filename = (
                        f"{output_options.output_directory}/compliance/"
                        f"{output_options.output_filename}_{compliance_name}.csv"
                    )
                    generic_compliance = GenericCompliance(
                        findings=finding_outputs,
                        compliance=bulk_compliance_frameworks[compliance_name],
                        create_file_descriptor=True,
                        file_path=filename,
                    )
                    generated_outputs["compliance"].append(generic_compliance)
                    generic_compliance.batch_write_data_to_file()

        # AWS Security Hub Integration
        if provider == "aws":
            # Send output to S3 if needed (-B / -D) for all the output formats

@@ -12,7 +12,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
|
||||
@@ -33,7 +33,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
|
||||
@@ -54,7 +54,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
|
||||
@@ -76,7 +76,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.",
|
||||
@@ -97,7 +97,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
|
||||
@@ -118,7 +118,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.",
|
||||
@@ -139,7 +139,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.",
|
||||
@@ -161,7 +161,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.",
|
||||
@@ -182,7 +182,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.",
|
||||
@@ -203,7 +203,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
|
||||
@@ -224,7 +224,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
|
||||
@@ -245,7 +245,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
|
||||
@@ -266,7 +266,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable IAM Access analyzer for IAM policies about all resources in each region. IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
@@ -287,7 +287,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.",
@@ -308,7 +308,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
@@ -329,7 +329,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.",
@@ -350,7 +350,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device. **Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
@@ -371,7 +371,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
@@ -392,7 +392,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
@@ -413,7 +413,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.",
@@ -434,7 +434,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
@@ -455,8 +455,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -477,8 +477,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -499,8 +499,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -521,8 +521,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -544,8 +544,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -566,7 +566,7 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
+"Section": "2 Storage",
 "SubSection": "2.2. Elastic Compute Cloud (EC2)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
@@ -589,8 +589,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -611,7 +611,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
@@ -632,7 +632,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -653,7 +653,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -674,7 +674,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.",
@@ -695,7 +695,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "CloudTrail logs a record of every API call made in your AWS account. These logs file are stored in an S3 bucket. It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.",
@@ -716,7 +716,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs. Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
@@ -737,7 +737,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.",
@@ -758,7 +758,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.",
@@ -779,7 +779,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
@@ -800,7 +800,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.",
@@ -821,7 +821,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.",
@@ -842,7 +842,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
@@ -863,7 +863,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.",
@@ -884,7 +884,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.",
@@ -905,7 +905,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
@@ -926,7 +926,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
@@ -947,7 +947,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.",
@@ -968,7 +968,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.",
@@ -989,7 +989,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
@@ -1010,7 +1010,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.",
@@ -1031,7 +1031,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.",
@@ -1052,7 +1052,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1073,7 +1073,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
@@ -1094,7 +1094,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.",
@@ -1115,7 +1115,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
@@ -1136,7 +1136,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1159,7 +1159,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
@@ -1182,7 +1182,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
@@ -1203,7 +1203,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
@@ -1224,7 +1224,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.",
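Every hunk in the compliance file above, and in the second compliance file whose hunks follow (its line numbering restarts at 12), makes the same mechanical change: the trailing dot is stripped from the numeric prefix of the `Section` and, where present, `SubSection` values, so `"1. Identity and Access Management"` becomes `"1 Identity and Access Management"`. A change of this shape could plausibly be produced by a small script; the sketch below is a hypothetical Python pass, not the repository's actual tooling, and the file glob and the `Requirements` key are assumptions based on the hunks rather than on the repo layout.

```python
import glob
import json
import re

# Strip the trailing dot from a numeric section prefix such as "1." or "2.1."
PREFIX = re.compile(r"^(\d+(?:\.\d+)*)\.(\s)")

def normalize(value: str) -> str:
    return PREFIX.sub(r"\1\2", value)

# Hypothetical path pattern for the two CIS compliance files in this diff.
for path in glob.glob("prowler/compliance/aws/cis_*_aws.json"):
    with open(path) as f:
        data = json.load(f)
    for req in data.get("Requirements", []):
        for attr in req.get("Attributes", []):
            for key in ("Section", "SubSection"):
                if key in attr:
                    attr[key] = normalize(attr[key])
    with open(path, "w") as f:
        json.dump(data, f, indent=4)
```

Note that the first file's EC2 hunk above leaves `"SubSection": "2.2. Elastic Compute Cloud (EC2)"` unchanged as context, so a blanket rewrite like this sketch would go slightly further than the commit actually does.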
@@ -12,7 +12,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
@@ -33,7 +33,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
@@ -54,7 +54,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
@@ -76,7 +76,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.",
@@ -97,7 +97,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
@@ -118,7 +118,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.",
@@ -139,7 +139,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.",
@@ -161,7 +161,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.",
@@ -182,7 +182,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.",
@@ -203,7 +203,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
@@ -224,7 +224,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
@@ -245,7 +245,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
@@ -266,7 +266,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable IAM Access analyzer for IAM policies about all resources in each region. IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
@@ -287,7 +287,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.",
@@ -308,7 +308,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
@@ -329,7 +329,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.",
@@ -350,7 +350,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device. **Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
@@ -371,7 +371,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
@@ -392,7 +392,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
@@ -413,7 +413,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.",
@@ -434,7 +434,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
@@ -455,8 +455,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -477,8 +477,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -499,8 +499,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -521,8 +521,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -544,8 +544,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -566,8 +566,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.2. Elastic Compute Cloud (EC2)",
+"Section": "2 Storage",
+"SubSection": "2.2 Elastic Compute Cloud (EC2)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -589,8 +589,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -611,8 +611,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -633,8 +633,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -655,7 +655,7 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
+"Section": "2 Storage",
 "SubSection": "2.4 Elastic File System (EFS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
@@ -677,7 +677,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
@@ -698,7 +698,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -719,7 +719,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -740,7 +740,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.",
@@ -761,7 +761,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "CloudTrail logs a record of every API call made in your AWS account. These logs file are stored in an S3 bucket. It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.",
@@ -782,7 +782,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs. Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
@@ -803,7 +803,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.",
@@ -824,7 +824,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.",
@@ -845,7 +845,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
@@ -866,7 +866,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.",
@@ -887,7 +887,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.",
@@ -908,7 +908,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
@@ -929,7 +929,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.",
@@ -950,7 +950,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.",
@@ -971,7 +971,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
@@ -992,7 +992,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
@@ -1013,7 +1013,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.",
@@ -1034,7 +1034,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.",
@@ -1055,7 +1055,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.",
@@ -1076,7 +1076,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
@@ -1097,7 +1097,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.",
@@ -1118,7 +1118,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.",
@@ -1139,7 +1139,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1160,7 +1160,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
@@ -1181,7 +1181,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.",
@@ -1202,7 +1202,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
@@ -1223,7 +1223,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1246,7 +1246,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
|
||||
@@ -1269,7 +1269,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
|
||||
@@ -1292,7 +1292,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
|
||||
@@ -1313,7 +1313,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
|
||||
@@ -1334,7 +1334,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.",
|
||||
|
||||
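The hunks above and below all apply one mechanical change across the CIS compliance JSON files: the trailing period after the numeric prefix in `Section` and `SubSection` labels is dropped (e.g. `"4. Monitoring"` becomes `"4 Monitoring"`, `"2.1. Simple Storage Service (S3)"` becomes `"2.1 Simple Storage Service (S3)"`). A minimal sketch of how such a sweep could be scripted is shown here; the glob path and the `Requirements` key are assumptions for illustration, not the tooling actually used in these commits:

```python
# Sketch only: strip the trailing period from the numeric prefix of
# Section/SubSection labels in Prowler-style compliance JSON files.
# The file glob and the "Requirements" key are assumed, not confirmed.
import glob
import json
import re

# Matches "4. " or "2.1. " at the start of a label.
LABEL = re.compile(r"^(\d+(?:\.\d+)*)\.\s+")

def normalize(label: str) -> str:
    # "4. Monitoring" -> "4 Monitoring"; labels without a trailing
    # period (e.g. "2.4 Elastic File System (EFS)") are left untouched.
    return LABEL.sub(r"\1 ", label)

for path in glob.glob("prowler/compliance/aws/cis_*_aws.json"):
    with open(path) as f:
        data = json.load(f)
    for req in data.get("Requirements", []):
        for attr in req.get("Attributes", []):
            for key in ("Section", "SubSection"):
                if key in attr:
                    attr[key] = normalize(attr[key])
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
```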
@@ -12,7 +12,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
@@ -33,7 +33,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
@@ -54,7 +54,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
@@ -76,7 +76,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.",
@@ -97,7 +97,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
@@ -118,7 +118,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.",
@@ -139,7 +139,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.",
@@ -161,7 +161,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.",
@@ -182,7 +182,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.",
@@ -203,7 +203,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
@@ -224,7 +224,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
@@ -245,7 +245,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
@@ -266,7 +266,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable IAM Access analyzer for IAM policies about all resources in each region. IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
@@ -287,7 +287,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.",
@@ -308,7 +308,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "AWS CloudShell is a convenient way of running CLI commands against AWS services; a managed IAM policy ('AWSCloudShellFullAccess') provides full access to CloudShell, which allows file upload and download capability between a user's local system and the CloudShell environment. Within the CloudShell environment a user has sudo permissions, and can access the internet. So it is feasible to install file transfer software (for example) and move data from CloudShell to external internet servers.",
@@ -329,7 +329,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
@@ -350,7 +350,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.",
@@ -371,7 +371,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device. **Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
@@ -392,7 +392,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
@@ -413,7 +413,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
@@ -434,7 +434,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.",
@@ -455,7 +455,7 @@
 ],
 "Attributes": [
 {
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
@@ -476,7 +476,7 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
+"Section": "2 Storage",
 "SubSection": "2.1. Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
@@ -499,8 +499,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -521,8 +521,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -544,8 +544,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -566,8 +566,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.2. Elastic Compute Cloud (EC2)",
+"Section": "2 Storage",
+"SubSection": "2.2 Elastic Compute Cloud (EC2)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -589,8 +589,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -611,8 +611,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -633,8 +633,8 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -655,7 +655,7 @@
 ],
 "Attributes": [
 {
-"Section": "2. Storage",
+"Section": "2 Storage",
 "SubSection": "2.4 Elastic File System (EFS)",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
@@ -677,7 +677,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
@@ -698,7 +698,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -719,7 +719,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -740,7 +740,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.",
@@ -761,7 +761,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "CloudTrail logs a record of every API call made in your AWS account. These logs file are stored in an S3 bucket. It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.",
@@ -782,7 +782,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs. Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.",
@@ -803,7 +803,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.",
@@ -824,7 +824,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.",
@@ -845,7 +845,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
@@ -866,7 +866,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.",
@@ -887,7 +887,7 @@
 ],
 "Attributes": [
 {
-"Section": "3. Logging",
+"Section": "3 Logging",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.",
@@ -908,7 +908,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
@@ -929,7 +929,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.",
@@ -950,7 +950,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.",
@@ -971,7 +971,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
@@ -992,7 +992,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
@@ -1013,7 +1013,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.",
@@ -1034,7 +1034,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.",
@@ -1055,7 +1055,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.",
@@ -1076,7 +1076,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
@@ -1097,7 +1097,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.",
@@ -1118,7 +1118,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.",
@@ -1139,7 +1139,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1160,7 +1160,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
@@ -1181,7 +1181,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.",
@@ -1202,7 +1202,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
@@ -1223,7 +1223,7 @@
 ],
 "Attributes": [
 {
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1246,7 +1246,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
@@ -1269,7 +1269,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
@@ -1292,7 +1292,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
@@ -1313,7 +1313,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
@@ -1334,7 +1334,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.",
@@ -1356,7 +1356,7 @@
 ],
 "Attributes": [
 {
-"Section": "5. Networking",
+"Section": "5 Networking",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method).",

@@ -12,7 +12,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
|
||||
@@ -33,7 +33,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
|
||||
@@ -54,7 +54,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When cerating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
|
||||
@@ -76,7 +76,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.",
|
||||
@@ -97,7 +97,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
|
||||
@@ -118,7 +118,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.",
|
||||
@@ -139,7 +139,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.",
|
||||
@@ -161,7 +161,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.",
|
||||
@@ -182,7 +182,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.",
|
||||
@@ -203,7 +203,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
|
||||
@@ -224,7 +224,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
|
||||
@@ -245,7 +245,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
|
||||
@@ -266,7 +266,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable IAM Access analyzer for IAM policies about all resources in each region. IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.",
|
||||
@@ -287,7 +287,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.",
@@ -306,7 +306,7 @@
"Checks": [],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "AWS CloudShell is a convenient way of running CLI commands against AWS services; a managed IAM policy ('AWSCloudShellFullAccess') provides full access to CloudShell, which allows file upload and download capability between a user's local system and the CloudShell environment. Within the CloudShell environment a user has sudo permissions, and can access the internet. So it is feasible to install file transfer software (for example) and move data from CloudShell to external internet servers.",
@@ -327,7 +327,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
@@ -348,7 +348,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.",
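The root-access-key control that closes the hunk above is one of the simplest to automate: the IAM account summary carries a dedicated counter. A minimal sketch, not this PR's implementation:

```python
import boto3

def root_access_keys_present() -> bool:
    """IAM's account summary counts access keys on the root user."""
    summary = boto3.client("iam").get_account_summary()["SummaryMap"]
    return summary.get("AccountAccessKeysPresent", 0) > 0

# A compliant account prints False here.
print(root_access_keys_present())
```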
@@ -369,7 +369,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device. **Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.",
@@ -390,7 +390,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
@@ -411,7 +411,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
@@ -432,7 +432,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.",
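Both the minimum-length control above and the password-reuse control in the next hunk read off the same IAM API response. As an illustrative sketch (assumes configured credentials; not the check code touched by this PR):

```python
import boto3
from botocore.exceptions import ClientError

def password_policy_compliant(min_length: int = 14) -> bool:
    iam = boto3.client("iam")
    try:
        policy = iam.get_account_password_policy()["PasswordPolicy"]
    except ClientError:  # NoSuchEntity: no account password policy at all
        return False
    # Length backs this hunk; reuse prevention backs the hunk that follows.
    return (policy.get("MinimumPasswordLength", 0) >= min_length
            and policy.get("PasswordReusePrevention", 0) >= 1)
```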
@@ -453,7 +453,7 @@
],
"Attributes": [
{
-"Section": "1. Identity and Access Management",
+"Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
@@ -474,8 +474,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
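The HTTPS-only recommendation above is usually implemented as an explicit `Deny` on `aws:SecureTransport == false` in the bucket policy. A minimal detection sketch (error handling for buckets without a policy is omitted; illustrative only):

```python
import json

import boto3

def bucket_enforces_tls(bucket: str) -> bool:
    """Look for an explicit Deny when aws:SecureTransport is false."""
    s3 = boto3.client("s3")
    policy = json.loads(s3.get_bucket_policy(Bucket=bucket)["Policy"])
    for stmt in policy.get("Statement", []):
        cond = stmt.get("Condition", {}).get("Bool", {})
        if (stmt.get("Effect") == "Deny"
                and cond.get("aws:SecureTransport") == "false"):
            return True
    return False
```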
@@ -496,8 +496,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,8 +518,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -541,8 +541,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.1. Simple Storage Service (S3)",
+"Section": "2 Storage",
+"SubSection": "2.1 Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
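The Block Public Access settings described above are directly queryable per bucket. A hedged sketch of what a bucket-level check can look like (a full check would also query the account-level setting via the `s3control` client; illustrative only):

```python
import boto3

def bucket_blocks_public_access(bucket: str) -> bool:
    """All four public-access-block flags must be on for compliance."""
    s3 = boto3.client("s3")
    cfg = s3.get_public_access_block(Bucket=bucket)[
        "PublicAccessBlockConfiguration"]
    return all(cfg.get(flag, False) for flag in (
        "BlockPublicAcls", "IgnorePublicAcls",
        "BlockPublicPolicy", "RestrictPublicBuckets"))
```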
@@ -563,8 +563,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.2. Elastic Compute Cloud (EC2)",
+"Section": "2 Storage",
+"SubSection": "2.2 Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
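Forcing EBS encryption at volume creation corresponds to the regional "EBS encryption by default" setting, which is a one-call check. A minimal sketch (the setting is per region, so a full check iterates over all enabled regions):

```python
import boto3

def ebs_encryption_by_default(region: str) -> bool:
    ec2 = boto3.client("ec2", region_name=region)
    return ec2.get_ebs_encryption_by_default()["EbsEncryptionByDefault"]
```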
@@ -585,8 +585,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -607,8 +607,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -629,8 +629,8 @@
],
"Attributes": [
{
-"Section": "2. Storage",
-"SubSection": "2.3. Relational Database Service (RDS)",
+"Section": "2 Storage",
+"SubSection": "2.3 Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to anypublicly accessible RDS database instance, you must disable the database PubliclyAccessible flag and update the VPC security group associated with the instance",
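The `PubliclyAccessible` flag called out above is reported directly by the RDS API, so a detection pass is a short loop. An illustrative sketch (assumes configured credentials; not this PR's check code):

```python
import boto3

def publicly_accessible_rds_instances(region: str) -> list[str]:
    """Return identifiers of instances with PubliclyAccessible enabled."""
    rds = boto3.client("rds", region_name=region)
    exposed = []
    for page in rds.get_paginator("describe_db_instances").paginate():
        for db in page["DBInstances"]:
            if db.get("PubliclyAccessible"):
                exposed.append(db["DBInstanceIdentifier"])
    return exposed
```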
@@ -651,7 +651,7 @@
],
"Attributes": [
{
-"Section": "2. Storage",
+"Section": "2 Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
@@ -673,7 +673,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
@@ -694,7 +694,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -715,7 +715,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.",
@@ -736,7 +736,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.",
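Log file validation, the subject of the hunk above, is exposed as a per-trail boolean. A minimal detection sketch (illustrative only):

```python
import boto3

def trails_without_validation(region: str) -> list[str]:
    """Names of trails that do not have log file validation enabled."""
    ct = boto3.client("cloudtrail", region_name=region)
    trails = ct.describe_trails()["trailList"]
    return [t["Name"] for t in trails
            if not t.get("LogFileValidationEnabled")]
```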
@@ -757,7 +757,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.",
@@ -778,7 +778,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.",
@@ -799,7 +799,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
@@ -820,7 +820,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.",
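As the description above notes, rotation only applies to symmetric customer-managed keys, so a check has to filter on key metadata first. A hedged sketch (illustrative; field names follow the current KMS API):

```python
import boto3
from botocore.exceptions import ClientError

def customer_keys_without_rotation(region: str) -> list[str]:
    kms = boto3.client("kms", region_name=region)
    offenders = []
    for page in kms.get_paginator("list_keys").paginate():
        for key in page["Keys"]:
            meta = kms.describe_key(KeyId=key["KeyId"])["KeyMetadata"]
            # Rotation applies only to customer-managed symmetric CMKs.
            if (meta["KeyManager"] != "CUSTOMER"
                    or meta.get("KeySpec") != "SYMMETRIC_DEFAULT"):
                continue
            try:
                status = kms.get_key_rotation_status(KeyId=key["KeyId"])
                if not status["KeyRotationEnabled"]:
                    offenders.append(key["KeyId"])
            except ClientError:
                offenders.append(key["KeyId"])
    return offenders
```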
@@ -841,7 +841,7 @@
],
"Attributes": [
{
-"Section": "3. Logging",
+"Section": "3 Logging",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.",
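A flow-log check like the one above reduces to comparing the set of VPCs against the set of flow-log resource IDs. A minimal sketch (illustrative; a stricter check would also inspect the traffic type):

```python
import boto3

def vpcs_without_flow_logs(region: str) -> list[str]:
    ec2 = boto3.client("ec2", region_name=region)
    vpc_ids = [v["VpcId"] for v in ec2.describe_vpcs()["Vpcs"]]
    logged = {fl["ResourceId"] for fl in ec2.describe_flow_logs()["FlowLogs"]}
    return [v for v in vpc_ids if v not in logged]
```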
@@ -862,7 +862,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
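This hunk starts a long run of metric-filter-and-alarm controls; they all verify the same pattern, namely a CloudWatch Logs metric filter on the CloudTrail log group plus an alarm on the emitted metric. A hedged sketch of the shared logic (the log group name and filter fragment below are hypothetical examples, not values from this repository):

```python
import boto3

def log_group_has_filter_and_alarm(log_group: str, fragment: str) -> bool:
    """True if a metric filter matching `fragment` exists and is alarmed."""
    logs = boto3.client("logs")
    cw = boto3.client("cloudwatch")
    filters = logs.describe_metric_filters(
        logGroupName=log_group)["metricFilters"]
    for mf in filters:
        if fragment not in (mf.get("filterPattern") or ""):
            continue
        for t in mf["metricTransformations"]:
            alarms = cw.describe_alarms_for_metric(
                MetricName=t["metricName"], Namespace=t["metricNamespace"])
            if alarms["MetricAlarms"]:
                return True
    return False

# e.g. log_group_has_filter_and_alarm("my-trail-group", "UnauthorizedOperation")
```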
@@ -883,7 +883,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.",
@@ -904,7 +904,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.",
@@ -925,7 +925,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
@@ -946,7 +946,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.",
@@ -967,7 +967,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.",
@@ -988,7 +988,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.",
@@ -1009,7 +1009,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.",
@@ -1030,7 +1030,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
@@ -1051,7 +1051,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.",
@@ -1072,7 +1072,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.",
@@ -1093,7 +1093,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1114,7 +1114,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
@@ -1135,7 +1135,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.",
@@ -1156,7 +1156,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
@@ -1177,7 +1177,7 @@
],
"Attributes": [
{
-"Section": "4. Monitoring",
+"Section": "4 Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.",
@@ -1200,7 +1200,7 @@
],
"Attributes": [
{
-"Section": "5. Networking",
+"Section": "5 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
@@ -1223,7 +1223,7 @@
],
"Attributes": [
{
-"Section": "5. Networking",
+"Section": "5 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
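The open-admin-port controls above and in the next hunks boil down to scanning security group ingress rules for world-open access to ports 22 and 3389. A minimal IPv4 sketch (illustrative; an IPv6 variant would inspect `Ipv6Ranges` for `::/0`):

```python
import boto3

ADMIN_PORTS = {22, 3389}

def rule_is_open_admin(perm: dict) -> bool:
    open_v4 = any(r.get("CidrIp") == "0.0.0.0/0"
                  for r in perm.get("IpRanges", []))
    if not open_v4:
        return False
    if perm.get("IpProtocol") == "-1":  # all protocols, all ports
        return True
    lo, hi = perm.get("FromPort"), perm.get("ToPort")
    return lo is not None and any(lo <= p <= hi for p in ADMIN_PORTS)

def offending_security_groups(region: str) -> list[str]:
    ec2 = boto3.client("ec2", region_name=region)
    groups = ec2.describe_security_groups()["SecurityGroups"]
    return [g["GroupId"] for g in groups
            if any(rule_is_open_admin(p) for p in g["IpPermissions"])]
```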
@@ -1246,7 +1246,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.",
|
||||
@@ -1267,7 +1267,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
|
||||
@@ -1288,7 +1288,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.",
|
||||
@@ -1309,7 +1309,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5. Networking",
|
||||
"Section": "5 Networking",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method).",
|
||||
|
||||
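The IMDSv2 control that closes this file maps to the `HttpTokens` attribute of each instance's metadata options: `required` enforces the session-oriented IMDSv2, while `optional` still permits IMDSv1. A minimal detection sketch (illustrative only):

```python
import boto3

def instances_allowing_imdsv1(region: str) -> list[str]:
    """Instance IDs where IMDSv2 tokens are not strictly required."""
    ec2 = boto3.client("ec2", region_name=region)
    hits = []
    for page in ec2.get_paginator("describe_instances").paginate():
        for res in page["Reservations"]:
            for inst in res["Instances"]:
                opts = inst.get("MetadataOptions", {})
                if opts.get("HttpTokens") != "required":
                    hits.append(inst["InstanceId"])
    return hits
```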
@@ -13,7 +13,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of the Acceptable Use Policy or indicative of a likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.",
@@ -36,7 +35,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.",
@@ -59,7 +57,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.",
@@ -82,7 +79,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be deleted.",
@@ -105,7 +101,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is kept charged and secured, independent of any individual personal devices (non-personal virtual MFA). This lessens the risks of losing access to the MFA due to device loss, device trade-in, or if the individual owning the device is no longer employed at the company.",
@@ -128,7 +123,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.",
@@ -151,7 +145,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.",
@@ -174,7 +167,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure passwords are at least a given length. It is recommended that the password policy require a minimum password length 14.",
@@ -197,7 +189,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.",
@@ -220,7 +211,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.",
@@ -243,7 +233,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When creating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.",
@@ -267,7 +256,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused for 45 days or more be deactivated or removed.",
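The 45-day unused-credential control above is typically evaluated from the IAM credential report. A hedged sketch covering the console-password case (illustrative; a real check polls until the report state is `COMPLETE` and also inspects the access-key columns):

```python
import csv
import io
from datetime import datetime, timedelta, timezone

import boto3

def stale_console_passwords(max_age_days: int = 45) -> list[str]:
    iam = boto3.client("iam")
    iam.generate_credential_report()  # may need polling until ready
    body = iam.get_credential_report()["Content"].decode()
    cutoff = datetime.now(timezone.utc) - timedelta(days=max_age_days)
    stale = []
    for row in csv.DictReader(io.StringIO(body)):
        used = row["password_last_used"]
        if (row["password_enabled"] == "true"
                and used not in ("N/A", "no_information")
                and datetime.fromisoformat(used) < cutoff):
            stale.append(row["user"])
    return stale
```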
@@ -290,7 +278,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)",
@@ -313,7 +300,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be rotated regularly.",
@@ -336,7 +322,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM users are granted access to services, functions, and data through IAM policies. There are four ways to define policies for a user: 1) Edit the user policy directly, also known as an inline or user policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy; 4) add the user to an IAM group that has an inline policy.Only the third implementation is recommended.",
@@ -360,7 +345,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered standard security advice to grant least privilege—that is, granting only the permissions required to perform a task. Determine what users need to do, and then craft policies for them that allow the users to perform only those tasks, instead of granting full administrative privileges.",
@@ -383,7 +367,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role, with the appropriate policy assigned, to allow authorized users to manage incidents with AWS Support.",
@@ -406,7 +389,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. AWS Access means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.",
@@ -429,7 +411,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use AWS Certificate Manager (ACM) or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.",
@@ -452,7 +433,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable the IAM Access Analyzer for IAM policies regarding all resources in each active AWS region.IAM Access Analyzer is a technology introduced at AWS reinvent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. The results allow you to determine whether an unintended user is permitted, making it easier for administrators to monitor least privilege access. Access Analyzer analyzes only the policies that are applied to resources in the same AWS Region.",
@@ -475,7 +455,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.",
@@ -498,7 +477,6 @@
"Attributes": [
{
"Section": "1 Identity and Access Management",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "AWS CloudShell is a convenient way of running CLI commands against AWS services; a managed IAM policy ('AWSCloudShellFullAccess') provides full access to CloudShell, which allows file upload and download capability between a user's local system and the CloudShell environment. Within the CloudShell environment, a user has sudo permissions and can access the internet. Therefore, it is feasible to install file transfer software, for example, and move data from CloudShell to external internet servers.",
@@ -730,7 +708,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).",
@@ -753,7 +730,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or remained unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled for all CloudTrails.",
@@ -776,7 +752,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration items (AWS resources), relationships between configuration items (AWS resources), and any configuration changes between resources. It is recommended that AWS Config be enabled in all regions.",
@@ -799,7 +774,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Server access logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request worked, and the time and date the request was processed. It is recommended that server access logging be enabled on the CloudTrail S3 bucket.",
@@ -822,7 +796,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer-created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.",
@@ -845,7 +818,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key, which is key material stored within the KMS that is tied to the key ID of the customer-created customer master key (CMK). The backing key is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can occur transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation cannot be enabled for any asymmetric CMK.",
@@ -868,7 +840,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet Rejects for VPCs.",
@@ -891,7 +862,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "S3 object-level API operations, such as GetObject, DeleteObject, and PutObject, are referred to as data events. By default, CloudTrail trails do not log data events, so it is recommended to enable object-level logging for S3 buckets.",
@@ -914,7 +884,6 @@
"Attributes": [
{
"Section": "3 Logging",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "S3 object-level API operations, such as GetObject, DeleteObject, and PutObject, are referred to as data events. By default, CloudTrail trails do not log data events, so it is recommended to enable object-level logging for S3 buckets.",
@@ -937,7 +906,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.",
@@ -960,7 +928,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).",
@@ -983,7 +950,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts to detect unauthorized use or attempts to use the root account.",
@@ -1006,7 +972,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms.It is recommended that a metric filter and alarm be established for changes made to Identity and Access Management (IAM) policies.",
@@ -1029,7 +994,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be used to detect changes to CloudTrail's configurations.",
@@ -1052,7 +1016,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.",
@@ -1075,7 +1038,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer-created CMKs that have changed state to disabled or are scheduled for deletion.",
@@ -1098,7 +1060,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.",
@@ -1121,7 +1082,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to AWS Config's configurations.",
@@ -1144,7 +1104,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. Security groups are stateful packet filters that control ingress and egress traffic within a VPC.It is recommended that a metric filter and alarm be established to detect changes to security groups.",
@@ -1167,7 +1126,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for any changes made to NACLs.",
@@ -1190,7 +1148,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. Network gateways are required to send and receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.",
@@ -1213,7 +1170,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways.It is recommended that a metric filter and alarm be established for changes to route tables.",
@@ -1236,7 +1192,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is possible to have more than one VPC within an account; additionally, it is also possible to create a peer connection between two VPCs, enabling network traffic to route between them.It is recommended that a metric filter and alarm be established for changes made to VPCs.",
@@ -1259,7 +1214,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs or an external Security Information and Event Management (SIEM) environment, and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes made to AWS Organizations in the master AWS account.",
@@ -1282,7 +1236,6 @@
"Attributes": [
{
"Section": "4 Monitoring",
-"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Security Hub collects security data from various AWS accounts, services, and supported third-party partner products, helping you analyze your security trends and identify the highest-priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from the AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.",
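The Security Hub control above can be probed per region with a single call: `describe_hub` succeeds only when the hub is enabled. A hedged sketch (the exception raised for a disabled hub is caught broadly here rather than by its specific type):

```python
import boto3
from botocore.exceptions import ClientError

def security_hub_enabled(region: str) -> bool:
    try:
        boto3.client("securityhub", region_name=region).describe_hub()
        return True
    except ClientError:  # raised when Security Hub is not enabled
        return False
```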
@@ -1307,7 +1260,6 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5 Networking",
|
||||
"SubSection": "",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The Network Access Control List (NACL) function provides stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH on port `22` and RDP on port `3389`, using either the TCP (6), UDP (17), or ALL (-1) protocols.",
|
||||
@@ -1332,7 +1284,6 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5 Networking",
|
||||
"SubSection": "",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH on port `22` and RDP on port `3389`, using either the TCP (6), UDP (17), or ALL (-1) protocols.",
|
||||
@@ -1357,7 +1308,6 @@
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5 Networking",
|
||||
"SubSection": "",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH on port `22` and RDP on port `3389`.",
|
||||
@@ -1380,7 +1330,6 @@
"Attributes": [
{
"Section": "5 Networking",
"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If a security group is not specified when an instance is launched, it is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic, both inbound and outbound. The default VPC in every region should have its default security group updated to comply with the following: - No inbound rules. - No outbound rules. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation. **Note:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly, as it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering by discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.",
@@ -1403,7 +1352,6 @@
"Attributes": [
{
"Section": "5 Networking",
"SubSection": "",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Once a VPC peering connection is established, routing tables must be updated to enable any connections between the peered VPCs. These routes can be as specific as desired, even allowing for the peering of a VPC to only a single host on the other side of the connection.",
@@ -1426,7 +1374,6 @@
"Attributes": [
{
"Section": "5 Networking",
"SubSection": "",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method).",
@@ -17,7 +17,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password. With MFA activated, users must provide their credentials (username and password) along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
"AdditionalInformation": "Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -34,7 +35,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "The root user account in AWS has the highest level of privileges. Multi-Factor Authentication (MFA) enhances security by adding an extra layer of protection beyond a username and password. When MFA is enabled, users must enter their credentials along with a unique authentication code generated by their AWS MFA device when signing into an AWS website.",
"AdditionalInformation": "A hardware MFA has a smaller attack surface compared to a virtual MFA. Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware or compromise, a hardware MFA operates independently, reducing exposure to potential security threats.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -51,7 +53,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "To enhance security and reduce the risk of unauthorized access, Multi-Factor Authentication (MFA) should be enabled for all IAM users who have access to the AWS Management Console.",
"AdditionalInformation": "Without Multi-Factor Authentication (MFA), a compromised password alone is enough to allow an attacker to access the console, gaining full visibility and control over AWS resources.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -68,7 +71,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "Password policies help enforce password complexity requirements to strengthen account security. In AWS IAM, password policies can be configured to ensure that user passwords meet specific criteria, including a minimum length requirement. It is recommended to enforce a minimum password length of 14 characters to enhance security.",
"AdditionalInformation": "Requiring longer and more complex passwords reduces the risk of compromise from brute force attacks, credential stuffing, and other password-based threats. A 14-character minimum makes it significantly harder for attackers to guess or crack passwords, improving overall account security and resilience.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -85,7 +89,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "IAM password policies can be configured to prevent users from reusing previous passwords. This ensures that users create new, unique passwords instead of cycling through old ones. It is recommended to enforce password history restrictions to enhance security.",
"AdditionalInformation": "Blocking password reuse helps mitigate the risk of credential-based attacks, such as brute force and credential stuffing. It prevents users from reverting to previously compromised passwords, reducing the likelihood of unauthorized access.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -102,7 +107,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "Password policies help enforce password complexity requirements to strengthen account security. In AWS IAM, password policies can be configured to ensure that user passwords meet specific criteria, including requiring at least one number. It is recommended to enforce the use of at least one number to enhance security.",
"AdditionalInformation": "Requiring more complex passwords reduces the risk of compromise from brute force attacks, credential stuffing, and other password-based threats. Requiring at least one number makes it significantly harder for attackers to guess or crack passwords, improving overall account security and resilience.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -119,7 +125,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "IAM password policies can be configured to enforce the use of at least one special character (symbol) in user passwords. Special characters (e.g., @, #, $, %) add complexity, making passwords harder to guess or crack. It is recommended to require at least one symbol in IAM passwords to enhance security.",
"AdditionalInformation": "Requiring a symbol in passwords increases entropy, making brute-force and dictionary attacks more difficult. Attackers often rely on common or predictable password patterns, and enforcing special characters helps reduce the effectiveness of such attacks. This policy strengthens overall password security and aligns with industry best practices.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -136,7 +143,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "IAM password policies can be configured to enforce the use of at least one lowercase letter in user passwords. Including lowercase letters increases password complexity, making them more resistant to brute-force and dictionary attacks. It is recommended to require at least one lowercase letter in IAM passwords to strengthen security.",
"AdditionalInformation": "Requiring at least one lowercase letter ensures that passwords are not composed solely of numbers or uppercase letters, which are easier to guess. Attackers often use wordlists and predictable patterns when attempting to crack passwords. By enforcing lowercase letters, password complexity improves, reducing the likelihood of unauthorized access.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -153,7 +161,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "IAM password policies can be configured to enforce the use of at least one uppercase letter in user passwords. Including uppercase letters increases password complexity, making them more resilient to brute-force and dictionary attacks. It is recommended to require at least one uppercase letter in IAM passwords to enhance security.",
"AdditionalInformation": "Requiring at least one uppercase letter ensures that passwords are not composed solely of lowercase letters or numbers, which are more predictable and easier to crack. Attackers often rely on common word variations in password attacks, and enforcing uppercase letters adds an additional layer of complexity, reducing the risk of unauthorized access.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
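The password-policy attributes above (minimum length, number, symbol, lowercase, uppercase, and the earlier reuse-prevention item) all map onto a single IAM account setting. As a hedged illustration outside this changeset, a minimal boto3 sketch applying them could look like this; the reuse-prevention count of 24 is an assumption, since the attribute text does not fix a number:

```python
# Minimal sketch (boto3), not part of this diff: enforce the password-policy
# attributes described above in one API call.
import boto3

iam = boto3.client("iam")

iam.update_account_password_policy(
    MinimumPasswordLength=14,        # "minimum password length of 14 characters"
    RequireNumbers=True,             # at least one number
    RequireSymbols=True,             # at least one special character
    RequireLowercaseCharacters=True, # at least one lowercase letter
    RequireUppercaseCharacters=True, # at least one uppercase letter
    PasswordReusePrevention=24,      # block reuse of the last N passwords (N assumed)
)
```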
@@ -171,7 +180,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "AWS IAM users can authenticate and access AWS resources using various types of credentials, including passwords and access keys. To minimize security risks, it is recommended to deactivate or remove any credentials that have been unused for 45 days or more.",
"AdditionalInformation": "Disabling or removing inactive credentials reduces the attack surface and prevents unauthorized access through compromised or forgotten credentials. Unused credentials pose a security risk, as attackers may exploit them if they remain active without regular monitoring. Regularly auditing and revoking stale credentials enhances overall account security.",
- "LevelOfRisk": 2
+ "LevelOfRisk": 2,
+ "Weight": 8
}
]
},
@@ -188,7 +198,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "Access keys consist of an access key ID and a secret access key, which are used to authenticate and sign programmatic requests made to AWS. These keys allow users and applications to interact with AWS services via the AWS Command Line Interface (CLI), AWS SDKs, PowerShell tools, or direct API calls. To maintain security, it is recommended that all access keys be rotated regularly to minimize the risk of unauthorized access.",
"AdditionalInformation": "Regularly rotating access keys reduces the risk of compromised credentials being exploited. If an access key is leaked, cracked, or stolen, rotating it limits the window of opportunity for malicious use. Additionally, rotating keys ensures that inactive or outdated credentials cannot be used for unauthorized access, enhancing overall security and compliance.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
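For the rotation attribute above, key age is easy to audit programmatically. A hedged sketch using boto3 (not part of this diff); the 90-day threshold is an assumption, since the text only says keys should be rotated "regularly":

```python
# Minimal sketch (boto3): flag active access keys older than a chosen threshold.
from datetime import datetime, timedelta, timezone

import boto3

iam = boto3.client("iam")
threshold = datetime.now(timezone.utc) - timedelta(days=90)  # assumed cutoff

for page in iam.get_paginator("list_users").paginate():
    for user in page["Users"]:
        keys = iam.list_access_keys(UserName=user["UserName"])["AccessKeyMetadata"]
        for key in keys:
            if key["Status"] == "Active" and key["CreateDate"] < threshold:
                print(f"Rotate {key['AccessKeyId']} for {user['UserName']}")
```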
@@ -205,7 +216,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "IAM password policies can be configured to enforce password expiration after a defined period. It is recommended that passwords be set to expire within 90 days or less to ensure users regularly update their credentials. This helps mitigate security risks associated with stale or compromised passwords that remain active for extended periods.",
"AdditionalInformation": "Requiring password expiration within 90 days or less reduces the risk of credential-based attacks, such as brute-force attacks and credential stuffing, by ensuring that old passwords cannot be used indefinitely. If a password has been exposed or compromised without detection, regular expiration limits the window of opportunity for an attacker to exploit it. This policy enforces stronger access control and aligns with industry security best practices.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -222,7 +234,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "The root account in AWS has unrestricted administrative privileges and should be protected with the highest security measures. Access keys provide programmatic access to AWS, but when linked to the root account, they pose a significant security risk. It is recommended that no access keys be associated with the root account, ensuring that all programmatic access is managed through IAM roles and users with least privilege access.",
"AdditionalInformation": "The root account holds the highest level of privileges in an AWS environment. AWS Access Keys enable programmatic access to AWS resources, but when associated with the root account, they pose a significant security risk. It is recommended to remove all access keys linked to the root account to minimize potential attack vectors. Eliminating root access keys reduces the risk of unauthorized access and enforces the use of role-based IAM accounts with least privilege, promoting a more secure and controlled access management approach.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
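The root-access-key attribute above can be verified without logging in as root. A hedged boto3 sketch (not part of this diff) using the IAM account summary:

```python
# Minimal sketch (boto3): verify that no access keys exist on the root account.
import boto3

iam = boto3.client("iam")
summary = iam.get_account_summary()["SummaryMap"]

# AccountAccessKeysPresent is 0 when the root user has no access keys.
if summary["AccountAccessKeysPresent"] != 0:
    print("Root account has active access keys; remove them.")
```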
@@ -239,7 +252,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "IAM policies define permissions that control access to AWS resources. To ensure scalability, security, and manageability, it is recommended that IAM policies be attached only to groups or roles rather than individual users. By assigning permissions at the group or role level, organizations can apply consistent security policies and avoid permission sprawl.",
"AdditionalInformation": "Attaching policies to groups or roles simplifies access control, reduces security risks, and improves compliance tracking. This approach prevents overprivileged accounts and ensures a structured, scalable IAM policy framework.",
- "LevelOfRisk": 1
+ "LevelOfRisk": 1,
+ "Weight": 1
}
]
},
@@ -256,7 +270,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "IAM users gain access to AWS services, functions, and data through IAM policies. There are four ways to assign policies to a user: 1. Inline (User-Specific) Policy – Editing the policy directly within the user’s profile. 2. Directly Attached Policy – Assigning a standalone policy to a user. 3. Group-Based Policy (Recommended) – Adding the user to an IAM group with an attached policy. 4. Group with Inline Policy – Assigning an inline policy to a group that includes the user. Among these methods, only the third approach (group-based policies) is recommended for security and manageability.",
"AdditionalInformation": "Managing IAM permissions exclusively through groups ensures consistent, scalable, and role-based access control. This approach reduces the risk of excessive privileges, simplifies auditing, and aligns user permissions with organizational roles.",
- "LevelOfRisk": 1
+ "LevelOfRisk": 1,
+ "Weight": 1
}
]
},
@@ -274,7 +289,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "IAM policies define permissions for users, groups, and roles, controlling access to AWS resources. Following the principle of least privilege, users should be granted only the permissions necessary to perform their tasks. Instead of assigning broad administrative privileges, permissions should be carefully crafted to allow only the required actions.",
"AdditionalInformation": "Starting with minimal permissions and granting additional access as needed is significantly more secure than providing excessive permissions and attempting to restrict them later. Assigning full administrative privileges increases the risk of unauthorized or accidental actions that could compromise AWS resources. IAM policies containing Effect: Allow, Action: *, Resource: * should be removed to prevent unrestricted access and enforce security best practices.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
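Detecting the `Effect: Allow, Action: *, Resource: *` pattern named above is mechanical. A hedged boto3 sketch (not part of this diff) scanning attached customer-managed policies:

```python
# Minimal sketch (boto3): find customer-managed policies granting full admin.
import boto3

iam = boto3.client("iam")

for page in iam.get_paginator("list_policies").paginate(Scope="Local", OnlyAttached=True):
    for policy in page["Policies"]:
        doc = iam.get_policy_version(
            PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"]
        )["PolicyVersion"]["Document"]
        statements = doc["Statement"]
        if isinstance(statements, dict):  # a lone statement may not be list-wrapped
            statements = [statements]
        for stmt in statements:
            if (
                stmt.get("Effect") == "Allow"
                and stmt.get("Action") in ("*", ["*"])
                and stmt.get("Resource") in ("*", ["*"])
            ):
                print(f"Full-admin policy found: {policy['PolicyName']}")
```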
@@ -291,7 +307,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "AWS offers a Support Center for incident notification, response, technical support, and customer service assistance. To ensure secure and controlled access, an IAM role should be created with a properly assigned policy, allowing only authorized users to manage incidents with AWS Support.",
"AdditionalInformation": "Implementing least privilege access control ensures that only designated users can interact with AWS Support. Assigning an IAM role with a specific policy limits access to only necessary actions, reducing the risk of unauthorized modifications or exposure of sensitive account information.",
- "LevelOfRisk": 1
+ "LevelOfRisk": 1,
+ "Weight": 1
}
]
},
@@ -308,7 +325,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "AWS instances can access AWS resources either by embedding access keys in API calls or by assigning an IAM role with the necessary permissions. Using IAM roles ensures secure, controlled access without hardcoding credentials.",
"AdditionalInformation": "IAM roles eliminate the risks associated with hardcoded credentials, reducing exposure to external threats. Unlike access keys, which can be used outside AWS if compromised, IAM roles require an attacker to maintain control of an instance to exploit privileges. Additionally, IAM roles simplify credential management by ensuring permissions are automatically updated without the need for manual key rotation.",
- "LevelOfRisk": 2
+ "LevelOfRisk": 2,
+ "Weight": 8
}
]
},
@@ -325,7 +343,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "To enable HTTPS connections for applications and websites hosted on AWS, an SSL/TLS server certificate is required. AWS provides two options for managing certificates: AWS Certificate Manager (ACM) – The preferred method for managing SSL/TLS certificates, automating renewals and deployment. IAM Certificate Storage – Used only when deploying SSL/TLS certificates in regions not supported by ACM. IAM securely encrypts private keys and stores them, but certificates must be obtained from an external provider. ACM certificates cannot be uploaded to IAM, and IAM certificates cannot be managed from the IAM Console.",
"AdditionalInformation": "Removing expired SSL/TLS certificates prevents the accidental deployment of invalid certificates, which could cause service disruptions, security warnings, and loss of credibility for applications using AWS services like Elastic Load Balancer (ELB). As a best practice, expired certificates should be deleted to maintain a secure and trusted application environment.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -342,7 +361,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "The root account in AWS has unrestricted administrative privileges and should be used only for initial account setup and emergency scenarios. Regular operations should be performed using IAM users or roles with least privilege access to minimize security risks.",
"AdditionalInformation": "Using the root account increases the risk of unauthorized access, accidental misconfigurations, and privilege misuse. By restricting root account usage and delegating tasks to IAM users or roles, organizations can enforce better access control, auditing, and security best practices.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -359,7 +379,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "Enable IAM Access Analyzer for all AWS regions to monitor IAM policies and identify resources with unintended external access. IAM Access Analyzer, introduced at AWS re:Invent 2019, scans resource-based policies and provides visibility into which resources—such as KMS keys, IAM roles, S3 buckets, Lambda functions, and SQS queues—are accessible by external accounts or federated users. This allows administrators to enforce least privilege access and mitigate unauthorized access risks. IAM Access Analyzer operates within the same AWS region as the resources being analyzed.",
"AdditionalInformation": "IAM Access Analyzer enhances security visibility by detecting AWS resources shared with external entities, helping organizations identify potential security risks and ensure compliance with least privilege principles. It continuously evaluates resource-based policies using logic-based analysis, allowing teams to promptly remediate misconfigurations that could lead to unauthorized access or data exposure.",
- "LevelOfRisk": 2
+ "LevelOfRisk": 2,
+ "Weight": 8
}
]
},
@@ -376,7 +397,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "In multi-account AWS environments, centralizing IAM user management improves control, security, and access management efficiency. Instead of creating separate IAM users in each account, access should be managed through role assumption. This can be achieved using AWS Organizations or federation with an external identity provider (e.g., AWS IAM Identity Center, Okta, or Active Directory).",
"AdditionalInformation": "Centralizing IAM user management into a single identity store simplifies administration, reduces the risk of access misconfigurations, and enforces consistent security policies across all accounts. This approach enhances security, scalability, and compliance while minimizing user duplication and permission errors.",
- "LevelOfRisk": 1
+ "LevelOfRisk": 1,
+ "Weight": 1
}
]
},
@@ -393,7 +415,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "AWS CloudShell provides a managed command-line interface (CLI) for interacting with AWS services. The AWSCloudShellFullAccess IAM policy grants full access to CloudShell, including file upload and download capabilities between a user’s local system and the CloudShell environment. Within CloudShell, users have sudo privileges and unrestricted internet access, making it possible to install software—such as file transfer tools—that could facilitate data movement to external servers.",
"AdditionalInformation": "Access to AWSCloudShellFullAccess should be restricted, as it can serve as a potential data exfiltration vector for malicious or compromised cloud administrators. Granting full permissions to CloudShell increases the risk of unauthorized data transfers outside the AWS environment. AWS provides guidance on creating more restrictive IAM policies to limit file transfer capabilities, reducing security risks.",
- "LevelOfRisk": 2
+ "LevelOfRisk": 2,
+ "Weight": 8
}
]
},
@@ -410,7 +433,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "IAM inline policies define permissions directly attached to users, groups, or roles, rather than being managed as standalone policies. If improperly configured, these policies can grant actions that enable privilege escalation, allowing users to elevate their access beyond intended permissions. Privilege escalation can occur through misconfigured IAM roles, excessive permissions, or indirect access paths, potentially leading to unauthorized control over AWS resources.",
"AdditionalInformation": "Users with some IAM permissions are allowed to elevate their privileges up to administrator rights.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -427,7 +451,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Amazon S3 bucket permissions can be configured using a bucket policy to enforce access restrictions. To enhance security, objects within the bucket should be made accessible only via HTTPS, ensuring encrypted data transmission.",
"AdditionalInformation": "By default, Amazon S3 accepts both HTTP and HTTPS requests, which can expose data to interception. To enforce secure access, HTTP requests should be explicitly denied in the bucket policy. Simply allowing HTTPS without blocking HTTP does not fully comply with security best practices, as unencrypted requests may still be accepted.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
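The "explicitly deny HTTP" point above corresponds to the standard `aws:SecureTransport` deny statement. A hedged boto3 sketch (not part of this diff; the bucket name is a placeholder):

```python
# Minimal sketch (boto3): deny all non-TLS access to a bucket.
import json

import boto3

bucket = "example-bucket"  # placeholder name
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "DenyInsecureTransport",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:*",
            "Resource": [f"arn:aws:s3:::{bucket}", f"arn:aws:s3:::{bucket}/*"],
            "Condition": {"Bool": {"aws:SecureTransport": "false"}},
        }
    ],
}
boto3.client("s3").put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
```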
@@ -444,7 +469,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "AWS EC2 instances allow users to choose between Instance Metadata Service Version 1 (IMDSv1), which uses a request/response model, or Instance Metadata Service Version 2 (IMDSv2), which uses a session-based approach for enhanced security.",
"AdditionalInformation": "Instance metadata refers to the data about an EC2 instance, such as host names, events, and security groups, that is used for managing and configuring the instance. When enabling the Metadata Service, users can opt for either IMDSv1, which operates via a simple request/response model, or IMDSv2, which implements session authentication for additional security. With IMDSv2, each request is secured by session-based authentication, ensuring that all interactions with the instance's metadata and credentials are protected. IMDSv1, on the other hand, may expose instances to Server-Side Request Forgery (SSRF) attacks. To improve security, Amazon recommends using IMDSv2.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
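Requiring IMDSv2 on an existing instance is a single API call. A hedged boto3 sketch (not part of this diff; the instance ID is a placeholder):

```python
# Minimal sketch (boto3): enforce IMDSv2 session tokens on an instance.
import boto3

ec2 = boto3.client("ec2")
ec2.modify_instance_metadata_options(
    InstanceId="i-0123456789abcdef0",  # placeholder instance ID
    HttpTokens="required",             # rejects IMDSv1 request/response calls
    HttpEndpoint="enabled",            # keep the metadata endpoint reachable
)
```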
@@ -461,7 +487,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Enabling MFA Delete on a sensitive or classified Amazon S3 bucket adds an extra layer of protection by requiring two-factor authentication for critical actions, such as deleting object versions or changing the bucket’s versioning state.",
"AdditionalInformation": "MFA Delete helps prevent accidental or malicious deletions by requiring an additional authentication step. This mitigates the risk of data loss due to compromised credentials or unauthorized access, ensuring that critical objects remain protected.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -478,7 +505,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon S3 buckets may store sensitive data that needs to be discovered, classified, monitored, and protected to maintain security and compliance. Amazon Macie, along with third-party tools, can automatically inventory S3 buckets and identify sensitive data at scale.",
"AdditionalInformation": "Using automated data discovery and classification tools, such as Amazon Macie, enhances security by continuously monitoring S3 buckets for sensitive information. Macie leverages machine learning and pattern matching to detect and protect critical data, reducing the risk of data leaks and unauthorized access.",
- "LevelOfRisk": 1
+ "LevelOfRisk": 1,
+ "Weight": 1
}
]
},
@@ -495,7 +523,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Each Amazon VPC includes a default security group that initially denies all inbound traffic, allows all outbound traffic, and permits unrestricted communication between instances within the group. If no security group is specified when launching an instance, it is automatically assigned to this default security group. Since security groups control stateful ingress and egress traffic, it is recommended to restrict all inbound and outbound traffic in the default security group.",
"AdditionalInformation": "Restricting all traffic in the default security group enforces least privilege access by ensuring that AWS resources are explicitly assigned to well-defined security groups. This approach reduces unintended exposure, improves network segmentation, and promotes secure resource placement within AWS environments.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
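Stripping the default security group down to no rules, as the attribute above recommends, can be done by revoking its existing permissions. A hedged boto3 sketch (not part of this diff):

```python
# Minimal sketch (boto3): remove all ingress/egress rules from default SGs.
import boto3

ec2 = boto3.client("ec2")
groups = ec2.describe_security_groups(
    Filters=[{"Name": "group-name", "Values": ["default"]}]
)["SecurityGroups"]

for sg in groups:
    if sg["IpPermissions"]:
        ec2.revoke_security_group_ingress(
            GroupId=sg["GroupId"], IpPermissions=sg["IpPermissions"]
        )
    if sg["IpPermissionsEgress"]:
        ec2.revoke_security_group_egress(
            GroupId=sg["GroupId"], IpPermissions=sg["IpPermissionsEgress"]
        )
```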
@@ -512,7 +541,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "After establishing a VPC peering connection, routing tables must be updated to enable communication between the peered VPCs. Routes can be configured with granular specificity, allowing connections to be restricted to a single host or a specific subnet within the peered VPC.",
"AdditionalInformation": "Defining highly specific routes in VPC peering connections enhances security by limiting access to only the necessary resources. This minimizes the potential impact of a security breach, ensuring that resources outside the defined routes remain inaccessible, reducing the risk of lateral movement within the network.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -531,7 +561,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Network Access Control Lists (NACLs) provide stateless filtering of ingress and egress traffic to AWS resources. It is recommended that NACLs do not allow unrestricted inbound access to remote administration ports, such as SSH (port 22) and RDP (port 3389), over TCP (6), UDP (17), or ALL (-1) protocols to prevent unauthorized access.",
"AdditionalInformation": "Exposing remote server administration ports (e.g., SSH on 22 and RDP on 3389) to the public internet increases the attack surface, making resources more vulnerable to brute-force attacks and unauthorized access. Restricting inbound access to these ports helps reduce security risks and limit potential exploitation.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -550,7 +581,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Security groups enforce stateful filtering of ingress and egress traffic to AWS resources. To enhance security, no security group should allow unrestricted inbound access to remote administration ports, such as SSH (port 22) and RDP (port 3389), over TCP (6), UDP (17), or ALL (-1) protocols.",
"AdditionalInformation": "Exposing remote administration ports to the public internet significantly increases the attack surface, making resources more vulnerable to brute-force attacks, exploitation, and unauthorized access. Restricting ingress traffic to these ports helps reduce security risks and prevent potential system compromises.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
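Scanning for the open-admin-port condition described above reduces to matching world-open CIDRs against the SSH/RDP port range. A hedged boto3 sketch (not part of this diff):

```python
# Minimal sketch (boto3): report security groups exposing SSH/RDP to the world.
import boto3

ec2 = boto3.client("ec2")
ADMIN_PORTS = {22, 3389}

for sg in ec2.describe_security_groups()["SecurityGroups"]:
    for perm in sg["IpPermissions"]:
        open_to_world = any(
            r.get("CidrIp") == "0.0.0.0/0" for r in perm.get("IpRanges", [])
        ) or any(r.get("CidrIpv6") == "::/0" for r in perm.get("Ipv6Ranges", []))
        covers_admin = perm.get("IpProtocol") == "-1" or (
            perm.get("FromPort") is not None
            and any(perm["FromPort"] <= p <= perm["ToPort"] for p in ADMIN_PORTS)
        )
        if open_to_world and covers_admin:
            print(f"{sg['GroupId']} allows world access to an admin port")
```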
@@ -567,7 +599,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon Elastic Block Store (EBS) snapshots contain backups of EC2 volumes, which may include sensitive data such as credentials, application configurations, or customer information. EBS snapshots should never be publicly accessible to prevent unauthorized access and data exposure. By default, snapshots are private, but they can be manually shared with other AWS accounts or made public, which poses a significant security risk if misconfigured.",
"AdditionalInformation": "Exposing EBS snapshots publicly increases the risk of data breaches, unauthorized access, and compliance violations. Attackers can scan for publicly accessible snapshots and extract sensitive information. To prevent data leaks, snapshots should be restricted to specific AWS accounts or kept private unless explicitly needed for sharing. Implementing proper access controls helps protect critical data and maintain compliance with security best practices.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
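Whether a snapshot is public is recorded in its create-volume permission attribute. A hedged boto3 sketch (not part of this diff; the snapshot ID is a placeholder):

```python
# Minimal sketch (boto3): check whether an EBS snapshot is shared publicly.
import boto3

ec2 = boto3.client("ec2")
attr = ec2.describe_snapshot_attribute(
    SnapshotId="snap-0123456789abcdef0",  # placeholder snapshot ID
    Attribute="createVolumePermission",
)
# A permission entry of {"Group": "all"} means anyone can copy/restore it.
is_public = any(p.get("Group") == "all" for p in attr["CreateVolumePermissions"])
print("public" if is_public else "private or shared with specific accounts")
```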
@@ -584,7 +617,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Amazon Elastic Compute Cloud (EC2) supports encryption at rest for Elastic Block Store (EBS) volumes, ensuring that stored data remains protected. While EBS encryption is disabled by default, organizations can enforce automatic encryption of newly created volumes to enhance data security and compliance.",
"AdditionalInformation": "Enforcing EBS volume encryption reduces the risk of data exposure, unauthorized access, and compliance violations. Even if the underlying storage is compromised, encrypted data remains unreadable to unauthorized users. Encrypting data at rest ensures that sensitive information is protected against accidental disclosure, insider threats, and external attacks.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
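The "automatic encryption of newly created volumes" mentioned above is a per-region EC2 account setting. A hedged boto3 sketch (not part of this diff):

```python
# Minimal sketch (boto3): enable default EBS encryption in the current region.
import boto3

ec2 = boto3.client("ec2")
if not ec2.get_ebs_encryption_by_default()["EbsEncryptionByDefault"]:
    ec2.enable_ebs_encryption_by_default()  # new volumes are encrypted from now on
```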
@@ -601,7 +635,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Amazon Relational Database Service (RDS) supports encryption at rest using the industry-standard AES-256 encryption algorithm to secure database instances and their associated storage. Once enabled, RDS encryption automatically handles access authentication and decryption, ensuring secure data storage with minimal performance impact.",
"AdditionalInformation": "Databases often contain sensitive and business-critical information, making encryption essential to protect against unauthorized access and data breaches. Enabling RDS encryption ensures that underlying storage, automated backups, read replicas, and snapshots are all encrypted, preventing accidental or malicious data exposure while maintaining compliance with security best practices.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -618,7 +653,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Simple Notification Service (SNS) topics enable messaging between AWS services, applications, and users. By default, SNS topics should be restricted to trusted AWS accounts or IAM roles to prevent unauthorized access. Allowing global send (sns:Publish) or subscribe (sns:Subscribe) permissions means any AWS account or unauthenticated entity could send messages or subscribe to the topic, potentially leading to spam, data leaks, or misuse of notifications.",
"AdditionalInformation": "SNS topics with global send or subscribe permissions expose AWS environments to unauthorized message injection, data exfiltration, and Denial-of-Service (DoS) attacks. An attacker could flood an SNS topic with malicious or fraudulent messages, leading to unexpected charges or service disruptions. Restricting access ensures that only authorized AWS accounts, applications, or IAM roles can send and receive messages, reducing security risks and protecting system integrity.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -635,7 +671,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon Relational Database Service (RDS) snapshots store backups of database instances, potentially containing sensitive data such as customer records, credentials, and application configurations. By default, RDS snapshots are private, but they can be shared with other AWS accounts or made public, which can lead to data exposure if misconfigured. To prevent unauthorized access, RDS snapshots should never be publicly accessible unless explicitly required and secured.",
"AdditionalInformation": "Publicly accessible RDS snapshots create a serious security risk, as anyone can copy the snapshot and restore the database, exposing sensitive information. Attackers actively scan for publicly available snapshots to extract credentials, personally identifiable information (PII), or business-critical data. To prevent unauthorized access and data leaks, RDS snapshots should remain private or restricted to trusted AWS accounts following the principle of least privilege.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -652,7 +689,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "AWS CloudTrail logs record account activity, including API calls, user actions, and resource modifications, making them critical for security monitoring and compliance auditing. These logs are typically stored in an Amazon S3 bucket for long-term retention and analysis. To protect sensitive security data, the S3 bucket storing CloudTrail logs should never be publicly accessible.",
"AdditionalInformation": "If the S3 bucket containing CloudTrail logs is publicly accessible, unauthorized users could access sensitive security information, including API calls, IAM activity, and infrastructure changes. Exposing CloudTrail logs can help attackers reconstruct system activity, identify vulnerabilities, and plan targeted attacks. To prevent data leaks and unauthorized access, CloudTrail log buckets should be restricted using IAM policies, bucket policies, and S3 Block Public Access settings.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
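The S3 Block Public Access settings named above can be applied per bucket. A hedged boto3 sketch (not part of this diff; the bucket name is a placeholder):

```python
# Minimal sketch (boto3): block all public access on a CloudTrail log bucket.
import boto3

boto3.client("s3").put_public_access_block(
    Bucket="my-cloudtrail-logs",  # placeholder bucket name
    PublicAccessBlockConfiguration={
        "BlockPublicAcls": True,
        "IgnorePublicAcls": True,
        "BlockPublicPolicy": True,
        "RestrictPublicBuckets": True,
    },
)
```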
@@ -669,7 +707,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon Redshift clusters store and process large-scale data for analytics and business intelligence workloads. By default, Redshift clusters can be configured with a public endpoint, making them accessible from the internet. To minimize security risks, Redshift clusters should be restricted to private networks and should not have a public endpoint unless absolutely necessary and properly secured.",
"AdditionalInformation": "Exposing a Redshift cluster to the public internet increases the risk of unauthorized access, data breaches, and cyberattacks. Attackers could attempt brute-force login attempts, exploit misconfigurations, or access sensitive business data. Keeping Redshift clusters within private subnets and restricting access via security groups, VPC settings, and IAM policies ensures that only trusted networks and users can connect, reducing the attack surface and enhancing data security.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -686,7 +725,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "AWS API Gateway allows developers to create, deploy, and manage APIs that connect applications to backend services. By default, API Gateway endpoints can be publicly accessible, meaning they can be invoked from anywhere on the internet. To enhance security, API Gateway endpoints should be restricted to private networks using VPC links, private API settings, or access control mechanisms to ensure that only authorized entities can interact with the API.",
"AdditionalInformation": "Publicly accessible API Gateway endpoints can expose backend services to unauthorized access, data leaks, and potential exploitation. Attackers may attempt brute-force authentication, injection attacks, or abuse API functionality if access is not properly restricted. To reduce the attack surface, API Gateway endpoints should be limited to internal use or protected with authentication, IAM permissions, WAF rules, or private VPC access to ensure only trusted users and systems can invoke the API.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -703,7 +743,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon EC2 instances launched via Auto Scaling groups can automatically scale workloads based on demand. By default, instances can be assigned public IP addresses, making them accessible from the internet. To enhance security, EC2 instances in Auto Scaling group launch configurations should not have public IP addresses, ensuring they remain within a private network and are only accessible through secure channels such as bastion hosts or VPN connections.",
"AdditionalInformation": "Assigning public IP addresses to Auto Scaling group instances increases the risk of unauthorized access, brute-force attacks, and potential exploitation. Publicly accessible instances can become targets for malicious actors, leading to data breaches or service disruptions. By restricting public IP addresses, organizations can enforce network segmentation, ensuring that EC2 instances are accessed securely via private networks, VPNs, or load balancers.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -720,7 +761,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS Lambda functions allow running code without managing servers. Lambda supports resource-based policies that define who can invoke the function. If a Lambda function’s resource-based policy allows public access, it can be triggered by anyone on the internet, posing a significant security risk. To prevent unauthorized execution, Lambda functions should not be publicly accessible unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible Lambda functions can be abused for unauthorized execution, leading to service disruptions, data exfiltration, or increased AWS costs due to excessive invocations. Attackers could exploit misconfigured functions to perform malicious actions, extract sensitive data, or abuse compute resources. To reduce security risks, Lambda functions should only be accessible to specific IAM roles, AWS services, or trusted accounts, enforcing least privilege access and maintaining secure function execution.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
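A function's resource-based policy can be inspected for wildcard principals. A hedged boto3 sketch (not part of this diff; the function name is a placeholder):

```python
# Minimal sketch (boto3): detect a Lambda resource policy open to any principal.
import json

import boto3

lam = boto3.client("lambda")
try:
    policy = json.loads(lam.get_policy(FunctionName="my-function")["Policy"])
except lam.exceptions.ResourceNotFoundException:
    policy = {"Statement": []}  # no resource-based policy attached

for stmt in policy["Statement"]:
    principal = stmt.get("Principal")
    if principal == "*" or principal == {"AWS": "*"}:
        print("Function is publicly invocable via statement:", stmt.get("Sid"))
```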
@@ -737,7 +779,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS Lambda function URLs provide a built-in HTTPS endpoint that allows functions to be invoked directly via HTTP requests. By default, Lambda function URLs can be publicly accessible, meaning anyone on the internet can invoke the function if proper access controls are not enforced. To minimize security risks, Lambda function URLs should not be publicly accessible unless explicitly required and properly restricted.",
"AdditionalInformation": "Exposing Lambda function URLs to the public internet increases the risk of unauthorized access, API abuse, and potential exploitation. Attackers may invoke functions maliciously, leading to data leaks, unauthorized operations, increased costs, or denial-of-service (DoS) attacks. To enhance security, Lambda function URLs should be restricted to specific IAM roles, AWS services, or trusted clients, ensuring that only authorized users can trigger the function.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -754,7 +797,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS Database Migration Service (DMS) instances facilitate data migration between databases across on-premises and cloud environments. By default, DMS instances can be configured with publicly accessible endpoints, making them reachable from the internet. To enhance security and prevent unauthorized access, DMS instances should not be publicly accessible unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible DMS instances increase the risk of unauthorized access, data interception, and potential exploitation. Attackers could target exposed instances to steal or manipulate sensitive data during migration. Restricting public access ensures data migrations remain secure, limiting access to trusted networks, private VPCs, and authorized IAM roles, thereby reducing the attack surface and ensuring compliance with security best practices.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -771,7 +815,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "AWS DocumentDB manual cluster snapshots store backups of DocumentDB clusters, containing sensitive database information such as application data, configurations, and credentials. By default, snapshots are private, but they can be manually shared or made public, which poses a significant security risk. To prevent unauthorized access, DocumentDB manual cluster snapshots should never be publicly accessible unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible DocumentDB snapshots expose critical database information, increasing the risk of data breaches, unauthorized access, and compliance violations. Attackers could restore the snapshot in their own AWS account and gain full access to the database content. To protect sensitive data, DocumentDB snapshots should only be shared with specific AWS accounts or remain private, following least privilege principles and AWS security best practices.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -788,7 +833,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon EC2 Amazon Machine Images (AMIs) contain pre-configured operating system and application environments that can be used to launch new EC2 instances. By default, AMIs are private, but they can be manually shared or made public, which poses a security risk if sensitive data or proprietary configurations are exposed. To prevent unauthorized access and data leaks, EC2 AMIs should not be set as public unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible EC2 AMIs increase the risk of data exposure, unauthorized access, and compliance violations. Attackers could copy, analyze, or exploit public AMIs to extract sensitive credentials, misconfigurations, or proprietary software. Keeping AMIs private or shared only with specific AWS accounts ensures that only trusted users or teams can access and launch instances from them, reducing security risks and preventing unintended data exposure.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -805,7 +851,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon Elastic Block Store (EBS) snapshots are backups of EC2 volumes that may contain sensitive data, such as credentials, application configurations, and customer records. By default, EBS snapshots are private, but they can be manually shared or made public, allowing anyone to copy or restore them. To prevent unauthorized access and data exposure, public access to EBS snapshots should always be disabled.",
"AdditionalInformation": "Publicly accessible EBS snapshots pose a significant security risk, as attackers can restore and extract sensitive data if a snapshot is exposed. Misconfigured public snapshots have led to data breaches and compliance violations in the past. To mitigate this risk, EBS snapshots should be kept private or explicitly shared only with trusted AWS accounts, following least privilege principles to protect critical data and maintain security compliance.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -838,7 +885,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Amazon EC2 instances can run various services that communicate over common ports such as 22 (SSH), 3389 (RDP), 80 (HTTP), and 443 (HTTPS), among others. If these ports are open to the internet, attackers can attempt unauthorized access, brute-force attacks, or exploit known vulnerabilities. To reduce security risks, EC2 instances should be configured so that common ports are not exposed to the public internet, unless explicitly required and properly secured.",
"AdditionalInformation": "Exposing common ports directly to the internet increases the attack surface and risks unauthorized access or system compromise. Attackers frequently scan for open ports to target misconfigured or unpatched services. To enhance security, access to EC2 common ports should be restricted using security groups, network ACLs, and VPC configurations, ensuring that only trusted networks and users can connect.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -869,7 +917,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Amazon EC2 security groups act as virtual firewalls, controlling inbound and outbound traffic to instances. If a security group allows ingress (incoming traffic) from the internet (0.0.0.0/0 or ::/0) to common ports such as 22 (SSH), 3389 (RDP), 80 (HTTP), or 443 (HTTPS), among others, it creates a significant security risk. To minimize exposure, security groups should be configured to restrict ingress access to these ports to only trusted IP addresses or internal networks.",
"AdditionalInformation": "Allowing unrestricted inbound traffic to common ports increases the risk of brute-force attacks, unauthorized access, and exploitation of vulnerabilities. Attackers actively scan for open ports on public-facing EC2 instances to gain unauthorized control. To reduce security risks, ingress rules should be restricted using least privilege principles, IP whitelisting, VPN access, or bastion hosts, ensuring that only authorized users and networks can connect.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -886,7 +935,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Elastic Container Registry (ECR) repositories store and manage container images for deployment in AWS services. By default, ECR repositories are private, but they can be manually configured as public, allowing anyone to pull container images. To prevent unauthorized access and potential security risks, ECR repositories should not be set as public unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible ECR repositories expose container images to unauthorized users, increasing the risk of intellectual property theft, malware injection, or unauthorized use of containerized applications. Attackers could analyze public images for vulnerabilities or use misconfigured images for malicious purposes. To mitigate this risk, ECR repositories should remain private or be explicitly shared with trusted AWS accounts, ensuring secure access and compliance with best practices.",
- "LevelOfRisk": 5
+ "LevelOfRisk": 5,
+ "Weight": 1000
}
]
},
@@ -903,7 +953,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Elastic Container Service (ECS) allows running containerized applications on AWS. By default, ECS services can be configured to assign public IP addresses to tasks or services, making them directly accessible from the internet. To enhance security, ECS services should be configured not to automatically assign public IPs, ensuring they remain within a private network and are accessed securely through internal load balancers, VPC peering, or private endpoints.",
"AdditionalInformation": "Automatically assigning public IPs to ECS services exposes them to the internet, increasing the risk of unauthorized access, brute-force attacks, and data breaches. Attackers could target publicly exposed containers, exploit vulnerabilities, or disrupt services. To mitigate these risks, ECS services should be restricted to private subnets and accessed through secure networking configurations, such as AWS PrivateLink, VPNs, or internal ALBs.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -920,7 +971,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Elastic Container Service (ECS) task sets manage multiple versions of a service during deployments. By default, ECS task sets can be configured to automatically assign public IP addresses, making them directly accessible from the internet. To enhance security, ECS task sets should be restricted to private subnets and should not automatically receive public IP addresses unless explicitly required and properly secured.",
"AdditionalInformation": "Automatically assigning public IPs to ECS task sets increases the risk of unauthorized access, cyberattacks, and data exposure. Publicly exposed tasks can be targeted by attackers, leading to service disruptions or exploitation of vulnerabilities. To mitigate these risks, ECS task sets should be restricted to private networking environments, accessed only through internal load balancers, VPC endpoints, or secure VPN connections, ensuring controlled and secure communication.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -937,7 +989,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon Elastic File System (EFS) provides scalable, shared file storage for AWS services. EFS mount targets allow instances to connect to the file system within a VPC. By default, EFS mount targets can be configured with public accessibility, making them reachable from the internet. To enhance security, EFS mount targets should be restricted to private networks and should not be publicly accessible unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible EFS mount targets expose stored data to unauthorized access, cyberattacks, and data breaches. Attackers could exploit misconfigured security groups or network ACLs to access or modify files. To reduce security risks, EFS mount targets should be restricted to private subnets, with access limited to trusted VPCs, security groups, and IAM roles, ensuring secure file storage and controlled access.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -954,7 +1007,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon Elastic File System (EFS) provides shared storage that can be accessed by multiple EC2 instances and services within a VPC. EFS access is controlled through resource-based policies that define which clients can connect. If an EFS policy allows access to any client within the VPC, it increases the risk of unauthorized access and data exposure. To enhance security, EFS policies should be restricted to specific IAM roles, security groups, or trusted resources instead of granting broad access to all VPC clients.",
"AdditionalInformation": "Allowing any client within a VPC to access an EFS file system increases the risk of data leaks, accidental modifications, or unauthorized access by compromised instances or misconfigured services. To minimize exposure, EFS policies should enforce least privilege access, restricting permissions to specific instances, roles, or users that require access, ensuring secure file storage and controlled data access.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -971,7 +1025,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "A Network Policy defines how network traffic is controlled and restricted between workloads within a cloud environment. Enforcing network policies ensures that only authorized communication occurs between services, reducing the risk of unauthorized access and lateral movement. It is recommended to enable Network Policies and configure them appropriately to enforce least privilege access and secure communication between workloads.",
"AdditionalInformation": "Without properly configured Network Policies, workloads may be exposed to unnecessary or unauthorized network traffic, increasing the risk of data leaks, exploitation, or lateral movement by attackers. By enabling and enforcing Network Policies, organizations can limit communication between workloads, ensuring that only approved and necessary network interactions are allowed, minimizing the attack surface and enhancing overall security.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
@@ -988,7 +1043,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Amazon Elastic Kubernetes Service (EKS) clusters manage containerized applications and can be configured with either private or public access. If an EKS cluster is publicly accessible, it means that the Kubernetes API endpoint can be reached from the internet, increasing the risk of unauthorized access and attacks. To enhance security, EKS clusters should be restricted to private networks and accessed only through secure VPNs, VPC peering, or AWS PrivateLink.",
"AdditionalInformation": "Exposing an EKS cluster to the public internet increases the risk of brute-force attacks, credential theft, and unauthorized access to Kubernetes workloads. Attackers could exploit misconfigured RBAC policies or API vulnerabilities to gain control over the cluster. To reduce security risks, EKS clusters should be configured with private endpoints, ensuring that only trusted networks and IAM-authenticated users can manage Kubernetes resources.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1005,7 +1061,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Amazon Elastic Kubernetes Service (EKS) clusters run workloads on worker nodes, which can be either public or private. If EKS clusters are created with public nodes, these nodes are assigned public IP addresses, making them accessible from the internet, which increases the risk of unauthorized access and potential attacks. To enhance security, EKS clusters should be created with private nodes that operate within private subnets and are only accessible through secured networking configurations such as VPNs, VPC peering, or AWS PrivateLink.",
"AdditionalInformation": "Using public nodes in EKS exposes Kubernetes workloads to the internet, increasing the risk of unauthorized access, lateral movement, and potential exploitation. Attackers can target misconfigured workloads, open services, or unsecured API endpoints. By creating EKS clusters with private nodes, organizations can restrict access, limit exposure to public threats, and enforce network segmentation, ensuring that workloads remain secure and isolated within a private VPC environment.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1022,7 +1079,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon ElastiCache provides in-memory caching services using Redis and Memcached. By default, ElastiCache clusters can be deployed in either public or private subnets. If an ElastiCache cluster is placed in a public subnet, it becomes accessible from the internet, which significantly increases the risk of unauthorized access and data breaches. To enhance security, ElastiCache clusters should only be deployed in private subnets, ensuring restricted access within a VPC.",
"AdditionalInformation": "Deploying an ElastiCache cluster in a public subnet exposes it to external threats, such as unauthorized access, brute-force attacks, and potential data exfiltration. Attackers could exploit misconfigurations to access cached data or disrupt services. By restricting ElastiCache clusters to private subnets, organizations can limit access to trusted resources, enforce VPC security controls, and reduce the attack surface, ensuring secure and efficient caching operations.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1039,7 +1097,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Elastic Load Balancers (ELBs) distribute incoming traffic across multiple targets, such as EC2 instances, containers, and Lambda functions. By default, ELBs can be configured as either internet-facing or internal (private). If an ELB is publicly accessible, it exposes backend services to the internet, increasing the risk of unauthorized access and attacks. To enhance security, ELBs should be restricted to private networks unless explicitly required and properly secured.",
"AdditionalInformation": "Publicly accessible Elastic Load Balancers can serve as entry points for unauthorized traffic, brute-force attacks, and potential data breaches. Attackers may exploit misconfigured security groups, open ports, or exposed application endpoints behind the load balancer. To reduce security risks, ELBs should be configured as internal (private), allowing access only from trusted networks, VPNs, or specific VPCs, ensuring that backend services remain protected and isolated from external threats.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1056,7 +1115,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Elastic MapReduce (EMR) is a managed big data processing service that can access S3, EC2, and other AWS resources. The EMR Account Public Access Block setting helps prevent public access to EMR resources, such as data stored in S3 buckets. If this setting is not enabled, there is a risk that EMR-related data and configurations could be exposed to the public, leading to unauthorized access or data breaches. To enhance security, the Public Access Block should be enabled for the EMR account.",
"AdditionalInformation": "Allowing public access to EMR resources increases the risk of data leaks, unauthorized access, and compliance violations. Attackers could exploit misconfigured policies or publicly accessible S3 buckets to access sensitive data processed by EMR. Enabling EMR Account Public Access Block ensures that S3 data and other EMR-related resources cannot be accessed publicly, reducing exposure and maintaining strong access controls in AWS.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1073,7 +1133,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Elastic MapReduce (EMR) is a managed service for processing big data workloads using Apache Spark, Hadoop, and other frameworks. By default, EMR clusters can be configured with public or private access. If an EMR cluster is publicly accessible, it exposes data processing nodes and services to the internet, increasing the risk of unauthorized access and potential exploitation. To enhance security, EMR clusters should only be deployed in private subnets and restricted to trusted networks.",
"AdditionalInformation": "Publicly accessible EMR clusters increase the risk of data breaches, unauthorized access, and attacks on running workloads. Malicious actors could exploit misconfigured security groups, open ports, or weak authentication settings to compromise the cluster. To reduce exposure, EMR clusters should be placed in private subnets, restricted using VPC security controls, IAM permissions, and firewall rules, ensuring secure data processing and access management.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1090,7 +1151,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS EventBridge is a serverless event bus service that enables communication between AWS services, third-party applications, and custom event sources. By default, EventBridge event buses can be configured to allow events from any AWS account or external source. If an event bus is exposed to everyone, unauthorized entities could send events to your environment, potentially leading to security risks, data injection attacks, or service disruptions. To enhance security, event buses should be restricted to specific AWS accounts, services, or trusted IAM roles.",
"AdditionalInformation": "Allowing unrestricted access to an EventBridge event bus increases the risk of malicious event injection, unauthorized access, and data manipulation. Attackers could flood the event bus with malicious events, leading to unexpected behavior, security breaches, or excessive AWS costs. To reduce exposure, event buses should be secured using IAM policies and resource-based permissions, ensuring that only trusted AWS services and accounts can send or receive events.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1107,7 +1169,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon S3 Glacier provides low-cost, long-term storage for archival data. Glacier vaults can be configured with resource-based policies that control access. If a Glacier vault policy allows access to everyone, unauthorized users could retrieve or delete archived data, leading to data exposure or loss. To enhance security, Glacier vault policies should be restricted to specific AWS accounts, IAM roles, or trusted entities, ensuring only authorized users can access or manage archived data.",
"AdditionalInformation": "Allowing public access to S3 Glacier vaults poses a significant security risk, increasing the chance of data breaches, unauthorized deletions, or compliance violations. Attackers could restore and download sensitive archived data if the vault is misconfigured. To prevent unauthorized access, Glacier vaults should have strict access controls, using IAM policies, encryption, and resource-based permissions, ensuring that only trusted users and systems can interact with archived data.",
"LevelOfRisk": 5
"LevelOfRisk": 5,
"Weight": 1000
}
]
},
@@ -1124,7 +1187,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "AWS Glue Data Catalog is a centralized metadata repository used to store and manage schema information for data lakes and analytics workflows. By default, Glue Data Catalogs can be configured to allow public access, which poses a significant security risk if sensitive metadata is exposed. To enhance security, Glue Data Catalogs should be restricted to specific AWS accounts, IAM roles, or trusted services, ensuring that only authorized users can access or modify catalog information.",
"AdditionalInformation": "Allowing public access to Glue Data Catalogs increases the risk of unauthorized access, data leaks, and compliance violations. Attackers could gain insights into an organization’s data structure or modify catalog entries, leading to potential data corruption or unauthorized data exposure. To reduce security risks, Glue Data Catalogs should be secured using IAM policies, resource-based permissions, and AWS Lake Formation, ensuring that only trusted accounts and services can interact with metadata.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1141,7 +1205,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Managed Streaming for Apache Kafka (MSK) allows organizations to build and manage real-time data streaming applications. If a Kafka cluster is publicly accessible, it exposes data streams, configurations, and messaging topics to the internet, increasing the risk of unauthorized access, data interception, and service disruptions. To enhance security, Kafka clusters should be restricted to private networks, ensuring that only trusted AWS resources, VPCs, and IAM-authenticated users can interact with the service.",
"AdditionalInformation": "Exposing a Kafka cluster to the public internet creates significant security risks, including unauthorized data ingestion, data leaks, and message tampering. Attackers could consume, modify, or inject malicious data into Kafka topics, disrupting real-time analytics and application workflows. To mitigate these risks, Kafka clusters should be deployed in private subnets, with access restricted via VPC security groups, IAM policies, and AWS PrivateLink, ensuring secure and controlled data streaming.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1158,7 +1223,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "AWS Key Management Service (KMS) provides secure encryption key management for data encryption and cryptographic operations. If KMS keys are exposed to the internet, unauthorized entities could potentially use, modify, or compromise encryption keys, leading to data breaches and security vulnerabilities. To enhance security, KMS keys should be restricted to trusted AWS accounts, IAM roles, and specific AWS services, ensuring that only authorized users and systems can access and manage them.",
"AdditionalInformation": "Exposing KMS keys to the public poses a critical security risk, as compromised keys can lead to unauthorized data decryption, loss of data integrity, and compliance violations. Attackers could potentially use public KMS keys to encrypt or decrypt sensitive data, undermining security controls. To prevent unauthorized access, KMS key policies should enforce strict access control using IAM permissions, VPC endpoint policies, and AWS PrivateLink, ensuring that encryption operations remain fully secured and isolated from the public internet.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1175,7 +1241,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS Lightsail Databases provide managed database solutions for applications. If a Lightsail database is set to public mode, it is directly accessible from the internet, increasing the risk of unauthorized access and data breaches. To enhance security, Lightsail databases should be configured in private mode, ensuring they are accessible only from trusted instances, private networks, or VPN connections.",
"AdditionalInformation": "Publicly accessible Lightsail databases expose sensitive data to unauthorized access, brute-force attacks, and potential exploitation. Attackers can attempt to compromise credentials, inject malicious queries, or exfiltrate data. To mitigate these risks, Lightsail databases should remain private, with access controlled through firewalls, IAM authentication, and private networking configurations, ensuring secure database connectivity and data protection.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1192,7 +1259,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS Lightsail instances provide a simple way to deploy and manage cloud-based virtual machines. If a Lightsail instance is publicly accessible, it can be directly reached from the internet, increasing the risk of unauthorized access, attacks, and data breaches. To enhance security, Lightsail instances should be restricted to private access, ensuring they are reachable only through secure connections, such as VPNs, bastion hosts, or private networking configurations.",
"AdditionalInformation": "Publicly exposed Lightsail instances create a larger attack surface, making them vulnerable to brute-force attacks, unauthorized access, and exploitation of software vulnerabilities. Attackers could compromise credentials, gain control over the instance, or disrupt services. To mitigate these risks, Lightsail instances should be secured using firewalls, private IP configurations, security group restrictions, and IAM-based access controls, ensuring that only trusted users and networks can connect.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1209,7 +1277,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS MQ brokers manage message queues for applications, facilitating secure and reliable communication between distributed services. If an MQ broker is publicly accessible, it can be reached from the internet, increasing the risk of unauthorized access, message interception, and data breaches. To enhance security, MQ brokers should be restricted to private networks, ensuring they are accessible only from trusted VPCs, private endpoints, or secure VPN connections.",
"AdditionalInformation": "Publicly exposed MQ brokers pose a significant security risk, as attackers can attempt to intercept messages, inject malicious data, or disrupt message delivery. This could lead to data manipulation, unauthorized access to sensitive information, and system-wide outages. To mitigate these risks, MQ brokers should be configured within private subnets, with access restricted using security groups, IAM policies, and VPC endpoint controls, ensuring secure and controlled message queue operations.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1226,7 +1295,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon NeptuneDB manual cluster snapshots store backups of graph database clusters, containing sensitive data such as relationships, metadata, and application configurations. By default, NeptuneDB snapshots are private, but they can be manually shared or made public, which can expose critical database information. To enhance security, NeptuneDB manual snapshots should never be publicly accessible, ensuring they are only shared with trusted AWS accounts when necessary.",
"AdditionalInformation": "Publicly accessible NeptuneDB snapshots pose a significant security risk, as attackers could restore the snapshot in their own AWS account and gain full access to the database contents. This could lead to data leaks, compliance violations, and unauthorized access to sensitive business information. To prevent data exposure, NeptuneDB snapshots should be restricted using IAM policies and AWS resource-based permissions, ensuring that only authorized users and services can access and manage database backups securely.",
"LevelOfRisk": 5
"LevelOfRisk": 5,
"Weight": 1000
}
]
},
@@ -1243,7 +1313,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Neptune clusters provide a fully managed graph database service designed for applications requiring complex relationship queries. By default, Neptune clusters can be deployed in either public or private subnets. If a Neptune cluster is placed in a public subnet, it becomes accessible from the internet, significantly increasing the risk of unauthorized access and data breaches. To enhance security, Neptune clusters should only be deployed in private subnets, ensuring access is restricted to trusted VPCs, IAM roles, and security group configurations.",
"AdditionalInformation": "Deploying a Neptune cluster in a public subnet exposes the database endpoints to external threats, making them vulnerable to brute-force attacks, unauthorized queries, and data exfiltration. Attackers could exploit misconfigurations to gain access to sensitive graph data, leading to potential compliance violations and security incidents. To reduce exposure, Neptune clusters should be restricted to private subnets, with access controlled through VPC security groups, IAM authentication, and private endpoint configurations, ensuring secure database operations and protected data access.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1260,7 +1331,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon OpenSearch (formerly Elasticsearch) domains provide search, analytics, and log management capabilities for applications. If an OpenSearch/Elasticsearch domain is publicly accessible, it can be reached from the internet, exposing sensitive data and administrative controls to unauthorized users. To enhance security, OpenSearch domains should be restricted to private networks, ensuring access is limited to trusted VPCs, IAM roles, or specific security group rules.",
"AdditionalInformation": "Publicly accessible OpenSearch/Elasticsearch domains pose a significant security risk, as attackers could execute unauthorized queries, modify data, or gain administrative control over the cluster. This could lead to data breaches, service disruptions, and compliance violations. To mitigate these risks, OpenSearch domains should be deployed in private subnets, with access controlled using VPC restrictions, fine-grained access control (FGAC), IAM policies, and security group rules, ensuring secure and isolated search and analytics operations.",
"LevelOfRisk": 5
"LevelOfRisk": 5,
"Weight": 1000
}
]
},
@@ -1277,7 +1349,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon S3 buckets store and manage data, files, and application assets. Bucket policies control access permissions, and if an S3 bucket has a policy that allows WRITE access to everyone, unauthorized users can upload, modify, or delete objects, leading to data tampering, security breaches, or service disruptions. To enhance security, S3 bucket policies should be restricted to specific AWS accounts, IAM roles, or trusted services, ensuring only authorized users have WRITE permissions.",
"AdditionalInformation": "Allowing unrestricted WRITE access to an S3 bucket increases the risk of unauthorized modifications, data injection attacks, and accidental data loss. Attackers could upload malicious files, delete critical data, or overwrite important configurations. To prevent unauthorized changes, S3 bucket policies should explicitly deny public WRITE access, enforce least privilege access control, and use AWS Block Public Access settings to ensure secure and controlled data storage.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1294,7 +1367,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon S3 buckets store sensitive data and should have restricted access permissions. If an S3 bucket is listable by Everyone or Any AWS customer, unauthorized users can enumerate the objects within the bucket, potentially exposing sensitive information such as filenames, metadata, or even public datasets. To enhance security, S3 bucket permissions should be configured to restrict LIST access to only authorized IAM roles, AWS accounts, or specific services.",
"AdditionalInformation": "Allowing public or AWS-wide LIST access increases the risk of data enumeration, unauthorized access, and information leaks. Attackers or unauthorized users could identify and analyze stored files, extract metadata, or infer sensitive data. To mitigate this risk, S3 bucket policies should explicitly deny public LIST access, enforce least privilege permissions, and use AWS Block Public Access settings to prevent unintended data exposure.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1311,7 +1385,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Amazon S3 buckets should have strict access controls to prevent unauthorized modifications. If an S3 bucket is writable by Everyone or Any AWS customer, it allows unauthorized users to upload, modify, or delete objects, leading to data corruption, security breaches, and compliance risks. To enhance security, S3 bucket permissions should be restricted to trusted IAM roles, AWS accounts, or specific services.",
"AdditionalInformation": "Allowing public or AWS-wide WRITE access creates a significant security risk, as attackers can inject malicious files, overwrite critical data, or delete essential objects. This could lead to data loss, malware distribution, or unauthorized system modifications. To prevent unauthorized changes, S3 bucket policies should explicitly deny public WRITE access, enforce least privilege access, and use AWS Block Public Access settings to secure data integrity and prevent unauthorized modifications.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1328,7 +1403,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon SageMaker Notebook instances provide an interactive environment for machine learning development and data analysis. By default, these instances can be configured with direct internet access, which increases the risk of unauthorized access, data leaks, and exposure to malicious external threats. To enhance security, SageMaker Notebook instances should be restricted to private networks, ensuring they are accessed only through secure VPC connections, IAM authentication, or VPNs.",
"AdditionalInformation": "Allowing direct internet access to SageMaker Notebook instances poses a significant security risk, as attackers could exploit misconfigurations, exfiltrate data, or inject malicious code. Publicly accessible notebooks can lead to data breaches, intellectual property theft, or compromised model training workflows. To mitigate these risks, SageMaker Notebook instances should be configured within private subnets, with internet access disabled, and restricted using security groups, IAM policies, and VPC endpoint configurations to ensure secure and controlled machine learning operations.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1345,7 +1421,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "AWS Secrets Manager is used to securely store and manage sensitive information, such as API keys, database credentials, and encryption keys. By default, Secrets Manager secrets should be restricted to authorized IAM roles and AWS services. If a secret is publicly accessible, it can be exposed to unauthorized users, leading to data leaks, security breaches, and potential exploitation of sensitive credentials. To enhance security, Secrets Manager secrets should be strictly controlled using IAM policies and resource-based permissions.",
"AdditionalInformation": "Allowing public access to Secrets Manager secrets creates a critical security vulnerability, as attackers could retrieve, misuse, or exfiltrate sensitive information. Compromised secrets could lead to unauthorized access to databases, applications, or cloud services, resulting in data breaches, financial loss, or compliance violations. To mitigate this risk, Secrets Manager secrets should be restricted using least privilege IAM permissions, encrypted with AWS KMS, and accessed only by trusted AWS services and roles, ensuring secure and controlled secret management.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1362,7 +1439,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Simple Email Service (SES) identities (such as email addresses or domains) are used to send and receive emails through AWS. By default, SES identities should be restricted to authorized AWS accounts and IAM roles. If an SES identity is publicly accessible, unauthorized users could send emails using the identity, leading to email spoofing, phishing attacks, or misuse of the domain for malicious purposes. To enhance security, SES identities should be properly restricted using IAM policies and verified senders.",
"AdditionalInformation": "Allowing public access to SES identities creates a security and reputational risk, as attackers could impersonate the identity, send spam, or launch phishing campaigns. This could lead to domain blacklisting, compliance violations, and damage to the organization’s email reputation. To mitigate these risks, SES identities should be restricted to trusted AWS accounts and IAM roles, ensuring that only authorized services and users can send emails, protecting the integrity and security of email communications.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1379,7 +1457,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Amazon Simple Queue Service (SQS) queues enable asynchronous message processing between distributed systems. By default, SQS queues should be restricted to authorized AWS accounts and IAM roles. If an SQS queue has a public policy, it allows anyone on the internet to send, receive, or delete messages, leading to data leaks, unauthorized message injection, and potential denial-of-service (DoS) attacks. To enhance security, SQS queue policies should be configured to allow access only to trusted AWS accounts, IAM roles, or specific AWS services.",
"AdditionalInformation": "Publicly accessible SQS queues pose a significant security risk, as attackers could inject malicious messages, disrupt processing workflows, or delete critical messages, leading to system failures and data integrity issues. To prevent unauthorized access, SQS policies should explicitly deny public access, enforce least privilege access control, and use IAM policies and VPC endpoint restrictions to ensure secure and controlled messaging operations.",
"LevelOfRisk": 5
"LevelOfRisk": 5,
"Weight": 1000
}
]
},
@@ -1396,7 +1475,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "AWS Systems Manager (SSM) Documents define configuration, automation, and maintenance tasks for AWS resources. By default, SSM documents should be restricted to specific AWS accounts, IAM roles, or AWS services. If an SSM document is set as public, unauthorized users could access, modify, or execute automation tasks on AWS infrastructure, leading to misconfigurations, security breaches, or unintended system modifications. To enhance security, SSM documents should be kept private and assigned only to trusted AWS entities.",
"AdditionalInformation": "Publicly accessible SSM documents pose a significant security risk, as attackers could execute malicious commands, modify system configurations, or disrupt AWS operations. This could lead to unauthorized access, data leaks, compliance violations, or system downtime. To prevent security threats, SSM documents should explicitly deny public access, enforce least privilege permissions, and use IAM policies and resource-based access controls to ensure only trusted users and systems can manage AWS resources.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1413,7 +1493,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "AWS CloudTrail is a service that records and monitors AWS API calls across an account, providing detailed logs of who performed what action, when, and from where. CloudTrail captures API activity from the AWS Management Console, SDKs, CLI, and AWS services such as CloudFormation. The logs include key details such as the identity of the API caller, timestamp, source IP address, request parameters, and response elements.",
"AdditionalInformation": "CloudTrail enhances security, auditing, and compliance by providing a complete history of API activities in an AWS account. Enabling a multi-region trail ensures: Detection of unauthorized activity in rarely used AWS regions. Global Service Logging is automatically enabled, capturing API calls from global services such as IAM and AWS Organizations. Tracking of all management events, ensuring that both read and write operations across AWS resources are recorded for improved security monitoring and compliance.",
"LevelOfRisk": 4
"LevelOfRisk": 4,
"Weight": 100
}
]
},
@@ -1430,7 +1511,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "AWS CloudTrail log file validation generates digitally signed digest files containing cryptographic hashes of each log file stored in Amazon S3. These digest files allow users to verify whether logs have been altered, deleted, or remain unchanged after being delivered by CloudTrail. Enabling log file validation ensures data integrity and auditability for security and compliance purposes.",
"AdditionalInformation": "Enabling log file validation enhances security by ensuring the integrity of CloudTrail logs, preventing tampering or unauthorized modifications. This helps: Detect log file alterations, ensuring logs remain trustworthy for audits and investigations. Improve compliance with frameworks that require log integrity, such as PCI DSS, SOC 2, and ISO 27001. Strengthen forensic capabilities, allowing security teams to verify log authenticity in case of a security incident.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1447,7 +1529,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "AWS Config is a service that continuously monitors, records, and evaluates configuration changes in AWS resources within an account. It tracks configuration items, relationships between resources, and changes over time, delivering logs for security analysis, change management, and compliance auditing. To ensure comprehensive monitoring, AWS Config should be enabled in all regions.",
"AdditionalInformation": "Enabling AWS Config in all regions improves security, visibility, and compliance by: Tracking resource changes, allowing for quick identification of misconfigurations. Supporting security audits and forensic investigations by maintaining a historical record of configurations.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1464,7 +1547,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Server access logging provides detailed records of requests made to an S3 bucket, including request type, accessed resources, timestamp, and requester details. Enabling server access logging on the CloudTrail S3 bucket ensures that all interactions with CloudTrail logs are recorded, improving security visibility and auditability",
"AdditionalInformation": "Enabling server access logging on CloudTrail S3 buckets enhances security monitoring, incident response, and compliance by: Capturing all events affecting CloudTrail logs, helping detect unauthorized access or modifications. Providing an audit trail for forensic investigations and compliance reporting. Enhancing security workflows by storing access logs in a separate, dedicated logging bucket for improved log integrity and analysis.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1481,7 +1565,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "AWS CloudTrail records API activity across an AWS account, and its logs contain sensitive security and operational data. AWS Key Management Service (KMS) provides encryption key management using customer-managed keys (CMKs) and Hardware Security Modules (HSMs) to ensure secure key storage and usage. CloudTrail logs can be encrypted using Server-Side Encryption (SSE) with KMS (SSE-KMS) to add an extra layer of protection and access control",
"AdditionalInformation": "Using SSE-KMS encryption for CloudTrail logs enhances security by adding an extra layer of access control. This ensures that only authorized users with both S3 read permissions and KMS decryption rights can access log data, protecting sensitive security information from unauthorized access or tampering. It also helps maintain compliance with security and regulatory standards by enforcing strict encryption controls.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1498,7 +1583,8 @@
"SubSection": "3.2 Retention",
"AttributeDescription": "AWS Key Management Service (KMS) allows users to manage encryption keys securely. Key rotation enables the automatic replacement of the backing key (the cryptographic material tied to a customer-managed key (CMK)), ensuring continuous security without disrupting access to previously encrypted data. AWS automatically retains previous backing keys to allow seamless decryption of older data while using a newly generated key for encryption. It is recommended to enable key rotation for symmetric CMKs, as asymmetric keys do not support this feature.",
"AdditionalInformation": "Regularly rotating encryption keys minimizes the risk associated with key compromise by ensuring that newly encrypted data is protected with a fresh key, reducing the potential impact of an exposed key. Since AWS KMS retains prior backing keys for seamless decryption, rotation does not disrupt access to previously encrypted data. Implementing key rotation enhances security by limiting the exposure window of any single encryption key and aligning with best practices for cryptographic hygiene.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1515,7 +1601,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "VPC Flow Logs capture and record IP traffic information for network interfaces within a VPC, allowing administrators to monitor and analyze network activity. These logs are stored in Amazon CloudWatch Logs for retrieval and analysis. It is recommended to enable VPC Flow Logs for rejected packets to track unauthorized access attempts, misconfigurations, or potential security threats within the VPC.",
"AdditionalInformation": "Enabling VPC Flow Logs for rejected traffic enhances network visibility and security monitoring by detecting suspicious activity, failed connection attempts, and potential threats. These logs help identify anomalous traffic patterns, troubleshoot connectivity issues, and support incident response workflows, improving overall security posture.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1532,7 +1619,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "S3 object-level API operations, such as GetObject, PutObject, and DeleteObject, are classified as data events in AWS CloudTrail. By default, CloudTrail does not log data events, meaning detailed tracking of individual object interactions is not enabled. To enhance visibility and security, it is recommended to enable object-level logging for S3 buckets to monitor access and modification activities.",
"AdditionalInformation": "Enabling object-level logging helps organizations meet compliance requirements, enhance security monitoring, and detect unauthorized access. It allows administrators to analyze user behavior, track modifications to critical data, and respond to security incidents in real time using Amazon CloudWatch Events, ensuring greater control over S3 bucket activity.",
"LevelOfRisk": 1
"LevelOfRisk": 1,
"Weight": 1
}
]
},
@@ -1549,7 +1637,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "S3 object-level API operations, such as GetObject, PutObject, and DeleteObject, are classified as data events in AWS CloudTrail. By default, CloudTrail does not log data events, meaning individual object interactions are not tracked unless explicitly enabled. To improve security monitoring and compliance, it is recommended to enable object-level logging for S3 buckets.",
"AdditionalInformation": "Object-level logging enhances data security, compliance, and operational visibility by providing detailed tracking of who accessed, modified, or deleted objects within S3 buckets. This enables organizations to monitor user behavior, detect unauthorized access, and quickly respond to potential security incidents using Amazon CloudWatch Events.",
"LevelOfRisk": 1
"LevelOfRisk": 1,
"Weight": 1
}
]
},
@@ -1566,7 +1655,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of API calls can be achieved by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By configuring metric filters and alarms, organizations can automatically detect and respond to unauthorized API calls, improving security visibility. It is recommended to establish a metric filter and alarm for unauthorized API calls to enhance threat detection and incident response.",
"AdditionalInformation": "Monitoring unauthorized API calls helps identify potential security incidents faster, reducing the time attackers have to exploit vulnerabilities. CloudWatch provides real-time monitoring and alerting, while SIEM solutions offer centralized security event analysis. Detecting unauthorized API calls early allows organizations to take immediate action, investigate potential threats, and strengthen overall AWS security posture.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1583,7 +1673,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by directing CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By setting up metric filters and alarms, organizations can detect and respond to security risks effectively. It is recommended to establish a metric filter and alarm for AWS console logins that are not protected by multi-factor authentication (MFA) to enhance security monitoring.",
"AdditionalInformation": "Monitoring console logins without MFA improves visibility into accounts that lack strong authentication controls. Accounts without MFA are more vulnerable to credential theft, brute-force attacks, and unauthorized access. By detecting these login attempts in real-time, organizations can identify security gaps, enforce MFA policies, and reduce the risk of account compromise.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1600,7 +1691,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be achieved by directing CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. Setting up metric filters and alarms helps detect potential security threats. It is recommended to establish a metric filter and alarm for root account login attempts to identify unauthorized access or improper use of the highly privileged root account.",
"AdditionalInformation": "Monitoring root account logins enhances visibility into the usage of the most privileged AWS account, which should be used only in exceptional cases. Frequent or unauthorized root logins increase security risks by exposing critical administrative controls. Detecting root login attempts in real time enables organizations to identify potential security incidents, enforce least privilege principles, and limit unnecessary use of the root account.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1617,7 +1709,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by directing CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By configuring metric filters and alarms, organizations can detect critical security events. It is recommended to establish a metric filter and alarm for changes to Identity and Access Management (IAM) policies to track modifications that could affect authentication and authorization controls.",
"AdditionalInformation": "Monitoring IAM policy changes helps ensure that access controls remain secure and intact. Unauthorized or unintended modifications to IAM policies can lead to privilege escalation, misconfigurations, and security breaches. Detecting these changes in real-time allows organizations to respond quickly to potential threats, enforce least privilege principles, and maintain a strong security posture.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1634,7 +1727,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by directing CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By configuring metric filters and alarms, organizations can track critical security events. It is recommended to establish a metric filter and alarm to detect changes to CloudTrail configurations, ensuring that logging remains active and tamper-proof.",
"AdditionalInformation": "Monitoring CloudTrail configuration changes helps maintain continuous visibility into AWS account activity. Unauthorized modifications to CloudTrail settings could disable or alter logging, potentially allowing malicious activity to go undetected. Detecting these changes in real time enables organizations to quickly respond to threats, enforce security best practices, and ensure compliance with auditing requirements.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1651,7 +1745,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by directing CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By setting up metric filters and alarms, organizations can detect potential security threats. It is recommended to establish a metric filter and alarm for failed console authentication attempts to identify potential unauthorized access attempts or brute-force attacks.",
"AdditionalInformation": "Monitoring failed console logins helps detect brute-force attempts and unauthorized access attempts early. Repeated failed authentication attempts can indicate malicious activity, and tracking them allows security teams to identify suspicious IP addresses, correlate with other security events, and take proactive measures to protect AWS accounts.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1668,7 +1763,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be achieved by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By configuring metric filters and alarms, organizations can detect security-critical changes. It is recommended to set up a metric filter and alarm for customer-managed KMS keys (CMKs) that are disabled or scheduled for deletion to prevent unintended encryption key loss.",
"AdditionalInformation": "Disabling or deleting a customer-managed CMK can render encrypted data permanently inaccessible, leading to data loss and service disruptions. Monitoring CMK state changes helps detect unauthorized or accidental modifications, ensuring encryption keys remain available and aligned with security policies. Detecting such changes in real time allows organizations to prevent data loss, maintain compliance, and take corrective action if needed.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1685,7 +1781,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By configuring metric filters and alarms, organizations can track critical security-related changes. It is recommended to set up a metric filter and alarm for modifications to S3 bucket policies to detect potential misconfigurations or unauthorized access changes.",
"AdditionalInformation": "Monitoring S3 bucket policy changes helps detect and respond to overly permissive configurations that could expose sensitive data. Unauthorized or accidental modifications may grant public access or excessive permissions, increasing the risk of data breaches and compliance violations. Real-time alerts allow security teams to quickly identify, investigate, and correct risky policy changes, reducing exposure and strengthening data security.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1702,7 +1799,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by directing CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. By configuring metric filters and alarms, organizations can detect critical configuration changes. It is recommended to establish a metric filter and alarm for modifications to AWS Config’s configurations to ensure continuous monitoring and compliance.",
"AdditionalInformation": "Monitoring AWS Config configuration changes helps maintain visibility and control over resource configurations. Unauthorized or accidental modifications to AWS Config settings may result in gaps in security monitoring, misconfigurations going undetected, and compliance violations. Real-time alerts allow security teams to quickly detect, investigate, and respond to changes, ensuring the integrity of configuration tracking across the AWS environment.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1719,7 +1817,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. Security groups act as stateful packet filters that control inbound and outbound traffic within a VPC. It is recommended to establish a metric filter and alarm to detect changes to security groups to prevent unauthorized modifications that could expose resources to security threats.",
"AdditionalInformation": "Monitoring security group changes helps ensure that network access controls remain secure and that AWS resources are not unintentionally exposed. Unauthorized or accidental modifications to security groups can create security gaps, increasing the risk of data breaches and unauthorized access. Real-time alerts enable security teams to detect, investigate, and respond to security group changes quickly, maintaining a strong network security posture.",
"LevelOfRisk": 3
"LevelOfRisk": 3,
"Weight": 10
}
]
},
@@ -1736,7 +1835,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. Network Access Control Lists (NACLs) act as stateless packet filters that control inbound and outbound traffic for subnets within a VPC. It is recommended to establish a metric filter and alarm to detect changes to NACLs to prevent unauthorized modifications that could compromise network security.",
"AdditionalInformation": "Monitoring NACL changes helps ensure that network traffic controls remain properly configured and that AWS resources are not unintentionally exposed. Unauthorized or accidental modifications to NACL rules can lead to misconfigured security policies, increased attack surfaces, and potential data breaches. Real-time alerts enable security teams to detect, investigate, and respond to NACL modifications quickly, maintaining strong network security controls.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1753,7 +1853,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. Network gateways serve as the primary route for traffic entering and leaving a VPC, facilitating communication with external networks. It is recommended to establish a metric filter and alarm for changes to network gateways to ensure that network traffic is securely routed through controlled paths.",
"AdditionalInformation": "Monitoring network gateway changes helps maintain secure ingress and egress traffic flows within a VPC. Unauthorized or accidental modifications to network gateways can disrupt connectivity, introduce security vulnerabilities, or expose AWS resources to external threats. Real-time alerts enable security teams to detect, investigate, and respond to changes quickly, ensuring that all traffic follows a controlled and secure routing policy.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1770,7 +1871,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be achieved by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. Route tables determine how network traffic is directed between subnets and network gateways within a VPC. It is recommended to establish a metric filter and alarm for changes to route tables to detect unauthorized or accidental modifications that could impact network security and connectivity.",
"AdditionalInformation": "Monitoring route table changes ensures that VPC traffic follows the intended and secure routing paths. Unauthorized modifications can result in misrouted traffic, exposure of sensitive resources, or connectivity disruptions. Real-time alerts enable security teams to detect, investigate, and respond to route table changes promptly, preventing potential security risks and maintaining a controlled and secure network environment.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1787,7 +1889,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be implemented by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. AWS accounts can contain multiple Virtual Private Clouds (VPCs), and VPC peering connections allow network traffic to flow between them. It is recommended to establish a metric filter and alarm for changes made to VPC configurations to detect unauthorized modifications that could impact network security and connectivity.",
"AdditionalInformation": "Monitoring VPC configuration changes helps ensure network integrity, security, and proper traffic flow within AWS environments. Unauthorized or accidental modifications can result in misconfigured routing, unintended internet exposure, or connectivity disruptions between resources. Real-time alerts enable security teams to detect, investigate, and respond to VPC changes promptly, preventing security risks and ensuring consistent network accessibility and isolation.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1804,7 +1907,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Real-time monitoring of AWS API calls can be achieved by forwarding CloudTrail logs to Amazon CloudWatch Logs or an external Security Information and Event Management (SIEM) system. AWS Organizations allows centralized management of multiple AWS accounts, and modifications to its configuration can significantly impact access control, account governance, and security policies. It is recommended to establish a metric filter and alarm for changes made to AWS Organizations in the master AWS account to detect unauthorized or unintended modifications.",
"AdditionalInformation": "Monitoring AWS Organizations configuration changes helps prevent unwanted, accidental, or malicious modifications that could lead to unauthorized access, policy misconfigurations, or security breaches. Detecting changes in real time ensures that unexpected modifications can be investigated and remediated quickly, reducing the risk of compromised governance structures and ensuring compliance with organizational security policies.",
"LevelOfRisk": 2
"LevelOfRisk": 2,
"Weight": 8
}
]
},
@@ -1821,7 +1925,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "AWS Security Hub centralizes security data from multiple AWS services and third-party security tools, allowing for real-time threat detection, risk assessment, and compliance monitoring. When enabled, Security Hub aggregates, organizes, and prioritizes security findings from services such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie, as well as integrated third-party security products. This provides organizations with a unified security management platform to enhance threat visibility.",
|
||||
"AdditionalInformation": "Enabling AWS Security Hub provides a comprehensive view of your security posture, helping to identify vulnerabilities, detect threats, and enforce security best practices. It allows organizations to monitor security trends, benchmark environments against industry standards, and quickly respond to high-priority security issues, strengthening overall AWS security governance and compliance.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1838,7 +1943,8 @@
|
||||
"SubSection": "3.2 Retention",
|
||||
"AttributeDescription": "AWS CloudWatch Log Groups store logs from various AWS services and applications, enabling monitoring, debugging, and security auditing. By default, CloudWatch logs are retained indefinitely, which can lead to unnecessary data storage costs and compliance risks. To manage log lifecycle effectively, it is recommended to set a retention policy for CloudWatch Log Groups, ensuring logs are retained only for a specific number of days based on operational and compliance requirements.",
|
||||
"AdditionalInformation": "Setting a retention policy for CloudWatch logs helps balance cost management, compliance, and security. Retaining logs for too long increases storage costs and potential exposure to sensitive data, while keeping them for too short a duration can limit forensic investigations and compliance reporting. By defining a specific retention period, organizations can ensure logs are available for troubleshooting and audits while adhering to data retention best practices and regulatory requirements.",
|
||||
"LevelOfRisk": 2
|
||||
"LevelOfRisk": 2,
|
||||
"Weight": 8
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
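For the retention entry above, a minimal boto3 sketch of one way to apply a retention policy across a subscription's log groups; the 90-day value is an assumption standing in for whatever the organization's requirements dictate.

```python
# Sketch: apply a 90-day retention policy to every CloudWatch log group that
# has none. An unset retentionInDays means "retain forever".
import boto3

logs = boto3.client("logs")
paginator = logs.get_paginator("describe_log_groups")

for page in paginator.paginate():
    for group in page["logGroups"]:
        if "retentionInDays" not in group:
            logs.put_retention_policy(
                logGroupName=group["logGroupName"],
                retentionInDays=90,  # placeholder retention period
            )
```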
@@ -17,7 +17,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "Microsoft Entra ID Security Defaults offer preconfigured security settings designed to protect organizations from common identity attacks at no additional cost. These settings enforce basic security measures such as MFA registration, risk-based authentication prompts, and blocking legacy authentication clients that do not support MFA. Security defaults are available to all organizations and can be enabled via the Azure portal to strengthen authentication security.",
"AdditionalInformation": "Security defaults provide built-in protections to reduce the risk of unauthorized access until organizations configure their own identity security policies. By requiring MFA, blocking weak authentication methods, and adapting authentication challenges based on risk factors, these settings create a stronger security foundation without additional licensing requirements.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
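The security defaults state can be read programmatically from Microsoft Graph. A minimal sketch, assuming a token with the Policy.Read.All permission has already been acquired (for example via MSAL):

```python
# Sketch: read the tenant's security defaults enforcement state from
# Microsoft Graph. Token acquisition is assumed and out of scope here.
import requests

ACCESS_TOKEN = "<token with Policy.Read.All>"  # placeholder

resp = requests.get(
    "https://graph.microsoft.com/v1.0/policies/identitySecurityDefaultsEnforcementPolicy",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=30,
)
resp.raise_for_status()
print("Security defaults enabled:", resp.json().get("isEnabled"))
```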
@@ -34,7 +35,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "[IMPORTANT] If your organization has Microsoft Entra ID licensing (included in Microsoft 365 E3, E5, F5, or EM&S E3/E5) and can use Conditional Access, you may skip this section and proceed to Conditional Access. Enable multi-factor authentication (MFA) for all users, roles, and groups with write access to Azure resources. This includes both custom-created roles and built-in roles such as: Service Co-Administrators, Subscription Owners, Contributors",
"AdditionalInformation": "MFA enhances security by requiring two or more authentication factors, ensuring that only authorized users gain access. It significantly reduces the risk of unauthorized access, credential theft, and privilege escalation. By implementing MFA, attackers must compromise multiple authentication mechanisms, making breaches far more difficult and improving overall Azure resource security.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -51,7 +53,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "[IMPORTANT] If your organization has Microsoft Entra ID licensing (included in Microsoft 365 E3, E5, F5, or EM&S E3/E5) and can use Conditional Access, you may skip this section and proceed to Conditional Access. Enable multi-factor authentication (MFA) for all non-privileged users to enhance security and prevent unauthorized access.",
"AdditionalInformation": "MFA strengthens authentication by requiring at least two verification factors, making it significantly harder for attackers to gain access using stolen credentials. Even if one authentication factor is compromised, an additional layer of security reduces the risk of unauthorized account access, enhancing overall identity and access management security.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -68,7 +71,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "This recommendation ensures that users accessing the Windows Azure Service Management API (e.g., Azure PowerShell, Azure CLI, Azure Resource Manager API) are required to authenticate using multi-factor authentication (MFA) before accessing resources.",
"AdditionalInformation": "Administrative access to the Azure Service Management API should be secured with enhanced authentication measures to prevent unauthorized changes. Enforcing MFA helps mitigate the risk of credential compromise, privilege abuse, and unauthorized modifications to administrative settings. IMPORTANT: While exceptions for specific users or groups may be configured, they should be carefully tracked and periodically reviewed through an Access Review process. The policy should apply to all users by default, ensuring that only explicitly exempted accounts bypass MFA.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -85,7 +89,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "This recommendation ensures that users accessing Microsoft Admin Portals (such as Microsoft 365 Admin, Microsoft 365 Defender, Exchange Admin Center, and Azure Portal) must authenticate using multi-factor authentication (MFA) before logging in.",
"AdditionalInformation": "Microsoft Admin Portals provide privileged access to critical settings and resources, requiring enhanced security measures. Enforcing MFA reduces the risk of unauthorized access, credential compromise, and administrative abuse, preventing intruders from making unauthorized changes to administrative settings.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -102,7 +107,8 @@
"SubSection": "1.1 Authentication",
"AttributeDescription": "Ensure that privileged virtual machines do not allow logins from identities without multi-factor authentication (MFA) and that access is restricted to necessary permissions only. Unauthorized access can enable attackers to move laterally and misuse the virtual machine’s managed identity for further privilege escalation or unauthorized operations.",
"AdditionalInformation": "Requiring MFA for privileged VM access reduces the risk of credential compromise, lateral movement, and unauthorized access to cloud resources. Attackers can exploit valid accounts to access virtual machines and escalate privileges using the managed identity. Enforcing MFA and least privilege access helps mitigate these risks by preventing unauthorized logins and restricting administrative permissions to only what is necessary.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -119,7 +125,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "Restrict the ability to create new Microsoft Entra ID or Azure AD B2C tenants to administrators or appropriately delegated users.",
"AdditionalInformation": "Limiting tenant creation to authorized administrators prevents unauthorized users from creating new tenants, reducing the risk of uncontrolled environments, misconfigurations, and potential security gaps. This ensures that only approved users can establish and manage tenant resources.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -136,7 +143,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "To maintain security and operational efficiency, it is recommended to have a minimum of two and a maximum of four users assigned the Global Administrator role in Microsoft Entra ID. This ensures redundancy while minimizing the risk of excessive privileged access.",
"AdditionalInformation": "The Global Administrator role holds broad privileges across Microsoft Entra ID services and should not be used for daily tasks. Administrators should have separate accounts for regular activities and privileged actions. Limiting the number of Global Administrators reduces the risk of unauthorized access, human error, and privilege misuse, while ensuring at least two administrators are available to prevent disruptions in case of unavailability. This approach aligns with the principle of least privilege and strengthens the security posture of an Azure tenant.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -153,7 +161,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "Restrict guest user permissions to prevent unauthorized access to directory resources and administrative roles.",
"AdditionalInformation": "Limiting guest access ensures that guest accounts cannot enumerate users, groups, or directory objects and prevents them from being assigned administrative roles. Guest access has three levels of restriction, with the most secure option being: “Guest user access is restricted to their own directory object” (most restrictive). This setting minimizes the risk of unauthorized access and ensures guests only interact with their assigned resources.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -170,7 +179,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "Require administrators to provide consent before applications can access Microsoft Entra ID resources, preventing unauthorized or malicious app integrations.",
"AdditionalInformation": "Allowing users to grant application permissions without restrictions increases the risk of data exfiltration and privilege abuse by malicious applications. Restricting app consent to administrators ensures that only verified and approved applications gain access, protecting sensitive data and privileged accounts from unauthorized use.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -187,7 +197,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "Allow users to grant consent only for selected permissions when the request comes from a verified publisher, ensuring tighter control over third-party application access.",
"AdditionalInformation": "Restricting app consent to verified publishers helps mitigate the risk of unauthorized data access and privilege abuse. Malicious applications may attempt to exploit user-granted permissions, so ensuring only trusted, verified applications can request access enhances security while maintaining flexibility for approved integrations.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -204,7 +215,8 @@
"SubSection": "1.2 Authorization",
"AttributeDescription": "Microsoft Entra ID Conditional Access allows organizations to define Named Locations and categorize them as trusted or untrusted. These locations can be based on geographical regions, specific IP addresses, or IP ranges to enhance access control policies.",
"AdditionalInformation": "Defining trusted IP addresses or ranges enables organizations to enforce Conditional Access policies based on user location. Users authenticating from trusted locations may receive fewer access restrictions, while those from untrusted sources can be subject to stricter security controls, reducing the risk of unauthorized access.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -221,7 +233,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Limit the ability to create security groups to administrators only, preventing unauthorized users from managing group memberships.",
"AdditionalInformation": "Allowing all users to create and manage security groups can lead to uncontrolled access, misconfigurations, and potential security risks. Unless business requirements justify broader delegation, restricting security group creation to administrators ensures that group permissions and memberships are properly managed, reducing the risk of unauthorized access.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -238,7 +251,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Restrict the ability to register third-party applications to administrators or appropriately delegated users, ensuring better security control over app integrations.",
"AdditionalInformation": "Allowing unrestricted application registration can expose Microsoft Entra ID data to security risks. Requiring administrator approval ensures that custom-developed applications undergo a formal security review before being granted access. Organizations may delegate permissions to developers or high-request users as needed, but policies should be reviewed to align with security best practices and operational needs.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -255,7 +269,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Limit the ability to send invitations to users with specific administrative roles, preventing unauthorized guest access to cloud resources.",
"AdditionalInformation": "Restricting guest invitations to designated administrators helps enforce “Need to Know” permissions, reducing the risk of unintended data exposure. By default, anyone in the organization—including guests and non-admins—can invite external users, which can lead to unauthorized access. Restricting this capability ensures that only approved accounts can extend access to the tenant, strengthening security and access control.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -272,7 +287,8 @@
"SubSection": "1.3 Privilege Escalation Prevention",
"AttributeDescription": "Limit Microsoft 365 group creation to administrators only, ensuring that group management remains under centralized control.",
"AdditionalInformation": "Restricting group creation to administrators prevents unauthorized or unnecessary group creation, ensuring that only appropriate groups are created and managed. This reduces the risk of misconfigurations, access issues, and uncontrolled resource sharing within the organization.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -289,7 +305,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Regularly review Network Security Groups (NSGs) for misconfigured or overly permissive rules, especially those exposing ports and protocols to the public internet. Any unnecessary exposure should be restricted to reduce security risks.",
"AdditionalInformation": "Exposing Remote Desktop Protocol (RDP) or other critical services to the internet increases the risk of brute-force attacks, which could lead to unauthorized access to Azure Virtual Machines. Compromised VMs can be used to launch attacks on other networked resources, both inside and outside Azure. Periodic NSG evaluations help minimize attack surfaces and ensure that only explicitly required access is permitted.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -306,7 +323,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Regularly review Network Security Groups (NSGs) for misconfigured or overly permissive rules, particularly those exposing SSH (port 22) or other critical services to the internet. Any unnecessary exposure should be restricted to reduce security risks.",
"AdditionalInformation": "Exposing SSH over the internet increases the risk of brute-force attacks, allowing attackers to gain unauthorized access to Azure Virtual Machines. Once compromised, a VM can be used as a pivot point to attack other machines within the Azure Virtual Network or even external systems. Periodic NSG evaluations help minimize attack surfaces and ensure that only explicitly required access is permitted.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -323,7 +341,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Regularly review Network Security Groups (NSGs) for misconfigured or overly permissive UDP rules, especially those exposing UDP-based services to the internet. Any unnecessary exposure should be restricted to prevent security risks.",
"AdditionalInformation": "Exposing UDP services to the internet increases the risk of Distributed Denial-of-Service (DDoS) amplification attacks, where attackers can reflect and amplify spoofed traffic from Azure Virtual Machines. Commonly exploited UDP services include DNS, NTP, SSDP, SNMP, and CLDAP, which can be used to disrupt services or launch attacks against other networked systems inside and outside Azure. Regular NSG evaluations help minimize attack surfaces and prevent VMs from being leveraged in DDoS attacks.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -340,7 +359,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Regularly review Network Security Groups (NSGs) for misconfigured or overly permissive HTTP(S) rules, ensuring that exposure to the internet is necessary and narrowly configured. Restrict access where it is not explicitly required.",
"AdditionalInformation": "Exposing HTTP(S) services to the internet increases the risk of brute-force attacks, credential stuffing, and exploitation of vulnerabilities in web applications or services. If compromised, an attacker can use the affected resource as a pivot point to escalate privileges, move laterally, and compromise other Azure resources within the tenant. Periodic NSG evaluations help reduce attack surfaces and enforce least privilege access.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
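The four NSG-review entries above share the same underlying check: find inbound rules open to the internet. A minimal sketch using azure-mgmt-network; the subscription ID is a placeholder, and filtering for specific ports (3389, 22, UDP services, 80/443) is left out for brevity.

```python
# Sketch: flag inbound NSG rules that allow traffic from anywhere.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")

OPEN_PREFIXES = {"*", "0.0.0.0/0", "Internet"}  # common "any source" values

for nsg in client.network_security_groups.list_all():
    for rule in nsg.security_rules or []:
        if (
            rule.direction == "Inbound"
            and rule.access == "Allow"
            and (rule.source_address_prefix or "") in OPEN_PREFIXES
        ):
            ports = rule.destination_port_range or rule.destination_port_ranges
            print(f"{nsg.name}: rule {rule.name} allows "
                  f"{rule.source_address_prefix} to ports {ports}")
```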
@@ -357,7 +377,8 @@
"SubSection": "2.1 Network",
"AttributeDescription": "Enable Azure Bastion to securely access Azure Virtual Machines (VMs) over the internet without exposing RDP (3389/TCP) or SSH (22/TCP) ports directly to the public network. Azure Bastion provides remote access through TLS (443/TCP) while integrating with Azure Active Directory (AAD) security policies.",
"AdditionalInformation": "Using Azure Bastion eliminates the need to assign public IP addresses to VMs, reducing exposure to brute-force attacks and unauthorized access. It enhances security by enabling browser-based RDP/SSH access, leveraging Azure Active Directory controls, and supporting Multi-Factor Authentication (MFA), Conditional Access Policies, and other hardening measures for a centralized and secure access solution.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -374,7 +395,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Use Microsoft Entra authentication for SQL Database to centralize identity and credential management, enhancing security and simplifying access control. By leveraging Microsoft Entra ID, organizations can manage database users, permissions, and authentication policies in a single location, reducing complexity and improving security.",
"AdditionalInformation": "Microsoft Entra authentication eliminates the need for separate SQL authentication, preventing identity sprawl and simplifying password management. It enables centralized permission management, supports token-based authentication, integrates with Active Directory Federation Services (ADFS), and enhances security with Multi-Factor Authentication (MFA). Using Entra ID authentication also eliminates the need to store passwords, enabling secure, modern authentication methods while ensuring seamless access control for users and applications.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -391,7 +413,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Disable access from Azure services to PostgreSQL flexible servers to restrict inbound connections and enhance security.",
"AdditionalInformation": "Allowing access from all Azure services bypasses firewall restrictions and permits connections from any Azure resource, including those outside your subscription. This can expose the database to unauthorized access and security risks. Instead, configure firewall rules or Virtual Network (VNet) rules to allow only specific, trusted network ranges, ensuring tighter access control.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -408,7 +431,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Restrict Cosmos DB access to whitelisted networks to minimize exposure and reduce potential attack vectors.",
"AdditionalInformation": "Limiting Cosmos DB communication to specific trusted networks prevents unauthorized access from unapproved sources, including the public internet. This enhances security by reducing the attack surface and ensuring only designated networks can interact with the database.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -425,7 +449,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Use private endpoints for Cosmos DB to restrict network traffic to approved sources, ensuring secure and private communication.",
"AdditionalInformation": "For sensitive data, private endpoints provide granular control over which services can access Cosmos DB, preventing exposure to unauthorized networks. This setup ensures that all traffic remains within a private network, reducing security risks and enhancing data protection.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -442,7 +467,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Use Microsoft Entra ID for Cosmos DB client authentication, leveraging Azure RBAC for enhanced security, centralized management, and MFA support.",
"AdditionalInformation": "Entra ID authentication is significantly more secure than token-based authentication, as tokens must be stored persistently on the client, increasing the risk of compromise. Entra ID eliminates this risk by securely handling credentials, integrating seamlessly with Azure RBAC, and supporting multi-factor authentication (MFA) for stronger access control.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -459,7 +485,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Disable public network access for Azure Storage accounts, ensuring that access settings for individual containers cannot override this restriction. This applies to Azure Resource Manager deployment model storage accounts, as classic deployment model accounts will be retired by August 31, 2024.",
"AdditionalInformation": "By default, Azure Storage accounts allow users with appropriate permissions to enable public access to containers and blobs, granting read-only access without requiring authentication. To minimize the risk of unauthorized data exposure, public access should be restricted unless explicitly required. Instead, use Azure AD RBAC or shared access signatures (SAS) to grant controlled and time-limited access to storage resources.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
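For the storage public-access entry above, a minimal sketch shelling out to the Azure CLI; the account and resource group names are placeholders.

```python
# Sketch: disable public network access and anonymous blob access on a
# storage account via the Azure CLI.
import subprocess

subprocess.run(
    [
        "az", "storage", "account", "update",
        "--name", "mystorageaccount",      # placeholder
        "--resource-group", "my-rg",       # placeholder
        "--public-network-access", "Disabled",
        "--allow-blob-public-access", "false",
    ],
    check=True,
)
```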
@@ -476,7 +503,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Restricting default network access enhances security by preventing unrestricted connections to Azure Storage accounts. By default, storage accounts accept connections from any network, so access should be limited to selected networks by modifying the default configuration.",
"AdditionalInformation": "Storage accounts should deny access from all networks by default, except for explicitly allowed Azure Virtual Networks or specific IP address ranges. This approach creates a secure network boundary, ensuring only authorized applications can connect. Even when network rules allow access, proper authentication (via access keys or SAS tokens) is still required, adding an extra layer of security.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -493,7 +521,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "This recommendation assumes that public network access is set to “Enabled from selected virtual networks and IP addresses” and that the default network access rule for storage accounts is set to deny. Some Azure services require access to storage accounts from networks that cannot be explicitly granted permissions through network rules. To allow these services to function properly, enable the trusted Azure services exception, which allows selected Azure services to bypass network restrictions while still enforcing strong authentication.",
"AdditionalInformation": "Enabling firewall rules on a storage account blocks access to all incoming requests, including those from other Azure services. Allowing access to trusted Azure services ensures that essential Azure functions, such as Azure Backup, Event Grid, Monitor, and Site Recovery, can interact with storage accounts securely without exposing them to broader public access.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -510,7 +539,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Use private endpoints for Azure Storage accounts to enable secure, encrypted access over a Private Link. Private endpoints assign an IP address from the Virtual Network (VNet) for each service, ensuring that network traffic remains isolated and encrypted within the VNet. This configuration helps segment network traffic, preventing external access while allowing secure communication between services. Additionally, VNets can extend addressing spaces or act as secure tunnels over public networks, connecting remote infrastructures securely.",
"AdditionalInformation": "Encrypting traffic between services protects sensitive data from interception and unauthorized access. Using private endpoints ensures that data remains within a controlled network environment, reducing exposure to external threats and enhancing overall security posture.",
-"LevelOfRisk": 2
+"LevelOfRisk": 2,
+"Weight": 8
}
]
},
@@ -527,7 +557,8 @@
"SubSection": "2.2 Storage",
"AttributeDescription": "Ensure that Azure SQL Databases do not allow ingress from 0.0.0.0/0 (ANY IP), as this would expose them to the public internet. Azure SQL Server includes a firewall that blocks unauthorized connections by default, but it allows defining more granular IP address rules to restrict access. Allowing 0.0.0.0/0 effectively grants open access, increasing security risks.",
"AdditionalInformation": "Leaving Azure SQL firewall rules open to ANY IP significantly increases the attack surface, making databases vulnerable to brute-force attacks and unauthorized access. Instead, firewall rules should be restricted to specific, trusted IP ranges from known datacenters or internal resources. Additionally, enabling Allow Azure services and resources to access this server could allow access from outside your organization’s subscription or region, potentially bypassing SQL Server’s network ACLs and exposing the database to malicious activity.",
-"LevelOfRisk": 5
+"LevelOfRisk": 5,
+"Weight": 1000
}
]
},
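A minimal sketch of the corresponding check, parsing Azure CLI output; the server and resource group names are placeholders. Note that a 0.0.0.0–0.0.0.0 rule is the "Allow Azure services" rule the entry also warns about, while 0.0.0.0–255.255.255.255 is truly ANY IP.

```python
# Sketch: flag Azure SQL firewall rules that open the server too widely.
import json
import subprocess

out = subprocess.run(
    ["az", "sql", "server", "firewall-rule", "list",
     "--resource-group", "my-rg",        # placeholder
     "--server", "my-sql-server",        # placeholder
     "-o", "json"],
    check=True, capture_output=True, text=True,
).stdout

for rule in json.loads(out):
    if rule["startIpAddress"] == "0.0.0.0":
        print(f"Overly permissive rule: {rule['name']} "
              f"({rule['startIpAddress']}-{rule['endIpAddress']})")
```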
@@ -544,7 +575,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Enable Azure App Service Authentication to restrict anonymous HTTP requests and enforce authentication using identity providers before requests reach the application.",
"AdditionalInformation": "Enabling App Service Authentication ensures that all incoming HTTP requests are validated and authenticated before being processed by the application. It integrates with Microsoft Entra ID, Google, Facebook, Microsoft Accounts, and Twitter to manage authentication, token validation, and session handling. Additionally, disabling HTTP Basic Authentication helps mitigate risks from legacy authentication methods, strengthening application security.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -561,7 +593,8 @@
"SubSection": "2.3 Application",
"AttributeDescription": "Ensure that FTP access is disabled for Azure App Services, or if required, enforce FTPS (FTP over SSL/TLS) for secure authentication and data transmission.",
"AdditionalInformation": "FTP transmits data, including credentials, in plaintext, making it vulnerable to interception, credential theft, and data exfiltration. Requiring FTPS ensures that all FTP connections are encrypted, reducing the risk of unauthorized access, persistence, and lateral movement within the environment. If FTP is not needed, disabling it entirely enhances security by eliminating an unnecessary attack vector.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
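For the FTP entry above, a minimal sketch via the Azure CLI; app and resource group names are placeholders, and "FtpsOnly" can stand in for "Disabled" if FTPS is still required.

```python
# Sketch: disable FTP entirely for an App Service app.
import subprocess

subprocess.run(
    ["az", "webapp", "config", "set",
     "--name", "my-webapp",            # placeholder
     "--resource-group", "my-rg",      # placeholder
     "--ftps-state", "Disabled"],
    check=True,
)
```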
@@ -578,7 +611,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable log_checkpoints on PostgreSQL flexible servers to log checkpoint activity, generating query and error logs that aid in monitoring and troubleshooting database performance.",
"AdditionalInformation": "Logging checkpoints helps track database activity, errors, and performance issues, providing valuable insights for troubleshooting and optimization. While transaction logs remain inaccessible, query and error logs allow identification and resolution of configuration errors and inefficiencies, improving database reliability and performance.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
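A minimal sketch of enabling this server parameter via the Azure CLI; the server and resource group names are placeholders, and the same pattern applies to the other PostgreSQL parameters in this section.

```python
# Sketch: turn on checkpoint logging for a PostgreSQL flexible server.
import subprocess

subprocess.run(
    ["az", "postgres", "flexible-server", "parameter", "set",
     "--resource-group", "my-rg",          # placeholder
     "--server-name", "my-pg-server",      # placeholder
     "--name", "log_checkpoints", "--value", "on"],
    check=True,
)
```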
@@ -595,7 +629,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable connection throttling on PostgreSQL flexible servers to regulate concurrent connections and generate logs for monitoring and troubleshooting.",
"AdditionalInformation": "Connection throttling helps prevent Denial of Service (DoS) attacks by limiting excessive connections that could exhaust resources. It also protects against system failures or degraded performance caused by high user loads. Query and error logs provide insights into connection issues, enabling faster troubleshooting and performance optimization.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -612,7 +647,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable log_connections on PostgreSQL Single Servers to record connection attempts and authentication events for security monitoring and auditing. (Note: This recommendation applies only to Single Server, as Azure PostgreSQL Single Server is planned for retirement.)",
"AdditionalInformation": "Logging connection attempts helps detect unauthorized access, failed authentication attempts, and potential security threats. These logs provide valuable insights for troubleshooting, auditing, and identifying suspicious activities, enhancing overall database security and monitoring.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -629,7 +665,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable log_disconnections on PostgreSQL Servers to record session termination events, including session duration. (Note: This recommendation applies only to Single Server, as Azure PostgreSQL Single Server is planned for retirement.)",
"AdditionalInformation": "Logging disconnections provides visibility into session activity and duration, helping to analyze user behavior, detect anomalies, and troubleshoot performance issues. These logs contribute to audit trails, security monitoring, and database performance optimization.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -646,7 +683,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable audit_log_enabled on MySQL flexible servers to capture logs for connection attempts, DDL/DML activity, and other database events for security and monitoring purposes.",
"AdditionalInformation": "Logging database activity helps detect unauthorized access, troubleshoot issues, and analyze performance bottlenecks. Enabling audit logs enhances security visibility, compliance monitoring, and incident response by providing valuable insights into database operations.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -663,7 +701,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Configure audit_log_events on MySQL flexible servers to include CONNECTION events, ensuring that successful and failed connection attempts are logged.",
"AdditionalInformation": "Logging CONNECTION events provides visibility into authentication attempts, helping to detect unauthorized access, troubleshoot issues, and optimize database performance. These logs are essential for security monitoring, compliance, and identifying potential misconfigurations or attack attempts.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
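The two MySQL entries above can be set with the same CLI pattern; a minimal sketch with placeholder names:

```python
# Sketch: enable the MySQL flexible server audit log and include
# CONNECTION events.
import subprocess

COMMON = ["az", "mysql", "flexible-server", "parameter", "set",
          "--resource-group", "my-rg",          # placeholder
          "--server-name", "my-mysql-server"]   # placeholder

subprocess.run(COMMON + ["--name", "audit_log_enabled", "--value", "ON"], check=True)
subprocess.run(COMMON + ["--name", "audit_log_events", "--value", "CONNECTION"], check=True)
```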
@@ -680,7 +719,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable AuditEvent logging for Azure Key Vault to track and log interactions with keys, certificates, and other sensitive information.",
"AdditionalInformation": "Logging Key Vault access events provides an audit trail that helps monitor who accessed the vault, when, and how. This enhances security, compliance, and incident response by ensuring logs are stored in a designated Azure Storage account or Log Analytics workspace, allowing centralized monitoring across multiple Key Vaults.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -697,7 +737,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Ensure that network flow logs are collected and sent to a central Log Analytics workspace for monitoring and analysis.",
"AdditionalInformation": "Capturing network flow logs provides visibility into traffic patterns across your network, helping detect anomalies, potential lateral movement, and security threats. These logs integrate with Azure Monitor and Azure Sentinel, enabling advanced analytics and visualization for improved network security and incident response.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
@@ -714,7 +755,8 @@
"SubSection": "3.1 Logging",
"AttributeDescription": "Enable AppServiceHTTPLogs for Azure App Service instances to capture and centrally log all HTTP requests.",
"AdditionalInformation": "Logging web requests provides critical data for security monitoring and incident response. Captured logs can be ingested into a SIEM or central log aggregation system, helping security analysts detect anomalies, investigate threats, and enhance application security.",
-"LevelOfRisk": 1
+"LevelOfRisk": 1,
+"Weight": 1
}
]
},
@@ -731,7 +773,8 @@
"SubSection": "3.2 Retention",
"AttributeDescription": "Configure SQL Server Audit Retention to retain logs for more than 90 days to ensure long-term visibility into database activity and security events.",
"AdditionalInformation": "Maintaining audit logs for over 90 days helps detect anomalies, security breaches, and unauthorized access. Longer retention periods allow organizations to analyze historical data, support compliance requirements, and strengthen forensic investigations.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -748,7 +791,8 @@
"SubSection": "3.2 Retention",
"AttributeDescription": "Enable Network Security Group (NSG) Flow Logs and configure the retention period to at least 90 days to capture and store IP traffic data for security monitoring and analysis.",
"AdditionalInformation": "NSG Flow Logs provide visibility into network traffic, helping detect anomalies, unauthorized access, and potential security breaches. Retaining logs for at least 90 days ensures that historical data is available for incident investigation, compliance, and forensic analysis, strengthening overall network security monitoring.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -765,7 +809,8 @@
"SubSection": "3.2 Retention",
"AttributeDescription": "Ensure that logfiles.retention_days on PostgreSQL flexible servers is set to an appropriate value to maintain access to historical logs for monitoring and troubleshooting.",
"AdditionalInformation": "Setting an appropriate log retention period ensures that query and error logs are available for diagnosing configuration issues, troubleshooting errors, and optimizing performance. Retaining logs for a sufficient duration supports security analysis, compliance requirements, and operational debugging while preventing unnecessary storage consumption.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -782,7 +827,8 @@
"SubSection": "3.2 Retention",
"AttributeDescription": "Enable Diagnostic Settings to export activity logs for all appropriate resources within a subscription, ensuring long-term visibility into security and operational events.",
"AdditionalInformation": "By default, logs are retained for only 90 days. Configuring diagnostic settings allows logs to be exported and stored for extended periods, enabling security analysis, compliance monitoring, and forensic investigations within an Azure subscription.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -799,7 +845,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Enable auditing on Azure SQL Servers to track and log database events for security and compliance purposes.",
"AdditionalInformation": "Auditing at the server level ensures that all existing and newly created databases inherit auditing policies, providing consistent monitoring across the SQL environment. It does not override database-level auditing policies but complements them. Audit logs are stored in Azure Storage, helping organizations maintain regulatory compliance, monitor database activity, and detect anomalies that may indicate security threats or operational concerns.",
-"LevelOfRisk": 3
+"LevelOfRisk": 3,
+"Weight": 10
}
]
},
@@ -816,7 +863,8 @@
"SubSection": "3.3 Monitoring",
"AttributeDescription": "Create an Activity Log Alert for the Delete SQL Server Firewall Rule event to monitor and track firewall rule removals in SQL Server.",
"AdditionalInformation": "Monitoring firewall rule deletions helps detect unauthorized or accidental changes that could expose the database to security risks. Timely alerts ensure quick detection and response to prevent unauthorized network access and maintain a secure SQL environment.",
-"LevelOfRisk": 4
+"LevelOfRisk": 4,
+"Weight": 100
}
]
},
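The long run of Activity Log Alert entries that starts here follows one recipe; a minimal sketch of the firewall-rule-deletion variant via the Azure CLI, where the subscription ID, action group, and names are placeholders (the other alerts differ only in the operationName):

```python
# Sketch: Activity Log alert for SQL Server firewall rule deletions.
import subprocess

SUB = "<subscription-id>"  # placeholder

subprocess.run(
    ["az", "monitor", "activity-log", "alert", "create",
     "--name", "sql-fw-rule-delete",
     "--resource-group", "my-rg",
     "--scope", f"/subscriptions/{SUB}",
     "--condition",
     "category=Administrative and "
     "operationName=Microsoft.Sql/servers/firewallRules/delete",
     "--action-group",
     f"/subscriptions/{SUB}/resourceGroups/my-rg/providers/"
     "microsoft.insights/actionGroups/my-action-group"],
    check=True,
)
```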
@@ -833,7 +881,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Create or Update Public IP Address event to monitor changes in public IP configurations.",
|
||||
"AdditionalInformation": "Tracking public IP address modifications provides visibility into network access changes, helping detect unauthorized or misconfigured public exposure. Timely alerts enable faster detection and response to potential security risks, ensuring a controlled and secure network environment.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -850,7 +899,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Delete Public IP Address event to monitor and track the removal of public IP addresses.",
|
||||
"AdditionalInformation": "Monitoring public IP deletions helps detect unauthorized or accidental changes that could impact network accessibility or security. Timely alerts enable quick investigation and response, ensuring that critical network configurations remain intact and secure.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -867,7 +917,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Application Insights in Azure serves as an Application Performance Monitoring (APM) solution, providing valuable insights into application performance, telemetry data, and trace logs. It helps organizations monitor application health and troubleshoot incidents efficiently.",
|
||||
"AdditionalInformation": "Enabling Application Insights enhances security monitoring and performance optimization by collecting metrics, telemetry, and trace logs. These logs aid in proactive performance tuning to reduce costs and support incident response by helping identify the root cause of potential security or operational issues. Integrating Application Insights into a broader logging and monitoring strategy strengthens an organization’s overall observability and security posture.",
|
||||
"LevelOfRisk": 1
|
||||
"LevelOfRisk": 1,
|
||||
"Weight": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -884,7 +935,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Enable Network Watcher for all Azure regions within your subscription to monitor and diagnose network activity.",
|
||||
"AdditionalInformation": "Network Watcher provides network diagnostic and visualization tools that help organizations analyze traffic, troubleshoot connectivity issues, and detect anomalies. Enabling it enhances network visibility, security monitoring, and operational insights across Azure environments.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -901,7 +953,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Ensure endpoint protection is installed on all Azure Virtual Machines (VMs) to provide real-time security monitoring and threat detection.",
|
||||
"AdditionalInformation": "Endpoint protection helps detect and remove malware, viruses, and other security threats, protecting VMs from compromise and unauthorized access. It also provides configurable alerts for suspicious activity, enhancing security monitoring and incident response across Azure environments.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -918,7 +971,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Ensure that all Windows and Linux Virtual Machines (VMs) have the latest OS patches and security updates applied to mitigate vulnerabilities and improve system stability.",
|
||||
"AdditionalInformation": "Keeping VMs updated helps address security vulnerabilities, bug fixes, and stability improvements. Microsoft Defender for Cloud continuously checks for missing critical and security updates using Windows Update, WSUS, or Linux package managers. Applying recommended updates reduces the risk of exploits, malware, and performance issues, ensuring a secure and resilient cloud environment.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -935,7 +989,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Ensure that all Microsoft Cloud Security Benchmark (MCSB) policies are enabled to effectively evaluate resource configurations against best practice security recommendations.",
|
||||
"AdditionalInformation": "The MCSB Policy Initiative enforces security best practices and helps ensure compliance with organizational and regulatory requirements. If a policy’s Effect is set to Disabled, it will not be evaluated, potentially preventing administrators from detecting security misconfigurations. Keeping all MCSB policies enabled allows Microsoft Defender for Cloud to assess relevant resources and provide actionable security insights to strengthen cloud security.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -952,7 +1007,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Ensure that security alert emails are enabled for subscription owners to receive notifications about potential security threats and vulnerabilities.",
|
||||
"AdditionalInformation": "Enabling security alert emails ensures that subscription owners are promptly informed of security incidents, threats, or misconfigurations detected by Microsoft Defender for Cloud. Timely notifications allow administrators to take immediate action, mitigate risks, and maintain a strong security posture within the Azure environment.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -969,7 +1025,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Configure Microsoft Defender for Cloud to send high-severity security alerts to a designated security contact email in addition to the subscription owner.",
|
||||
"AdditionalInformation": "By default, Microsoft Defender for Cloud notifies only subscription owners of critical security alerts. Adding a security contact email ensures that the security team is promptly informed of potential threats, allowing for faster response and risk mitigation to maintain a secure Azure environment.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -986,7 +1043,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Ensure that Diagnostic Settings are configured to log relevant control and management plane activities for enhanced visibility and security monitoring. (Note: A Diagnostic Setting must exist for this configuration to be available.)",
|
||||
"AdditionalInformation": "Diagnostic settings determine how logs are exported and stored. Capturing logs from the control and management plane enables proper alerting, monitoring, and analysis of administrative actions, helping to detect unauthorized changes and security threats.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1003,7 +1061,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Enable security alert emails to notify the subscription owner or designated security contacts about potential security threats.",
|
||||
"AdditionalInformation": "Ensuring security alerts are sent to the appropriate personnel allows for timely detection and response to security incidents. This helps mitigate risks by ensuring that critical threats and vulnerabilities are addressed promptly, improving the overall security posture of the Azure environment.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1020,7 +1079,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Enable Microsoft Defender for DNS to monitor and scan outgoing DNS traffic within a subscription for potential security threats. (Note: As of August 1, 2023, new subscribers receive DNS threat alerts as part of Defender for Servers P2.)",
|
||||
"AdditionalInformation": "Defender for DNS analyzes DNS queries against a dynamic threat intelligence list to detect potential malicious activity, compromised services, or security breaches. Monitoring DNS lookups helps identify and prevent threats such as data exfiltration, command and control (C2) communications, and phishing attacks, improving overall network security.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1037,7 +1097,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an activity log alert for the Create Policy Assignment event to track changes in Azure Policy assignments.",
|
||||
"AdditionalInformation": "Monitoring policy assignment events helps detect unauthorized or unintended changes in Azure policies, providing greater visibility and reducing the time required to identify and respond to policy modifications.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1054,7 +1115,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an activity log alert for the Delete Policy Assignment event to monitor and track policy removal activities in Azure Policy assignments.",
|
||||
"AdditionalInformation": "Monitoring policy deletions helps detect unauthorized or unintended changes, ensuring policy integrity and reducing the time required to identify and respond to security or compliance violations.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1071,7 +1133,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Create or Update Network Security Group (NSG) event to track changes in network security configurations.",
|
||||
"AdditionalInformation": "Monitoring NSG creation and updates provides visibility into network access changes, helping to detect unauthorized modifications and identify potential security risks in a timely manner.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1088,7 +1151,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Delete Network Security Group (NSG) event to monitor and track network security group removals.",
|
||||
"AdditionalInformation": "Monitoring NSG deletions provides visibility into network access changes, helping to detect unauthorized modifications and identify potential security threats before they impact the environment.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1105,7 +1169,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Create or Update Security Solution event to track modifications to security solutions in your environment.",
|
||||
"AdditionalInformation": "Monitoring security solution changes provides visibility into updates, additions, or modifications to active security configurations. This helps detect unauthorized changes or suspicious activity and ensures that security controls remain properly configured.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1122,7 +1187,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Delete Security Solution event to monitor and track the removal of security solutions in your environment.",
|
||||
"AdditionalInformation": "Monitoring security solution deletions helps detect unauthorized removals or suspicious activity, ensuring that critical security controls are not accidentally or maliciously disabled. This improves security visibility and helps maintain a strong security posture.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1139,7 +1205,8 @@
|
||||
"SubSection": "3.3 Monitoring",
|
||||
"AttributeDescription": "Create an Activity Log Alert for the Create or Update SQL Server Firewall Rule event to track changes in SQL Server firewall configurations.",
|
||||
"AdditionalInformation": "Monitoring firewall rule modifications provides visibility into network access changes, helping detect unauthorized updates that could expose the database to security risks. Timely alerts can reduce the time needed to identify and respond to suspicious activity, ensuring a secure SQL environment.",
|
||||
"LevelOfRisk": 4
|
||||
"LevelOfRisk": 4,
|
||||
"Weight": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1156,7 +1223,8 @@
|
||||
"SubSection": "4.1 In-Transit",
|
||||
"AttributeDescription": "Enable require_secure_transport on PostgreSQL flexible servers to enforce SSL connections between the database server and client applications, ensuring encrypted communication.",
|
||||
"AdditionalInformation": "Requiring SSL encryption helps protect data in transit from man-in-the-middle attacks by securing the connection between the database server and client applications. Enforcing secure transport prevents unauthorized access and strengthens data integrity and confidentiality.",
|
||||
"LevelOfRisk": 2
|
||||
"LevelOfRisk": 2,
|
||||
"Weight": 8
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1173,7 +1241,8 @@
|
||||
"SubSection": "4.1 In-Transit",
|
||||
"AttributeDescription": "Enable require_secure_transport on MySQL flexible servers to enforce SSL encryption between the database server and client applications, ensuring secure communication.",
|
||||
"AdditionalInformation": "Requiring SSL connectivity protects data in transit from man-in-the-middle attacks by encrypting communication between client applications and the database server. Enforcing secure transport helps prevent unauthorized access and data interception, strengthening the overall security posture of the database.",
|
||||
"LevelOfRisk": 3
|
||||
"LevelOfRisk": 3,
|
||||
"Weight": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1190,7 +1259,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Ensure that the minimum TLS version for Azure Storage is set to TLS 1.2 or higher to mitigate security vulnerabilities associated with older TLS versions.",
"AdditionalInformation": "TLS 1.0 is outdated and has known security vulnerabilities that can expose data in transit to attacks and interception. Upgrading to TLS 1.2 or later enhances encryption security, ensuring secure communication between clients and Azure Storage while reducing the risk of data compromise.",
- "LevelOfRisk": 2
+ "LevelOfRisk": 2,
+ "Weight": 8
}
]
},
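For context, raising the storage minimum TLS version is a one-line change with the Azure CLI; `my-rg` and `mystorageacct` below are hypothetical names:

```
az storage account update --resource-group my-rg \
  --name mystorageacct --min-tls-version TLS1_2
```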
@@ -1207,7 +1277,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Configure Azure App Service to enforce HTTPS-only traffic, ensuring that all HTTP requests are automatically redirected to HTTPS for secure communication.",
"AdditionalInformation": "Enforcing HTTPS protects data in transit by using TLS/SSL encryption, preventing man-in-the-middle attacks and ensuring secure authentication. Redirecting non-secure HTTP traffic to HTTPS enhances the confidentiality and integrity of application communications, reducing exposure to security vulnerabilities.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -1224,7 +1295,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Ensure that Azure App Service is configured to use TLS 1.2 or higher to secure data transmission and align with industry security standards.",
"AdditionalInformation": "TLS 1.0 and 1.1 are outdated protocols with known vulnerabilities that expose web applications to security threats, including data interception and man-in-the-middle attacks. TLS 1.2 is the recommended encryption standard by industry frameworks such as PCI DSS, providing stronger encryption and improved security for web app connections.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
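Both App Service settings above can be enforced with the Azure CLI, sketched here with a placeholder app name (`my-app`):

```
# Redirect all HTTP traffic to HTTPS, then pin the minimum TLS version.
az webapp update --resource-group my-rg --name my-app --https-only true
az webapp config set --resource-group my-rg --name my-app --min-tls-version 1.2
```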
@@ -1241,7 +1313,8 @@
"SubSection": "4.1 In-Transit",
"AttributeDescription": "Ensure that Azure App Service is configured to use the latest supported HTTP version to benefit from security improvements, performance enhancements, and new functionalities.",
"AdditionalInformation": "Newer HTTP versions provide security enhancements, performance optimizations, and better resource management. HTTP/2, for example, improves efficiency by addressing issues such as head-of-line blocking, header compression, and request prioritization. Keeping apps updated to the latest HTTP version ensures they leverage modern security protocols and enhanced data transmission capabilities while maintaining compatibility and performance standards.",
- "LevelOfRisk": 1
+ "LevelOfRisk": 1,
+ "Weight": 1
}
]
},
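HTTP/2 can likewise be switched on per app; again `my-app` is a placeholder:

```
az webapp config set --resource-group my-rg --name my-app --http20-enabled true
```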
@@ -1258,7 +1331,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Enable encryption at rest using Customer Managed Keys (CMK) instead of Microsoft Managed Keys to maintain greater control over sensitive data encryption in Azure Storage accounts.",
"AdditionalInformation": "By default, Azure encrypts all storage resources (blobs, disks, files, queues, and tables) using Microsoft Managed Keys. However, using Customer Managed Keys (CMK) allows organizations to control, manage, and rotate their encryption keys through Azure Key Vault, ensuring compliance with security policies. CMKs also support automatic key version updates for enhanced security. Since this recommendation applies specifically to storage accounts holding critical data, its assessment remains manual rather than automated.",
- "LevelOfRisk": 4
+ "LevelOfRisk": 4,
+ "Weight": 100
}
]
},
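One way to point a storage account at a customer-managed key, assuming a Key Vault (`my-kv`) and key (`my-key`) already exist and the account identity can access them; all names are hypothetical:

```
az storage account update --resource-group my-rg --name mystorageacct \
  --encryption-key-source Microsoft.Keyvault \
  --encryption-key-vault https://my-kv.vault.azure.net \
  --encryption-key-name my-key
```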
@@ -1275,7 +1349,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Enable Transparent Data Encryption (TDE) with Customer-Managed Keys (CMK) to enhance control, security, and separation of duties over the TDE Protector. TDE encrypts data at rest using a database encryption key (DEK). Traditionally, DEKs were protected by Azure SQL Service-managed certificates, but with Customer-Managed Key support, the DEK can now be secured using an asymmetric key stored in Azure Key Vault. Azure Key Vault provides centralized key management, FIPS 140-2 Level 2 validated HSMs, and additional security through key and data separation. Based on business needs and data sensitivity, organizations should use Customer-Managed Keys for TDE encryption.",
"AdditionalInformation": "Using Customer-Managed Keys for TDE gives organizations full control over encryption keys, including who can access them and when. Azure Key Vault serves as a secure external key management system, ensuring that TDE encryption keys remain protected from unauthorized access. When configured, the asymmetric key is set at the server level and inherited by all databases under that server, providing consistent encryption management across the environment.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
@@ -1292,7 +1367,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Enable Transparent Data Encryption (TDE) on all Azure SQL Servers to ensure real-time encryption and decryption of databases, backups, and transaction logs at rest. TDE operates seamlessly without requiring modifications to applications.",
"AdditionalInformation": "TDE helps protect Azure SQL Databases from unauthorized access and malicious activity by encrypting data at rest. This ensures that database files, backups, and logs remain secure, reducing the risk of data breaches while maintaining operational transparency.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
},
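A sketch of both steps with the Azure CLI: setting a Key Vault key as the server-level TDE protector (inherited by every database on the server), then enabling TDE on a database. Server, database, and key names are placeholders:

```
az sql server tde-key set --resource-group my-rg --server my-sqlserver \
  --server-key-type AzureKeyVault \
  --kid https://my-kv.vault.azure.net/keys/my-key/<key-version>
az sql db tde set --resource-group my-rg --server my-sqlserver \
  --database my-db --status Enabled
```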
@@ -1309,7 +1385,8 @@
"SubSection": "4.2 At-Rest",
"AttributeDescription": "Configure storage accounts used for activity log exports to use Customer Managed Keys (CMK) for enhanced security and access control.",
"AdditionalInformation": "Using CMKs for log storage provides additional confidentiality controls, ensuring that only users with read access to the storage account and explicit decrypt permissions can access the logs. This enhances data security by restricting unauthorized access and strengthening compliance with encryption and access management policies.",
- "LevelOfRisk": 3
+ "LevelOfRisk": 3,
+ "Weight": 10
}
]
}

@@ -10,7 +10,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Use corporate login credentials instead of personal accounts, such as Gmail accounts.",
@@ -29,7 +29,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Setup multi-factor authentication for Google Cloud Platform accounts.",
@@ -48,7 +48,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Setup Security Key Enforcement for Google Cloud Platform admin accounts.",
@@ -69,7 +69,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. API keys are always at risk because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API keys to use (call) only APIs required by an application.",
@@ -90,7 +90,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. If they are in use it is recommended to rotate API keys every 90 days.",
@@ -111,7 +111,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions intact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead.",
@@ -132,7 +132,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that Essential Contacts is configured to designate email addresses for Google Cloud services to notify of important technical or security information.",
@@ -153,7 +153,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Google Cloud Key Management Service stores cryptographic keys in a hierarchical structure designed for useful and elegant access control management. The format for the rotation schedule depends on the client library that is used. For the gcloud command-line tool, the next rotation time must be in `ISO` or `RFC3339` format, and the rotation period must be in the form `INTEGER[UNIT]`, where units can be one of seconds (s), minutes (m), hours (h) or days (d).",
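For reference, a rotation schedule in the `INTEGER[UNIT]` form described above can be set like this; the key, keyring, and dates are illustrative:

```
# 90-day rotation period with an RFC3339 next-rotation time.
gcloud kms keys update my-key --keyring my-keyring --location global \
  --rotation-period 90d --next-rotation-time 2026-01-01T00:00:00Z
```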
@@ -174,7 +174,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the IAM policy on Cloud KMS `cryptokeys` should restrict anonymous and/or public access.",
@@ -195,7 +195,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "When you use Dataproc, cluster and job data is stored on Persistent Disks (PDs) associated with the Compute Engine VMs in your cluster and in a Cloud Storage staging bucket. This PD and bucket data is encrypted using a Google-generated data encryption key (DEK) and key encryption key (KEK). The CMEK feature allows you to create, use, and revoke the key encryption key (KEK). Google still controls the data encryption key (DEK).",
@@ -216,7 +216,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to assign the `Service Account User (iam.serviceAccountUser)` and `Service Account Token Creator (iam.serviceAccountTokenCreator)` roles to a user for a specific service account rather than assigning the role to a user at project level.",
@@ -237,7 +237,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning KMS related roles to users.",
@@ -256,7 +256,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. In this case, unrestricted keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API key usage to trusted hosts, HTTP referrers and apps. It is recommended to use the more secure standard authentication flow instead.",
@@ -277,7 +277,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning service-account related roles to users.",
@@ -298,7 +298,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "A service account is a special Google account that belongs to an application or a VM, instead of to an individual end-user. The application uses the service account to call the service's Google API so that users aren't directly involved. It's recommended not to use admin access for ServiceAccount.",
@@ -319,7 +319,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "User managed service accounts should not have user-managed keys.",
@@ -340,7 +340,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Service Account keys consist of a key ID (Private_key_Id) and Private key, which are used to sign programmatic requests users make to Google cloud services accessible to that particular service account. It is recommended that all Service Account keys are regularly rotated.",
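A quick way to audit key age for the rotation check above; the service account email is a placeholder of the usual form:

```
# List user-managed keys with their creation times to spot stale keys.
gcloud iam service-accounts keys list \
  --iam-account my-sa@my-project.iam.gserviceaccount.com \
  --managed-by user --format "table(name,validAfterTime)"
```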
@@ -359,7 +359,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Google Cloud Functions allow you to host serverless code that is executed when an event is triggered, without requiring the management of a host operating system. These functions can also store environment variables to be used by the code that may contain authentication or other information that needs to remain confidential.",
@@ -380,7 +380,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "GCP Access Approval enables you to require your organization's explicit approval whenever Google support tries to access your projects. You can then select users within your organization who can approve these requests through giving them a security role in IAM. All access requests display which Google Employee requested them in an email or Pub/Sub message that you can choose to Approve. This adds an additional control and logging of who in your organization approved/denied these requests.",
@@ -401,7 +401,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "GCP Cloud Asset Inventory is a service that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource.",
@@ -422,7 +422,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "In order to prevent unnecessary project ownership assignments to users/service-accounts and further misuses of projects and resources, all `roles/Owner` assignments should be monitored. Members (users/Service-Accounts) with a role assignment to primitive role `roles/Owner` are project owners. The project owner has all the privileges on the project the role belongs to. These are summarized below: - All viewer permissions on all GCP Services within the project - Permissions for actions that modify the state of all GCP services within the project - Manage roles and permissions for a project and all resources within the project - Set up billing for a project Granting the owner role to a member (user/Service-Account) will allow that member to modify the Identity and Access Management (IAM) policy. Therefore, grant the owner role only if the member has a legitimate purpose to manage the IAM policy. This is because the project IAM policy contains sensitive access control data. Having a minimal set of users allowed to manage IAM policy will simplify any auditing that may be necessary.",
@@ -443,7 +443,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Logging enabled on an HTTPS Load Balancer will show all network traffic and its destination.",
@@ -464,7 +464,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that Cloud Audit Logging is configured to track all admin activities and read, write access to user data.",
@@ -485,7 +485,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Cloud DNS logging records the queries from the name servers within your VPC to Stackdriver. Logged queries can come from Compute Engine VMs, GKE containers, or other GCP resources provisioned within the VPC.",
@@ -506,7 +506,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enabling retention policies on log buckets will protect logs stored in cloud storage buckets from being overwritten or accidentally deleted. It is recommended to set up retention policies and configure Bucket Lock on all storage buckets that are used as log sinks.",
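A minimal sketch of a retention policy plus Bucket Lock on a log bucket (`my-log-bucket` is hypothetical); note that locking a retention policy is irreversible:

```
gsutil retention set 90d gs://my-log-bucket
gsutil retention lock gs://my-log-bucket
```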
@@ -527,7 +527,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to create a sink that will export copies of all the log entries. This can help aggregate logs from multiple projects and export them to a Security Information and Event Management (SIEM).",
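A sketch of such a sink; created with no filter, it exports copies of all log entries to the destination bucket (names are placeholders):

```
gcloud logging sinks create my-all-logs-sink \
  storage.googleapis.com/my-log-bucket
```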
@@ -548,7 +548,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Google Cloud Platform (GCP) services write audit log entries to the Admin Activity and Data Access logs to help answer the questions of, \"who did what, where, and when?\" within GCP projects. Cloud audit logging records information including the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by GCP services. Cloud audit logging provides a history of GCP API calls for an account, including API calls made via the console, SDKs, command-line tools, and other GCP services.",
@@ -569,7 +569,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Cloud Storage Bucket IAM changes.",
@@ -590,7 +590,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for changes to Identity and Access Management (IAM) role creation, deletion and updating activities.",
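As an illustration of the metric-filter half of this pattern, a log-based metric for custom IAM role changes; an alerting policy can then be attached to it in Cloud Monitoring. The metric name is arbitrary and the filter is one common formulation, not necessarily the exact one this check expects:

```
gcloud logging metrics create iam-role-changes \
  --description "Custom IAM role create/delete/update" \
  --log-filter 'resource.type="iam_role" AND (protoPayload.methodName="google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" OR protoPayload.methodName="google.iam.admin.v1.UpdateRole")'
```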
@@ -611,7 +611,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for SQL instance configuration changes.",
@@ -632,7 +632,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network changes.",
@@ -651,7 +651,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "GCP Access Transparency provides audit logs for all actions that Google personnel take in your Google Cloud resources.",
@@ -672,7 +672,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) Network Firewall rule changes.",
@@ -693,7 +693,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network route changes.",
@@ -714,7 +714,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "In order to prevent use of legacy networks, a project should not have a legacy network configured. As of now, Legacy Networks are gradually being phased out, and you can no longer create projects with them. This recommendation is to check older projects to ensure that they are not using Legacy Networks.",
@@ -735,7 +735,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Cloud Domain Name System (DNS) is a fast, reliable and cost-effective domain name system that powers millions of domains on the internet. Domain Name System Security Extensions (DNSSEC) in Cloud DNS enables domain owners to take easy steps to protect their domains against DNS hijacking and man-in-the-middle and other attacks.",
@@ -756,7 +756,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.",
@@ -777,7 +777,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract. DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.",
@@ -798,7 +798,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract. DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.",
@@ -819,7 +819,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow the user to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, only an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the internet to VPC or VM instance using `SSH` on `Port 22` can be avoided.",
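One way to surface rules open to the whole internet for manual review; the filter expression is a sketch and may need adjusting for your gcloud version:

```
gcloud compute firewall-rules list \
  --filter="sourceRanges:0.0.0.0/0" \
  --format="table(name,network,sourceRanges.list())"
```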
@@ -840,7 +840,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "To prevent use of `default` network, a project should not have a `default` network.",
@@ -861,7 +861,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Flow Logs is a feature that enables users to capture information about the IP traffic going to and from network interfaces in the organization's VPC Subnets. Once a flow log is created, the user can view and retrieve its data in Stackdriver Logging. It is recommended that Flow Logs be enabled for every business-critical VPC subnet.",
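Flow logs are enabled per subnet; the subnet name and region below are placeholders:

```
gcloud compute networks subnets update my-subnet \
  --region us-central1 --enable-flow-logs
```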
@@ -880,7 +880,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Secure Sockets Layer (SSL) policies determine what port Transport Layer Security (TLS) features clients are permitted to use when connecting to load balancers. To prevent usage of insecure features, SSL policies should use (a) at least TLS 1.2 with the MODERN profile; or (b) the RESTRICTED profile, because it effectively requires clients to use TLS 1.2 regardless of the chosen minimum TLS version; or (c) a CUSTOM profile that does not support any of the following features: ``` TLS_RSA_WITH_AES_128_GCM_SHA256 TLS_RSA_WITH_AES_256_GCM_SHA384 TLS_RSA_WITH_AES_128_CBC_SHA TLS_RSA_WITH_AES_256_CBC_SHA TLS_RSA_WITH_3DES_EDE_CBC_SHA ```",
@@ -899,7 +899,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "IAP authenticates the user requests to your apps via a Google single sign in. You can then manage these users with permissions to control access. It is recommended to use both IAP permissions and firewalls to restrict this access to your apps with sensitive information.",
@@ -920,7 +920,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Interacting with a serial port is often referred to as the serial console, which is similar to using a terminal window, in that input and output is entirely in text mode and there is no graphical interface or mouse support. If you enable the interactive serial console on an instance, clients can attempt to connect to that instance from any IP address. Therefore interactive serial console support should be disabled.",
@@ -941,7 +941,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to use Instance specific SSH key(s) instead of using common/shared project-wide SSH key(s) to access Instances.",
@@ -962,7 +962,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "To defend against advanced threats and ensure that the boot loader and firmware on your VMs are signed and untampered, it is recommended that Compute instances are launched with Shielded VM enabled.",
@@ -983,7 +983,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling OS login binds SSH certificates to IAM users and facilitates effective SSH certificate management.",
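OS Login can be turned on project-wide via instance metadata, which instances inherit unless they override it:

```
gcloud compute project-info add-metadata --metadata enable-oslogin=TRUE
```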
@@ -1004,7 +1004,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Compute instances should not be configured to have external IP addresses.",
@@ -1025,7 +1025,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Google Cloud encrypts data at-rest and in-transit, but customer data must be decrypted for processing. Confidential Computing is a breakthrough technology which encrypts data in-use—while it is being processed. Confidential Computing environments keep data encrypted in memory and elsewhere outside the central processing unit (CPU). Confidential VMs leverage the Secure Encrypted Virtualization (SEV) feature of AMD EPYC™ CPUs. Customer data will stay encrypted while it is used, indexed, queried, or trained on. Encryption keys are generated in hardware, per VM, and not exportable. Thanks to built-in hardware optimizations of both performance and security, there is no significant performance penalty to Confidential Computing workloads.",
@@ -1046,7 +1046,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to configure your instance to not use the default Compute Engine service account because it has the Editor role on the project.",
@@ -1067,7 +1067,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "To support the principle of least privilege and prevent potential privilege escalation, it is recommended that instances are not assigned to the default service account `Compute Engine default service account` with Scope `Allow full access to all Cloud APIs`.",
@@ -1088,7 +1088,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "A Compute Engine instance cannot forward a packet unless the source IP address of the packet matches the IP address of the instance. Similarly, GCP won't deliver a packet whose destination IP address is different from the IP address of the instance receiving the packet. However, both capabilities are required if you want to use instances to help route packets. Forwarding of data packets should be disabled to prevent data loss or information disclosure.",
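IP forwarding can only be set when an instance is created, so auditing amounts to checking the field per instance (`my-vm` and the zone are placeholders):

```
gcloud compute instances describe my-vm --zone us-central1-a \
  --format="value(canIpForward)"
```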
@@ -1107,7 +1107,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "In order to maintain the highest level of security all connections to an application should be secure by default.",
@@ -1128,7 +1128,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Customer-Supplied Encryption Keys (CSEK) are a feature in Google Cloud Storage and Google Compute Engine. If you supply your own encryption keys, Google uses your key to protect the Google-generated keys used to encrypt and decrypt your data. By default, Google Compute Engine encrypts all data at rest. Compute Engine handles and manages this encryption for you without any additional actions on your part. However, if you want to control and manage this encryption yourself, you can provide your own encryption keys.",
@@ -1147,7 +1147,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Google Cloud Virtual Machines have the ability via an OS Config agent API to periodically (about every 10 minutes) report OS inventory data. A patch compliance API periodically reads this data, and cross references metadata to determine if the latest updates are installed. This is not the only Patch Management solution available to your organization and you should weigh your needs before committing to using this method.",
@@ -1168,7 +1168,7 @@
],
"Attributes": [
{
- "Section": "5. Storage",
+ "Section": "5 Storage",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that IAM policy on Cloud Storage bucket does not allow anonymous or public access.",
@@ -1189,7 +1189,7 @@
],
"Attributes": [
{
- "Section": "5. Storage",
+ "Section": "5 Storage",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that uniform bucket-level access is enabled on Cloud Storage buckets.",
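A sketch covering both bucket recommendations above, with a hypothetical bucket name:

```
# Drop any public grants, then enforce uniform bucket-level access.
gsutil iam ch -d allUsers -d allAuthenticatedUsers gs://my-bucket
gsutil uniformbucketlevelaccess set on gs://my-bucket
```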
@@ -1210,7 +1210,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to have all SQL database instances set to enable automated backups.",
@@ -1231,7 +1231,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended to configure Second Generation SQL instance to use private IPs instead of public IPs.",
@@ -1252,7 +1252,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Database Server should accept connections only from trusted Network(s)/IP(s) and restrict access from public IP addresses.",
@@ -1273,7 +1273,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to enforce all incoming connections to SQL database instance to use SSL.",
@@ -1292,8 +1292,8 @@
"Checks": [],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.1. MySQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.1 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances. This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1314,8 +1314,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.1. MySQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.1 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL MySQL instance to `on`.",
@@ -1336,8 +1336,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.1. MySQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.1 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
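Both MySQL flags above can be set in one patch call; the instance name is a placeholder. Note that `--database-flags` replaces the instance's whole flag list, so any existing flags must be repeated:

```
gcloud sql instances patch my-mysql-instance \
  --database-flags=skip_show_database=on,local_infile=off
```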
@@ -1358,8 +1358,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are: - `TERSE` - `DEFAULT` - `VERBOSE` `TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information. `VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error. Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1380,8 +1380,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that is considered an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1402,8 +1402,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The value of the `log_statement` flag determines the SQL statements that are logged. Valid values are: - `none` - `ddl` - `mod` - `all` The value `ddl` logs all data definition statements. The value `mod` logs all ddl statements, plus data-modifying statements. The statements are logged after a basic parsing is done and statement type is determined, thus this does not log statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included. A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1424,8 +1424,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet. In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC). Limiting network access to your database will limit potential attacks.",
@@ -1446,8 +1446,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1468,8 +1468,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1490,8 +1490,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1512,8 +1512,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1534,8 +1534,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
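The PostgreSQL flags from the hunks above can likewise be applied together; the instance name is hypothetical and, as before, `--database-flags` replaces the full flag list:

```
gcloud sql instances patch my-pg-instance \
  --database-flags=cloudsql.enable_pgaudit=on,log_connections=on,log_disconnections=on,log_min_messages=error,log_min_duration_statement=-1
```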
@@ -1556,8 +1556,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1578,8 +1578,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1600,8 +1600,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1622,8 +1622,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1644,8 +1644,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the `user options` database flag for Cloud SQL SQL Server instance not be configured.",
@@ -1666,8 +1666,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.",
@@ -1688,8 +1688,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.",
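A combined sketch for the SQL Server flags above; flag names containing spaces are passed inside the quoted flag string, and the instance name is a placeholder:

```
gcloud sql instances patch my-sqlserver-instance \
  --database-flags='3625=on,external scripts enabled=off,remote access=off,contained database authentication=off,cross db ownership chaining=off'
```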
@@ -1710,7 +1710,7 @@
],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "BigQuery by default encrypts the data at rest by employing `Envelope Encryption` using Google managed cryptographic keys. The data is encrypted using the `data encryption keys` and data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and does not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as an encryption key management solution for BigQuery Data Sets.",
@@ -1731,7 +1731,7 @@
],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "BigQuery by default encrypts the data at rest by employing `Envelope Encryption` using Google managed cryptographic keys. The data is encrypted using the `data encryption keys` and data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and does not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as an encryption key management solution for BigQuery Data Sets. If CMEK is used, the CMEK is used to encrypt the data encryption keys instead of using Google-managed encryption keys.",
@@ -1752,7 +1752,7 @@
],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the IAM policy on BigQuery datasets does not allow anonymous and/or public access.",

@@ -10,7 +10,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Use corporate login credentials instead of consumer accounts, such as Gmail accounts.",
|
||||
@@ -30,7 +30,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Setup multi-factor authentication for Google Cloud Platform accounts.",
|
||||
@@ -50,7 +50,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Setup Security Key Enforcement for Google Cloud Platform admin accounts.",
|
||||
@@ -72,7 +72,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "User-managed service accounts should not have user-managed keys.",
|
||||
@@ -94,7 +94,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "A service account is a special Google account that belongs to an application or a VM, instead of to an individual end-user. The application uses the service account to call the service's Google API so that users aren't directly involved. It's recommended not to use admin access for ServiceAccount.",
|
||||
@@ -116,7 +116,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to assign the `Service Account User (iam.serviceAccountUser)` and `Service Account Token Creator (iam.serviceAccountTokenCreator)` roles to a user for a specific service account rather than assigning the role to a user at project level.",
|
||||
@@ -138,7 +138,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Section": "1 Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Service Account keys consist of a key ID (Private_key_Id) and Private key, which are used to sign programmatic requests users make to Google cloud services accessible to that particular service account. It is recommended that all Service Account keys are regularly rotated.",
|
||||
@@ -160,7 +160,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning service-account related roles to users.",
@@ -182,7 +182,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the IAM policy on Cloud KMS `cryptokeys` should restrict anonymous and/or public access.",
@@ -204,7 +204,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Google Cloud Key Management Service stores cryptographic keys in a hierarchical structure designed for useful and elegant access control management. The format for the rotation schedule depends on the client library that is used. For the gcloud command-line tool, the next rotation time must be in `ISO` or `RFC3339` format, and the rotation period must be in the form `INTEGER[UNIT]`, where units can be one of seconds (s), minutes (m), hours (h) or days (d).",
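The two value formats that description names are easy to get wrong, so a small sketch that derives both for a 90-day schedule may help (pure stdlib; the helper name is illustrative, not part of any Google SDK):

```python
from datetime import datetime, timedelta, timezone

def rotation_schedule(days: int) -> dict:
    """Build the two values the description mentions for gcloud:
    a next-rotation time in RFC 3339 and a period in INTEGER[UNIT] form."""
    next_rotation = datetime.now(timezone.utc) + timedelta(days=days)
    return {
        # RFC 3339 timestamp, e.g. "2025-04-01T00:00:00+00:00"
        "next_rotation_time": next_rotation.isoformat(timespec="seconds"),
        # Period expressed in seconds: 90 days -> "7776000s"
        "rotation_period": f"{days * 24 * 60 * 60}s",
    }

print(rotation_schedule(90))
```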
@@ -226,7 +226,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning KMS related roles to users.",
@@ -248,7 +248,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions intact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead.",
@@ -268,7 +268,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. In this case, unrestricted keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API key usage to trusted hosts, HTTP referrers and apps. It is recommended to use the more secure standard authentication flow instead.",
@@ -290,7 +290,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. API keys are always at risk because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API keys to use (call) only APIs required by an application.",
@@ -312,7 +312,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. If they are in use, it is recommended to rotate API keys every 90 days.",
@@ -334,7 +334,7 @@
],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that Essential Contacts is configured to designate email addresses for Google Cloud services to notify of important technical or security information.",
@@ -354,7 +354,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "1. Identity and Access Management",
+ "Section": "1 Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Google Cloud Functions allow you to host serverless code that is executed when an event is triggered, without requiring the management of a host operating system. These functions can also store environment variables to be used by the code that may contain authentication or other information that needs to remain confidential.",
@@ -376,7 +376,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that Cloud Audit Logging is configured to track all admin activities and read, write access to user data.",
@@ -398,7 +398,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to create a sink that will export copies of all the log entries. This can help aggregate logs from multiple projects and export them to a Security Information and Event Management (SIEM) system.",
@@ -420,7 +420,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enabling retention policies on log buckets will protect logs stored in cloud storage buckets from being overwritten or accidentally deleted. It is recommended to set up retention policies and configure Bucket Lock on all storage buckets that are used as log sinks.",
@@ -442,7 +442,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "In order to prevent unnecessary project ownership assignments to users/service-accounts and further misuses of projects and resources, all `roles/Owner` assignments should be monitored. Members (users/Service-Accounts) with a role assignment to the primitive role `roles/Owner` are project owners. The project owner has all the privileges on the project the role belongs to. These are summarized below: all viewer permissions on all GCP services within the project; permissions for actions that modify the state of all GCP services within the project; manage roles and permissions for a project and all resources within the project; set up billing for a project. Granting the owner role to a member (user/Service-Account) will allow that member to modify the Identity and Access Management (IAM) policy. Therefore, grant the owner role only if the member has a legitimate purpose to manage the IAM policy. This is because the project IAM policy contains sensitive access control data. Having a minimal set of users allowed to manage IAM policy will simplify any auditing that may be necessary.",
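The monitoring described above ultimately means scanning a project's IAM policy for `roles/owner` bindings. A minimal sketch over the documented policy JSON shape (`bindings` entries with `role` and `members`); the sample policy is made up for illustration:

```python
# Illustrative policy; real policies come from the Resource Manager API.
policy = {
    "bindings": [
        {"role": "roles/owner", "members": ["user:alice@example.com"]},
        {"role": "roles/viewer", "members": ["group:devs@example.com"]},
    ]
}

# Collect every member holding the primitive owner role for review.
owners = [
    member
    for binding in policy.get("bindings", [])
    if binding["role"] == "roles/owner"
    for member in binding.get("members", [])
]
print(f"Project owners to review: {owners}")
```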
@@ -464,7 +464,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Google Cloud Platform (GCP) services write audit log entries to the Admin Activity and Data Access logs to help answer the questions of, \"who did what, where, and when?\" within GCP projects. The information recorded includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by GCP services. Cloud audit logging provides a history of GCP API calls for an account, including API calls made via the console, SDKs, command-line tools, and other GCP services.",
@@ -486,7 +486,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for changes to Identity and Access Management (IAM) role creation, deletion and updating activities.",
@@ -508,7 +508,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) Network Firewall rule changes.",
@@ -530,7 +530,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network route changes.",
@@ -552,7 +552,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network changes.",
@@ -574,7 +574,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for Cloud Storage Bucket IAM changes.",
@@ -596,7 +596,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that a metric filter and alarm be established for SQL instance configuration changes.",
@@ -616,7 +616,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Cloud DNS logging records the queries from the name servers within your VPC to Stackdriver. Logged queries can come from Compute Engine VMs, GKE containers, or other GCP resources provisioned within the VPC.",
@@ -638,7 +638,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "GCP Cloud Asset Inventory is a service that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource. Cloud Asset Inventory Service (CAIS) API enablement is not required for operation of the service, but rather enables the mechanism for searching/exporting CAIS asset data directly.",
@@ -658,7 +658,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "GCP Access Transparency provides audit logs for all actions that Google personnel take in your Google Cloud resources.",
@@ -680,7 +680,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "GCP Access Approval enables you to require your organization's explicit approval whenever Google support tries to access your projects. You can then select users within your organization who can approve these requests by giving them a security role in IAM. All access requests display which Google employee requested them in an email or Pub/Sub message that you can choose to approve. This adds an additional control and logging of who in your organization approved/denied these requests.",
@@ -702,7 +702,7 @@
],
"Attributes": [
{
- "Section": "2. Logging and Monitoring",
+ "Section": "2 Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Logging enabled on an HTTPS Load Balancer will show all network traffic and its destination.",
@@ -724,7 +724,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "To prevent use of the `default` network, a project should not have a `default` network.",
@@ -746,7 +746,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "In order to prevent use of legacy networks, a project should not have a legacy network configured. As of now, Legacy Networks are gradually being phased out, and you can no longer create projects with them. This recommendation is to check older projects to ensure that they are not using Legacy Networks.",
@@ -768,7 +768,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Cloud Domain Name System (DNS) is a fast, reliable and cost-effective domain name system that powers millions of domains on the internet. Domain Name System Security Extensions (DNSSEC) in Cloud DNS enables domain owners to take easy steps to protect their domains against DNS hijacking, man-in-the-middle and other attacks.",
@@ -790,7 +790,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract. DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.",
@@ -812,7 +812,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract. DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.",
@@ -834,7 +834,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow the user to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, only an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the internet to a VPC or VM instance using `SSH` on `Port 22` can be avoided.",
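The condition this description spells out, ingress from `0.0.0.0/0` reaching port 22 (or 3389, for the RDP twin of this check below), can be written directly against the documented REST shape of a firewall rule (`direction`, `sourceRanges`, `allowed` entries with `IPProtocol` and `ports`). A sketch with an illustrative rule; note that an `allowed` entry without `ports` matches every port of that protocol:

```python
def allows_public_ingress(rule: dict, port: int) -> bool:
    """True if an ingress rule admits 0.0.0.0/0 traffic to `port` (e.g. 22 or 3389)."""
    if rule.get("direction", "INGRESS") != "INGRESS":
        return False
    if "0.0.0.0/0" not in rule.get("sourceRanges", []):
        return False
    for allowed in rule.get("allowed", []):
        ports = allowed.get("ports", [])  # no "ports" key means every port
        if allowed.get("IPProtocol") in ("tcp", "all") and (
            not ports or any(_covers(p, port) for p in ports)
        ):
            return True
    return False

def _covers(spec: str, port: int) -> bool:
    # A port spec is either "22" or a range like "20-25".
    lo, _, hi = spec.partition("-")
    return int(lo) <= port <= int(hi or lo)

rule = {"direction": "INGRESS", "sourceRanges": ["0.0.0.0/0"],
        "allowed": [{"IPProtocol": "tcp", "ports": ["22"]}]}
print(allows_public_ingress(rule, 22))    # True
print(allows_public_ingress(rule, 3389))  # False
```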
@@ -856,7 +856,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances. Firewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.",
@@ -878,7 +878,7 @@
],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Flow Logs is a feature that enables users to capture information about the IP traffic going to and from network interfaces in the organization's VPC Subnets. Once a flow log is created, the user can view and retrieve its data in Stackdriver Logging. It is recommended that Flow Logs be enabled for every business-critical VPC subnet.",
@@ -898,7 +898,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Secure Sockets Layer (SSL) policies determine what port Transport Layer Security (TLS) features clients are permitted to use when connecting to load balancers. To prevent usage of insecure features, SSL policies should use (a) at least TLS 1.2 with the MODERN profile; or (b) the RESTRICTED profile, because it effectively requires clients to use TLS 1.2 regardless of the chosen minimum TLS version; or (c) a CUSTOM profile that does not support any of the following features: `TLS_RSA_WITH_AES_128_GCM_SHA256`, `TLS_RSA_WITH_AES_256_GCM_SHA384`, `TLS_RSA_WITH_AES_128_CBC_SHA`, `TLS_RSA_WITH_AES_256_CBC_SHA`, `TLS_RSA_WITH_3DES_EDE_CBC_SHA`.",
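Those three acceptable configurations translate into a small predicate. A sketch over the REST representation of an SSL policy (`profile`, `minTlsVersion`, `enabledFeatures`), using the cipher list from the benchmark text; the sample policy is illustrative:

```python
# Cipher suites the benchmark text says a compliant CUSTOM profile must not enable.
WEAK_FEATURES = {
    "TLS_RSA_WITH_AES_128_GCM_SHA256",
    "TLS_RSA_WITH_AES_256_GCM_SHA384",
    "TLS_RSA_WITH_AES_128_CBC_SHA",
    "TLS_RSA_WITH_AES_256_CBC_SHA",
    "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
}

def ssl_policy_compliant(policy: dict) -> bool:
    profile = policy.get("profile")
    if profile == "RESTRICTED":
        return True  # effectively forces TLS 1.2 clients regardless of minTlsVersion
    if profile == "MODERN":
        return policy.get("minTlsVersion") == "TLS_1_2"
    if profile == "CUSTOM":
        return not WEAK_FEATURES & set(policy.get("enabledFeatures", []))
    return False  # COMPATIBLE or unknown profiles fail the check

print(ssl_policy_compliant({"profile": "MODERN", "minTlsVersion": "TLS_1_2"}))  # True
```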
@@ -918,7 +918,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "3. Networking",
+ "Section": "3 Networking",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "IAP authenticates the user requests to your apps via a Google single sign-in. You can then manage these users with permissions to control access. It is recommended to use both IAP permissions and firewalls to restrict this access to your apps with sensitive information.",
@@ -940,7 +940,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to configure your instance to not use the default Compute Engine service account because it has the Editor role on the project.",
@@ -962,7 +962,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "To support the principle of least privilege and prevent potential privilege escalation, it is recommended that instances are not assigned to the default service account `Compute Engine default service account` with the scope `Allow full access to all Cloud APIs`.",
@@ -984,7 +984,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to use instance-specific SSH key(s) instead of using common/shared project-wide SSH key(s) to access instances.",
@@ -1006,7 +1006,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling OS login binds SSH certificates to IAM users and facilitates effective SSH certificate management.",
@@ -1028,7 +1028,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Interacting with a serial port is often referred to as the serial console, which is similar to using a terminal window, in that input and output is entirely in text mode and there is no graphical interface or mouse support. If you enable the interactive serial console on an instance, clients can attempt to connect to that instance from any IP address. Therefore interactive serial console support should be disabled.",
@@ -1050,7 +1050,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "A Compute Engine instance cannot forward a packet unless the source IP address of the packet matches the IP address of the instance. Similarly, GCP won't deliver a packet whose destination IP address is different than the IP address of the instance receiving the packet. However, both capabilities are required if you want to use instances to help route packets. Forwarding of data packets should be disabled to prevent data loss or information disclosure.",
@@ -1072,7 +1072,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Customer-Supplied Encryption Keys (CSEK) are a feature in Google Cloud Storage and Google Compute Engine. If you supply your own encryption keys, Google uses your key to protect the Google-generated keys used to encrypt and decrypt your data. By default, Google Compute Engine encrypts all data at rest. Compute Engine handles and manages this encryption for you without any additional actions on your part. However, if you want to control and manage this encryption yourself, you can provide your own encryption keys.",
@@ -1094,7 +1094,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "To defend against advanced threats and ensure that the boot loader and firmware on your VMs are signed and untampered, it is recommended that Compute instances are launched with Shielded VM enabled.",
@@ -1116,7 +1116,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Compute instances should not be configured to have external IP addresses.",
@@ -1136,7 +1136,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "In order to maintain the highest level of security, all connections to an application should be secure by default.",
@@ -1158,7 +1158,7 @@
],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Google Cloud encrypts data at rest and in transit, but customer data must be decrypted for processing. Confidential Computing is a breakthrough technology which encrypts data in use, while it is being processed. Confidential Computing environments keep data encrypted in memory and elsewhere outside the central processing unit (CPU). Confidential VMs leverage the Secure Encrypted Virtualization (SEV) feature of AMD EPYC™ CPUs. Customer data will stay encrypted while it is used, indexed, queried, or trained on. Encryption keys are generated in hardware, per VM, and not exportable. Thanks to built-in hardware optimizations of both performance and security, there is no significant performance penalty to Confidential Computing workloads.",
@@ -1178,7 +1178,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "4. Virtual Machines",
+ "Section": "4 Virtual Machines",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Google Cloud Virtual Machines have the ability, via an OS Config agent API, to periodically (about every 10 minutes) report OS inventory data. A patch compliance API periodically reads this data and cross-references metadata to determine if the latest updates are installed. This is not the only patch management solution available to your organization, and you should weigh your needs before committing to using this method.",
@@ -1200,7 +1200,7 @@
],
"Attributes": [
{
- "Section": "5. Storage",
+ "Section": "5 Storage",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the IAM policy on Cloud Storage buckets does not allow anonymous or public access.",
@@ -1222,7 +1222,7 @@
],
"Attributes": [
{
- "Section": "5. Storage",
+ "Section": "5 Storage",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended that uniform bucket-level access is enabled on Cloud Storage buckets.",
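This setting is straightforward to sweep for with the `google-cloud-storage` client library, which exposes it on each bucket's IAM configuration. A sketch, assuming application default credentials are available; this is not how the mapped Prowler check is implemented:

```python
from google.cloud import storage

# Lists every bucket visible to the default credentials and reports
# whether uniform bucket-level access is enabled.
client = storage.Client()
for bucket in client.list_buckets():
    ubla = bucket.iam_configuration.uniform_bucket_level_access_enabled
    print(f"{'PASS' if ubla else 'FAIL'}: {bucket.name}")
```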
@@ -1244,7 +1244,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to enforce all incoming connections to SQL database instances to use SSL.",
@@ -1266,7 +1266,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Database servers should accept connections only from trusted network(s)/IP(s) and restrict access from public IP addresses.",
@@ -1288,7 +1288,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "It is recommended to configure Second Generation SQL instances to use private IPs instead of public IPs.",
@@ -1310,7 +1310,7 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
+ "Section": "6 Cloud SQL Database Services",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to have all SQL database instances set to enable automated backups.",
@@ -1330,8 +1330,8 @@
"Checks": [],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.1. MySQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.1 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances. This recommendation is applicable only for MySQL instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1353,8 +1353,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.1. MySQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.1 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `skip_show_database` database flag for Cloud SQL MySQL instances to `on`.",
@@ -1376,8 +1376,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.1. MySQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.1 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1399,8 +1399,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are `TERSE`, `DEFAULT`, and `VERBOSE`. `TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information. `VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error. Ensure an appropriate value of `DEFAULT` or stricter is set.",
@@ -1422,8 +1422,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1445,8 +1445,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1468,8 +1468,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The value of the `log_statement` flag determines the SQL statements that are logged. Valid values are `none`, `ddl`, `mod`, and `all`. The value `ddl` logs all data definition statements. The value `mod` logs all ddl statements, plus data-modifying statements. The statements are logged after a basic parsing is done and the statement type is determined, thus this does not log statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included. A value of `ddl` is recommended unless otherwise directed by your organization's logging policy.",
@@ -1491,8 +1491,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. `ERROR` is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1514,8 +1514,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that is considered an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1537,8 +1537,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
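Each of these PostgreSQL recommendations reduces to comparing an instance's flag values against an expected setting. A sketch over the Cloud SQL Admin API instance shape, where flags live in `settings.databaseFlags` as name/value pairs; the expected values simply restate the recommendations above:

```python
# Expected flag values restating the 6.2 recommendations in this file.
EXPECTED_PG_FLAGS = {
    "log_connections": "on",
    "log_disconnections": "on",
    "log_statement": "ddl",
    "log_min_messages": "error",
    "log_min_error_statement": "error",
    "log_min_duration_statement": "-1",  # -1 disables duration logging
    "cloudsql.enable_pgaudit": "on",
}

def flag_findings(instance: dict) -> list[str]:
    """Return one finding per flag that is unset or differs from the expectation."""
    flags = {
        f["name"]: f["value"]
        for f in instance.get("settings", {}).get("databaseFlags", [])
    }
    return [
        f"{name} is {flags.get(name, 'unset')}, expected {expected}"
        for name, expected in EXPECTED_PG_FLAGS.items()
        if flags.get(name) != expected
    ]

# Illustrative instance with only one compliant flag set.
instance = {"settings": {"databaseFlags": [{"name": "log_connections", "value": "on"}]}}
for finding in flag_findings(instance):
    print(finding)
```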
@@ -1560,8 +1560,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.2. PostgreSQL Database",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.2 PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure the `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instances is set to `on` to allow for centralized logging.",
@@ -1583,8 +1583,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `external scripts enabled` database flag for Cloud SQL SQL Server instances to `off`.",
@@ -1606,8 +1606,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `cross db ownership chaining` database flag for Cloud SQL SQL Server instances to `off`. This flag is deprecated for all SQL Server versions in GCP. Going forward, you can't set its value to on. However, if you have this flag enabled, we strongly recommend that you either remove the flag from your database or set it to off. For cross-database access, use the [Microsoft tutorial for signing stored procedures with a certificate](https://learn.microsoft.com/en-us/sql/relational-databases/tutorial-signing-stored-procedures-with-a-certificate?view=sql-server-ver16).",
@@ -1629,8 +1629,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to check the `user connections` flag for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1652,8 +1652,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the `user options` database flag for Cloud SQL SQL Server instances not be configured.",
@@ -1675,8 +1675,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `remote access` database flag for Cloud SQL SQL Server instances to `off`.",
@@ -1694,12 +1694,12 @@
"Id": "6.3.6",
"Description": "Ensure '3625 (trace flag)' database flag for all Cloud SQL Server instances is set to 'on'",
"Checks": [
- "cloudsql_instance_sqlserver_trace_flag\""
+ "cloudsql_instance_sqlserver_trace_flag"
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `3625 (trace flag)` database flag for Cloud SQL SQL Server instances to `on`.",
@@ -1721,8 +1721,8 @@
],
"Attributes": [
{
- "Section": "6. Cloud SQL Database Services",
- "SubSection": "6.3. SQL Server",
+ "Section": "6 Cloud SQL Database Services",
+ "SubSection": "6.3 SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended not to set the `contained database authentication` database flag for Cloud SQL SQL Server instances to `on`.",
@@ -1744,7 +1744,7 @@
],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that the IAM policy on BigQuery datasets does not allow anonymous and/or public access.",
@@ -1766,7 +1766,7 @@
],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "BigQuery by default encrypts data at rest by employing `Envelope Encryption` using Google-managed cryptographic keys. The data is encrypted using `data encryption keys`, and the data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and does not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as the encryption key management solution for BigQuery data sets. If CMEK is used, the CMEK is used to encrypt the data encryption keys instead of using Google-managed encryption keys.",
@@ -1788,7 +1788,7 @@
],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "BigQuery by default encrypts data at rest by employing `Envelope Encryption` using Google-managed cryptographic keys. The data is encrypted using `data encryption keys`, and the data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and does not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as the encryption key management solution for BigQuery data sets.",
@@ -1808,7 +1808,7 @@
"Checks": [],
"Attributes": [
{
- "Section": "7. BigQuery",
+ "Section": "7 BigQuery",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "BigQuery tables can contain sensitive data that for security purposes should be discovered, monitored, classified, and protected. Google Cloud's Sensitive Data Protection tools can automatically provide data classification of all BigQuery data across an organization.",
@@ -1830,7 +1830,7 @@
],
"Attributes": [
{
- "Section": "8. Dataproc",
+ "Section": "8 Dataproc",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "When you use Dataproc, cluster and job data is stored on Persistent Disks (PDs) associated with the Compute Engine VMs in your cluster and in a Cloud Storage staging bucket. This PD and bucket data is encrypted using a Google-generated data encryption key (DEK) and key encryption key (KEK). The CMEK feature allows you to create, use, and revoke the key encryption key (KEK). Google still controls the data encryption key (DEK).",