diff --git a/api/.env.example b/api/.env.example
new file mode 100644
index 0000000000..3506531aac
--- /dev/null
+++ b/api/.env.example
@@ -0,0 +1,39 @@
+# Django settings
+DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1
+DJANGO_BIND_ADDRESS=0.0.0.0
+DJANGO_PORT=8000
+DJANGO_DEBUG=False
+# Select one of [production|devel]
+DJANGO_SETTINGS_MODULE=config.django.[production|devel]
+# Select one of [ndjson|human_readable]
+DJANGO_LOGGING_FORMATTER=[ndjson|human_readable]
+# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
+# Applies to both Django and Celery Workers
+DJANGO_LOGGING_LEVEL=INFO
+DJANGO_WORKERS=4 # Defaults to the maximum available based on CPU cores if not set.
+DJANGO_TOKEN_SIGNING_KEY=""
+DJANGO_TOKEN_VERIFYING_KEY=""
+# Token lifetime is in minutes
+DJANGO_ACCESS_TOKEN_LIFETIME=30
+DJANGO_REFRESH_TOKEN_LIFETIME=1440
+DJANGO_CACHE_MAX_AGE=3600
+DJANGO_STALE_WHILE_REVALIDATE=60
+DJANGO_SECRETS_ENCRYPTION_KEY=""
+# Decide whether to allow Django to manage database table partitions
+DJANGO_MANAGE_DB_PARTITIONS=[True|False]
+
+# PostgreSQL settings
+# If running django and celery on host, use 'localhost', else use 'postgres-db'
+POSTGRES_HOST=[localhost|postgres-db]
+POSTGRES_PORT=5432
+POSTGRES_ADMIN_USER=prowler
+POSTGRES_ADMIN_PASSWORD=S3cret
+POSTGRES_USER=prowler_user
+POSTGRES_PASSWORD=S3cret
+POSTGRES_DB=prowler_db
+
+# Valkey settings
+# If running django and celery on host, use localhost, else use 'valkey'
+VALKEY_HOST=[localhost|valkey]
+VALKEY_PORT=6379
+VALKEY_DB=0
diff --git a/api/.github/CODEOWNERS b/api/.github/CODEOWNERS
new file mode 100644
index 0000000000..087ce7daa0
--- /dev/null
+++ b/api/.github/CODEOWNERS
@@ -0,0 +1,5 @@
+* @prowler-cloud/api
+
+# To protect a repository fully against unauthorized changes, you also need to define an owner for the CODEOWNERS file itself.
+# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#codeowners-and-branch-protection
+/.github/ @prowler-cloud/api
diff --git a/api/.github/ISSUE_TEMPLATE/bug_report.yml b/api/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000000..6ee76d25ea
--- /dev/null
+++ b/api/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,97 @@
+name: 🐞 Bug Report
+description: Create a report to help us improve
+title: "[Bug]: "
+labels: ["bug", "status/needs-triage"]
+
+body:
+  - type: textarea
+    id: reproduce
+    attributes:
+      label: Steps to Reproduce
+      description: Steps to reproduce the behavior
+      placeholder: |-
+        1. What command are you running?
+        2. Cloud provider you are launching
+        3. Environment you have, like single account, multi-account, organizations, multi or single subscription, etc.
+        4. See error
+    validations:
+      required: true
+  - type: textarea
+    id: expected
+    attributes:
+      label: Expected behavior
+      description: A clear and concise description of what you expected to happen.
+    validations:
+      required: true
+  - type: textarea
+    id: actual
+    attributes:
+      label: Actual Result with Screenshots or Logs
+      description: If applicable, add screenshots to help explain your problem. You can also add logs (anonymize them first!). Here is a command that may help to share a log: `prowler --log-level DEBUG --log-file $(date +%F)_debug.log`. Then attach the log file here.
+    validations:
+      required: true
+  - type: dropdown
+    id: type
+    attributes:
+      label: How did you install Prowler?
+ options: + - Cloning the repository from github.com (git clone) + - From pip package (pip install prowler) + - From brew (brew install prowler) + - Docker (docker pull toniblyx/prowler) + validations: + required: true + - type: textarea + id: environment + attributes: + label: Environment Resource + description: From where are you running Prowler? + placeholder: |- + 1. EC2 instance + 2. Fargate task + 3. Docker container locally + 4. EKS + 5. Cloud9 + 6. CodeBuild + 7. Workstation + 8. Other(please specify) + validations: + required: true + - type: textarea + id: os + attributes: + label: OS used + description: Which OS are you using? + placeholder: |- + 1. Amazon Linux 2 + 2. MacOS + 3. Alpine Linux + 4. Windows + 5. Other(please specify) + validations: + required: true + - type: input + id: prowler-version + attributes: + label: Prowler version + description: Which Prowler version are you using? + placeholder: |- + prowler --version + validations: + required: true + - type: input + id: pip-version + attributes: + label: Pip version + description: Which pip version are you using? + placeholder: |- + pip --version + validations: + required: true + - type: textarea + id: additional + attributes: + description: Additional context + label: Context + validations: + required: false diff --git a/api/.github/ISSUE_TEMPLATE/config.yml b/api/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..3ba13e0cec --- /dev/null +++ b/api/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/api/.github/ISSUE_TEMPLATE/feature-request.yml b/api/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000000..e72e26a490 --- /dev/null +++ b/api/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,36 @@ +name: ๐Ÿ’ก Feature Request +description: Suggest an idea for this project +labels: ["enhancement", "status/needs-triage"] + + +body: + - type: textarea + id: Problem + attributes: + label: New feature motivation + description: Is your feature request related to a problem? Please describe + placeholder: |- + 1. A clear and concise description of what the problem is. Ex. I'm always frustrated when + validations: + required: true + - type: textarea + id: Solution + attributes: + label: Solution Proposed + description: A clear and concise description of what you want to happen. + validations: + required: true + - type: textarea + id: Alternatives + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: true + - type: textarea + id: Context + attributes: + label: Additional context + description: Add any other context or screenshots about the feature request here. 
+ validations: + required: false diff --git a/api/.github/labeler.yml b/api/.github/labeler.yml new file mode 100644 index 0000000000..b6b5c279b2 --- /dev/null +++ b/api/.github/labeler.yml @@ -0,0 +1,15 @@ +documentation: + - changed-files: + - any-glob-to-any-file: "docs/**" + +backend/api: + - changed-files: + - any-glob-to-any-file: "src/backend/api/**" + +backend/backend: + - changed-files: + - any-glob-to-any-file: "src/backend/backend/**" + +github_actions: + - changed-files: + - any-glob-to-any-file: ".github/workflows/*" diff --git a/api/.github/pull_request_template.md b/api/.github/pull_request_template.md new file mode 100644 index 0000000000..8ffd4db041 --- /dev/null +++ b/api/.github/pull_request_template.md @@ -0,0 +1,13 @@ +### Context + +Please include relevant motivation and context for this PR. + + +### Description + +Please include a summary of the change and which issue is fixed. List any dependencies that are required for this change. + + +### License + +By submitting this pull request, I confirm that my contribution is made under the terms of the AGPL-3.0 license. diff --git a/api/.github/workflows/codeql.yml b/api/.github/workflows/codeql.yml new file mode 100644 index 0000000000..dc454058ee --- /dev/null +++ b/api/.github/workflows/codeql.yml @@ -0,0 +1,57 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "main"] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "main" ] + schedule: + - cron: '00 12 * * *' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
+
+        # For details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+        # queries: security-extended,security-and-quality
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v3
+      with:
+        category: "/language:${{matrix.language}}"
diff --git a/api/.github/workflows/find-secrets.yml b/api/.github/workflows/find-secrets.yml
new file mode 100644
index 0000000000..6428ee0a0a
--- /dev/null
+++ b/api/.github/workflows/find-secrets.yml
@@ -0,0 +1,18 @@
+name: find-secrets
+
+on: pull_request
+
+jobs:
+  trufflehog:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: TruffleHog OSS
+        uses: trufflesecurity/trufflehog@v3.68.2
+        with:
+          path: ./
+          base: ${{ github.event.repository.default_branch }}
+          head: HEAD
diff --git a/api/.github/workflows/labeler.yml b/api/.github/workflows/labeler.yml
new file mode 100644
index 0000000000..7bbe961110
--- /dev/null
+++ b/api/.github/workflows/labeler.yml
@@ -0,0 +1,15 @@
+name: "Pull Request Labeler"
+
+on:
+  pull_request_target:
+    branches:
+      - "main"
+
+jobs:
+  labeler:
+    permissions:
+      contents: read
+      pull-requests: write
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/labeler@v5
diff --git a/api/.github/workflows/pull-request.yml b/api/.github/workflows/pull-request.yml
new file mode 100644
index 0000000000..d842de06e8
--- /dev/null
+++ b/api/.github/workflows/pull-request.yml
@@ -0,0 +1,139 @@
+name: pr-lint-test
+
+on:
+  push:
+    branches:
+      - "main"
+  pull_request:
+    branches:
+      - "main"
+
+
+env:
+  POSTGRES_HOST: localhost
+  POSTGRES_PORT: 5432
+  POSTGRES_USER: postgres
+  POSTGRES_PASSWORD: postgres
+  POSTGRES_DB: prowler_db_test
+  VALKEY_HOST: localhost
+  VALKEY_PORT: 6379
+  VALKEY_DB: 0
+
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.12"]
+
+    # Service containers to run with `test`
+    services:
+      # Label used to access the service container
+      postgres:
+        image: postgres
+        env:
+          POSTGRES_HOST: ${{ env.POSTGRES_HOST }}
+          POSTGRES_PORT: ${{ env.POSTGRES_PORT }}
+          POSTGRES_USER: ${{ env.POSTGRES_USER }}
+          POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }}
+          POSTGRES_DB: ${{ env.POSTGRES_DB }}
+        # Set health checks to wait until postgres has started
+        ports:
+          - 5432:5432
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+      valkey:
+        image: valkey/valkey:7-alpine3.19
+        env:
+          VALKEY_HOST: ${{ env.VALKEY_HOST }}
+          VALKEY_PORT: ${{ env.VALKEY_PORT }}
+          VALKEY_DB: ${{ env.VALKEY_DB }}
+        # Set health checks to wait until valkey has started
+        ports:
+          - 6379:6379
+        options: >-
+          --health-cmd "valkey-cli ping"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Test if changes are in non-ignored paths
+        id: are-non-ignored-files-changed
+        uses: tj-actions/changed-files@v42
+        with:
+          files: ./**
+          files_ignore: |
+            .github/**
+            README.md
+            docs/**
+            permissions/**
+            mkdocs.yml
+      - name: Install poetry
+        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
+        run: |
+          python -m pip install --upgrade pip
+          pipx install poetry
+      - name: Set up Python ${{ matrix.python-version }}
+        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: "poetry"
+      -
name: Install dependencies + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry install + poetry run pip list + VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \ + grep '"tag_name":' | \ + sed -E 's/.*"v([^"]+)".*/\1/' \ + ) && curl -L -o /tmp/hadolint "https://github.com/hadolint/hadolint/releases/download/v${VERSION}/hadolint-Linux-x86_64" \ + && chmod +x /tmp/hadolint + + - name: Poetry check + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry lock --check + - name: Lint with ruff + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run ruff check . --exclude contrib + - name: Check Format with ruff + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run ruff format --check . --exclude contrib + - name: Lint with pylint + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/ + - name: Bandit + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run bandit -q -lll -x '*_test.py,./contrib/' -r . + - name: Safety + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run safety check --ignore 70612,66963 + - name: Vulture + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run vulture --exclude "contrib,tests,conftest.py" --min-confidence 100 . + - name: Hadolint + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + /tmp/hadolint Dockerfile --ignore=DL3013 + - name: Test with pytest + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + run: | + poetry run pytest -n auto --cov=./src/backend --cov-report=xml src/backend + - name: Upload coverage reports to Codecov + if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true' + uses: codecov/codecov-action@v4 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/api/.gitignore b/api/.gitignore new file mode 100644 index 0000000000..a215af5677 --- /dev/null +++ b/api/.gitignore @@ -0,0 +1,168 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.pyc +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal +/_data/ + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +*.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+.idea/ + +# VSCode +.vscode/ diff --git a/api/.pre-commit-config.yaml b/api/.pre-commit-config.yaml new file mode 100644 index 0000000000..6859567eb5 --- /dev/null +++ b/api/.pre-commit-config.yaml @@ -0,0 +1,91 @@ +repos: + ## GENERAL + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: check-merge-conflict + - id: check-yaml + args: ["--unsafe"] + - id: check-json + - id: end-of-file-fixer + - id: trailing-whitespace + - id: no-commit-to-branch + - id: pretty-format-json + args: ["--autofix", "--no-sort-keys", "--no-ensure-ascii"] + exclude: 'src/backend/api/fixtures/dev/.*\.json$' + + ## TOML + - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks + rev: v2.13.0 + hooks: + - id: pretty-format-toml + args: [--autofix] + files: pyproject.toml + + ## BASH + - repo: https://github.com/koalaman/shellcheck-precommit + rev: v0.10.0 + hooks: + - id: shellcheck + exclude: contrib + ## PYTHON + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.5.0 + hooks: + # Run the linter. + - id: ruff + args: [ --fix ] + # Run the formatter. + - id: ruff-format + + - repo: https://github.com/python-poetry/poetry + rev: 1.8.0 + hooks: + - id: poetry-check + args: ["--directory=src"] + - id: poetry-lock + args: ["--no-update", "--directory=src"] + + - repo: https://github.com/hadolint/hadolint + rev: v2.13.0-beta + hooks: + - id: hadolint + args: ["--ignore=DL3013", "Dockerfile"] + + - repo: local + hooks: + - id: pylint + name: pylint + entry: bash -c 'poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/' + language: system + files: '.*\.py' + + - id: trufflehog + name: TruffleHog + description: Detect secrets in your data. + entry: bash -c 'trufflehog --no-update git file://. --only-verified --fail' + # For running trufflehog in docker, use the following entry instead: + # entry: bash -c 'docker run -v "$(pwd):/workdir" -i --rm trufflesecurity/trufflehog:latest git file:///workdir --only-verified --fail' + language: system + stages: ["commit", "push"] + + - id: bandit + name: bandit + description: "Bandit is a tool for finding common security issues in Python code" + entry: bash -c 'poetry run bandit -q -lll -x '*_test.py,./contrib/,./.venv/' -r .' + language: system + files: '.*\.py' + + - id: safety + name: safety + description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities" + entry: bash -c 'poetry run safety check --ignore 70612,66963' + language: system + + - id: vulture + name: vulture + description: "Vulture finds unused code in Python programs." + entry: bash -c 'poetry run vulture --exclude "contrib,.venv,tests,conftest.py" --min-confidence 100 .' 
+        language: system
+        files: '.*\.py'
diff --git a/api/Dockerfile b/api/Dockerfile
new file mode 100644
index 0000000000..9bda1950ec
--- /dev/null
+++ b/api/Dockerfile
@@ -0,0 +1,46 @@
+FROM python:3.12-alpine AS build
+
+LABEL maintainer="https://github.com/prowler-cloud/api"
+
+# hadolint ignore=DL3018
+RUN apk --no-cache add gcc python3-dev musl-dev linux-headers curl-dev
+
+RUN apk --no-cache upgrade && \
+    addgroup -g 1000 prowler && \
+    adduser -D -u 1000 -G prowler prowler
+USER prowler
+
+WORKDIR /home/prowler
+
+COPY pyproject.toml ./
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir poetry
+
+COPY src/backend/ ./backend/
+
+ENV PATH="/home/prowler/.local/bin:$PATH"
+
+RUN poetry install && \
+    rm -rf ~/.cache/pip
+
+COPY docker-entrypoint.sh ./docker-entrypoint.sh
+
+WORKDIR /home/prowler/backend
+
+# Development image
+# hadolint ignore=DL3006
+FROM build AS dev
+
+USER 0
+# hadolint ignore=DL3018
+RUN apk --no-cache add curl vim
+
+USER prowler
+
+ENTRYPOINT ["../docker-entrypoint.sh", "dev"]
+
+# Production image
+FROM build
+
+ENTRYPOINT ["../docker-entrypoint.sh", "prod"]
diff --git a/api/README.md b/api/README.md
new file mode 100644
index 0000000000..cd38f532d5
--- /dev/null
+++ b/api/README.md
@@ -0,0 +1,271 @@
+# Description
+
+This repository contains the JSON API and Task Runner components for Prowler, which together provide a complete backend that interacts with the Prowler SDK and is used by the Prowler UI.
+
+# Components
+The Prowler API is composed of the following components:
+
+- The JSON API, built with Django REST Framework.
+- The Celery worker, which is responsible for executing the background tasks defined in the JSON API.
+- The PostgreSQL database, which is used to store the data.
+- The Valkey database, an in-memory datastore used as the message broker for the Celery workers.
+
+## Note about Valkey
+
+[Valkey](https://valkey.io/) is an open-source (BSD) high-performance key/value datastore.
+
+Valkey exposes a Redis 7.2-compliant API, so any service that exposes the Redis API can be used with the Prowler API.
+
+# Modify environment variables
+
+Under the root path of the project, you can find a file called `.env.example`. This file shows all the environment variables that the project uses. You *must* create a new file called `.env` and set the values for the variables.
+
+## Local deployment
+Keep in mind that if you export the `.env` file for a local deployment, you must do so from within the Poetry shell, not before. Otherwise, the variables will not be loaded properly.
+
+To do this, you can run:
+
+```console
+poetry shell
+set -a
+source .env
+```
+
+# 🚀 Production deployment
+## Docker deployment
+
+This method requires `docker` and `docker compose`.
+
+### Clone the repository
+
+```console
+# HTTPS
+git clone https://github.com/prowler-cloud/api.git

+# SSH
+git clone git@github.com:prowler-cloud/api.git
+
+```
+
+### Build the base image
+
+```console
+docker compose --profile prod build
+```
+
+### Run the production service
+
+This command will start the Django production server, the Celery worker, and the Valkey and PostgreSQL databases.
+
+```console
+docker compose --profile prod up -d
+```
+
+You can access the server at `http://localhost:8080`.
+
+> **NOTE:** Notice that the port is different: when running under Docker, port `8080` is used to prevent conflicts.
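+
+Before checking the logs, you can confirm that the container is actually listening with a plain HTTP request against the mapped port (a minimal check: any HTTP response, even a redirect or a `404`, means Gunicorn is up; the exact status depends on your URL configuration):
+
+```console
+# Print only the status line and headers of the response
+curl -i http://localhost:8080
+```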
+
+### View the Production Server Logs
+
+To view the logs for any component (e.g., Django, Celery worker), you can use the following command with a wildcard. This command will follow logs for any container that matches the specified pattern:
+
+```console
+docker logs -f $(docker ps --format "{{.Names}}" | grep 'api-')
+```
+
+## Local deployment
+
+To use this method, you'll need to set up a Python virtual environment (version ">=3.11,<3.13") and keep dependencies updated. Additionally, ensure that `poetry` and `docker compose` are installed.
+
+### Clone the repository
+
+```console
+# HTTPS
+git clone https://github.com/prowler-cloud/api.git
+
+# SSH
+git clone git@github.com:prowler-cloud/api.git
+
+```
+
+### Install all dependencies with Poetry
+
+```console
+poetry install
+poetry shell
+```
+
+### Start the PostgreSQL Database and Valkey
+
+The PostgreSQL database (version 16.3) and Valkey (version 7) are required for the development environment. To make development easier, we have provided a `docker-compose` file that will start these components for you.
+
+**Note:** Make sure to use the specified versions, as there are features in our setup that may not be compatible with older versions of PostgreSQL and Valkey.
+
+```console
+docker compose up postgres valkey -d
+```
+
+### Deploy Django and the Celery worker
+
+#### Run migrations
+
+For migrations, you need to force the `admin` database router. Assuming you have the correct environment variables and Python virtual environment, run:
+
+```console
+cd src/backend
+python manage.py migrate --database admin
+```
+
+#### Run the Celery worker
+
+```console
+cd src/backend
+python -m celery -A config.celery worker -l info -E
+```
+
+#### Run the Django server with Gunicorn
+
+```console
+cd src/backend
+gunicorn -c config/guniconf.py config.wsgi:application
+```
+
+> By default, the Gunicorn server will try to use as many workers as your machine can handle. You can manually change that in the `src/backend/config/guniconf.py` file.
+
+# 🧪 Development guide
+
+## Local deployment
+
+To use this method, you'll need to set up a Python virtual environment (version ">=3.11,<3.13") and keep dependencies updated. Additionally, ensure that `poetry` and `docker compose` are installed.
+
+### Clone the repository
+
+```console
+# HTTPS
+git clone https://github.com/prowler-cloud/api.git
+
+# SSH
+git clone git@github.com:prowler-cloud/api.git
+
+```
+
+### Start the PostgreSQL Database and Valkey
+
+The PostgreSQL database (version 16.3) and Valkey (version 7) are required for the development environment. To make development easier, we have provided a `docker-compose` file that will start these components for you.
+
+**Note:** Make sure to use the specified versions, as there are features in our setup that may not be compatible with older versions of PostgreSQL and Valkey.
+
+```console
+docker compose up postgres valkey -d
+```
+
+### Install the Python dependencies
+
+> You must have Poetry installed
+
+```console
+poetry install
+poetry shell
+```
+
+### Apply migrations
+
+For migrations, you need to force the `admin` database router. Assuming you have the correct environment variables and Python virtual environment, run:
+
+```console
+cd src/backend
+python manage.py migrate --database admin
+```
+
+### Run the Django development server
+
+```console
+cd src/backend
+python manage.py runserver
+```
+
+You can access the server at `http://localhost:8000`.
+All code changes will be reloaded automatically by the server.
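+
+By default, `runserver` binds to `127.0.0.1:8000`. If you need a different interface or port (for example, to mirror the `DJANGO_BIND_ADDRESS` and `DJANGO_PORT` values from your `.env`), pass them explicitly, similar to what the Docker entrypoint does (a minimal sketch; `runserver` does not read those variables on its own):
+
+```console
+python manage.py runserver "${DJANGO_BIND_ADDRESS:-0.0.0.0}:${DJANGO_PORT:-8000}"
+```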
+
+### Run the Celery worker
+
+```console
+python -m celery -A config.celery worker -l info -E
+```
+
+The Celery worker does not detect and reload code changes, so you need to restart it manually when you make changes.
+
+## Docker deployment
+
+This method requires `docker` and `docker compose`.
+
+### Clone the repository
+
+```console
+# HTTPS
+git clone https://github.com/prowler-cloud/api.git
+
+# SSH
+git clone git@github.com:prowler-cloud/api.git
+
+```
+
+### Build the base image
+
+```console
+docker compose --profile dev build
+```
+
+### Run the development service
+
+This command will start the Django development server, the Celery worker, and the Valkey and PostgreSQL databases.
+
+```console
+docker compose --profile dev up -d
+```
+
+You can access the server at `http://localhost:8080`.
+All code changes will be reloaded automatically by the server.
+
+> **NOTE:** Notice that the port is different: when developing with Docker, port `8080` is used to prevent conflicts.
+
+### View the development server logs
+
+To view the logs for any component (e.g., Django, Celery worker), you can use the following command with a wildcard. This command will follow logs for any container that matches the specified pattern:
+
+```console
+docker logs -f $(docker ps --format "{{.Names}}" | grep 'api-')
+```
+
+## Applying migrations
+
+For migrations, you need to force the `admin` database router. Assuming you have the correct environment variables and Python virtual environment, run:
+
+```console
+poetry shell
+cd src/backend
+python manage.py migrate --database admin
+```
+
+## Apply fixtures
+
+Fixtures are used to populate the database with initial development data.
+
+```console
+poetry shell
+cd src/backend
+python manage.py loaddata api/fixtures/0_dev_users.json --database admin
+```
+
+> The default credentials are `dev@prowler.com:thisisapassword123` or `dev2@prowler.com:thisisapassword123`
+
+## Run tests
+
+Note that the tests will fail if you use the same `.env` file as the development environment.
+
+For best results, run in a new shell with no environment variables set.
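+
+If opening a fresh shell is inconvenient, you can strip the inherited environment explicitly before running the usual commands below (a sketch, assuming a POSIX shell; `env -i` clears everything, so `HOME` and `PATH` are passed back in for Poetry to work):
+
+```console
+env -i HOME="$HOME" PATH="$PATH" sh -c 'cd src/backend && poetry run pytest'
+```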
+ +```console +poetry shell +cd src/backend +pytest +``` diff --git a/api/docker-compose.yml b/api/docker-compose.yml new file mode 100644 index 0000000000..b344736152 --- /dev/null +++ b/api/docker-compose.yml @@ -0,0 +1,125 @@ +services: + api: + build: + dockerfile: Dockerfile + image: prowler-api + env_file: + - path: ./.env + required: false + ports: + - "${DJANGO_PORT:-8000}:${DJANGO_PORT:-8000}" + profiles: + - prod + depends_on: + postgres: + condition: service_healthy + valkey: + condition: service_healthy + entrypoint: + - "../docker-entrypoint.sh" + - "prod" + + api-dev: + build: + dockerfile: Dockerfile + target: dev + image: prowler-api-dev + environment: + - DJANGO_SETTINGS_MODULE=config.django.devel + - DJANGO_LOGGING_FORMATTER=human_readable + env_file: + - path: ./.env + required: false + ports: + - "${DJANGO_PORT:-8080}:${DJANGO_PORT:-8080}" + volumes: + - "./src/backend:/home/prowler/backend" + - "./pyproject.toml:/home/prowler/pyproject.toml" + profiles: + - dev + depends_on: + postgres: + condition: service_healthy + valkey: + condition: service_healthy + entrypoint: + - "../docker-entrypoint.sh" + - "dev" + + postgres: + image: postgres:16.3-alpine + ports: + - "${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}" + hostname: "postgres-db" + volumes: + - ./_data/postgres:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${POSTGRES_ADMIN_USER:-prowler} + - POSTGRES_PASSWORD=${POSTGRES_ADMIN_PASSWORD:-S3cret} + - POSTGRES_DB=${POSTGRES_DB:-prowler_db} + env_file: + - path: ./.env + required: false + healthcheck: + test: ["CMD-SHELL", "sh -c 'pg_isready -U ${POSTGRES_ADMIN_USER:-prowler} -d ${POSTGRES_DB:-prowler_db}'"] + interval: 5s + timeout: 5s + retries: 5 + + valkey: + image: valkey/valkey:7-alpine3.19 + ports: + - "${VALKEY_PORT:-6379}:6379" + hostname: "valkey" + volumes: + - ./_data/valkey:/data + env_file: + - path: ./.env + required: false + healthcheck: + test: ["CMD-SHELL", "sh -c 'valkey-cli ping'"] + interval: 10s + timeout: 5s + retries: 3 + + worker: + build: + dockerfile: Dockerfile + image: prowler-worker + environment: + - DJANGO_SETTINGS_MODULE=${DJANGO_SETTINGS_MODULE:-config.django.production} + env_file: + - path: ./.env + required: false + profiles: + - dev + - prod + depends_on: + valkey: + condition: service_healthy + postgres: + condition: service_healthy + entrypoint: + - "../docker-entrypoint.sh" + - "worker" + + worker-beat: + build: + dockerfile: Dockerfile + image: prowler-worker + environment: + - DJANGO_SETTINGS_MODULE=${DJANGO_SETTINGS_MODULE:-config.django.production} + env_file: + - path: ./.env + required: false + profiles: + - dev + - prod + depends_on: + valkey: + condition: service_healthy + postgres: + condition: service_healthy + entrypoint: + - "../docker-entrypoint.sh" + - "beat" diff --git a/api/docker-entrypoint.sh b/api/docker-entrypoint.sh new file mode 100755 index 0000000000..62ceb69a0f --- /dev/null +++ b/api/docker-entrypoint.sh @@ -0,0 +1,71 @@ +#!/bin/sh + + +apply_migrations() { + echo "Applying database migrations..." + poetry run python manage.py migrate --database admin +} + +apply_fixtures() { + echo "Applying Django fixtures..." + for fixture in api/fixtures/dev/*.json; do + if [ -f "$fixture" ]; then + echo "Loading $fixture" + poetry run python manage.py loaddata "$fixture" --database admin + fi + done +} + +start_dev_server() { + echo "Starting the development server..." 
+    poetry run python manage.py runserver 0.0.0.0:"${DJANGO_PORT:-8080}"
+}
+
+start_prod_server() {
+    echo "Starting the Gunicorn server..."
+    poetry run gunicorn -c config/guniconf.py config.wsgi:application
+}
+
+start_worker() {
+    echo "Starting the worker..."
+    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E
+}
+
+start_worker_beat() {
+    echo "Starting the worker-beat..."
+    sleep 15
+    poetry run python -m celery -A config.celery beat -l "${DJANGO_LOGGING_LEVEL:-info}" --scheduler django_celery_beat.schedulers:DatabaseScheduler
+}
+
+manage_db_partitions() {
+    if [ "${DJANGO_MANAGE_DB_PARTITIONS}" = "True" ]; then
+        echo "Managing DB partitions..."
+        # For now we skip the deletion of partitions until we define the data retention policy
+        # --yes auto approves the operation without the need of an interactive terminal
+        poetry run python manage.py pgpartition --using admin --skip-delete --yes
+    fi
+}
+
+case "$1" in
+    dev)
+        apply_migrations
+        apply_fixtures
+        manage_db_partitions
+        start_dev_server
+        ;;
+    prod)
+        apply_migrations
+        manage_db_partitions
+        start_prod_server
+        ;;
+    worker)
+        start_worker
+        ;;
+    beat)
+        start_worker_beat
+        ;;
+    *)
+        echo "Usage: $0 {dev|prod|worker|beat}"
+        exit 1
+        ;;
+esac
diff --git a/api/docs/partitions.md b/api/docs/partitions.md
new file mode 100644
index 0000000000..3423488bb1
--- /dev/null
+++ b/api/docs/partitions.md
@@ -0,0 +1,65 @@
+# Partitions
+
+## Overview
+
+Partitions are used to split the data in a table into smaller chunks, allowing for more efficient querying and storage.
+
+The Prowler API uses partitions to store findings. The partitions are created based on the UUIDv7 `id` field.
+
+You can use the Prowler API without ever creating additional partitions. This documentation is only relevant if you want to manage partitions to gain additional query performance.
+
+### Required Postgres Configuration
+
+There are 3 configuration options that need to be set in the `postgresql.conf` file to get the most performance out of the partitioning:
+
+- `enable_partition_pruning = on` (default is on)
+- `enable_partitionwise_join = on` (default is off)
+- `enable_partitionwise_aggregate = on` (default is off)
+
+For more information on these options, see the [Postgres documentation](https://www.postgresql.org/docs/current/runtime-config-query.html).
+
+## Partitioning Strategy
+
+The partitioning strategy is defined in the `api.partitions` module. The strategy is responsible for creating and deleting partitions based on the provided configuration.
+
+## Managing Partitions
+
+The application will run without any extra work on your part. If you want to add or delete partitions, run `python manage.py pgpartition --using admin`.
+
+This command will generate a list of partitions to create and delete based on the provided configuration.
+
+By default, the command will prompt you to accept the changes before applying them.
+
+```shell
+Finding:
+
+    2024_nov
+        name: 2024_nov
+        from_values: 0192e505-9000-72c8-a47c-cce719d8fb93
+        to_values: 01937f84-5418-7eb8-b2a6-e3be749e839d
+        size_unit: months
+        size_value: 1
+
+    2024_dec
+        name: 2024_dec
+        from_values: 01937f84-5800-7b55-879c-9cdb46f023f6
+        to_values: 01941f29-7818-7f9f-b4be-20b05bb2f574
+        size_unit: months
+        size_value: 1
+
+0 partitions will be deleted
+2 partitions will be created
+```
+
+If you choose to apply the partitions, tables will be generated with the following format: `__`.
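+
+If you need to run this non-interactively (for example, from a deployment script), you can reuse the flags that the Docker entrypoint uses: `--yes` auto-approves the plan without a terminal prompt, and `--skip-delete` leaves existing partitions untouched:
+
+```console
+python manage.py pgpartition --using admin --skip-delete --yes
+```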
+
+For more info on the partitioning manager, see https://github.com/SectorLabs/django-postgres-extra
+
+### Changing the Partitioning Parameters
+
+There are 4 environment variables that can be used to change the partitioning parameters:
+
+- `DJANGO_MANAGE_DB_PARTITIONS`: Allow Django to manage database partitions. By default, it is set to `False`.
+- `FINDINGS_TABLE_PARTITION_MONTHS`: Set the months for each partition. Setting the partition months to 1 will create partitions with a size of 1 natural month.
+- `FINDINGS_TABLE_PARTITION_COUNT`: Set the number of partitions to create.
+- `FINDINGS_TABLE_PARTITION_MAX_AGE_MONTHS`: Set the number of months to keep partitions before deleting them. Setting this to `None` will keep partitions indefinitely.
diff --git a/api/poetry.lock b/api/poetry.lock
new file mode 100644
index 0000000000..ef5e2bfe26
--- /dev/null
+++ b/api/poetry.lock
@@ -0,0 +1,4963 @@
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+
+[[package]]
+name = "about-time"
+version = "4.2.1"
+description = "Easily measure timing and throughput of code blocks, with beautiful human friendly representations."
+optional = false
+python-versions = ">=3.7, <4"
+files = [
+    {file = "about-time-4.2.1.tar.gz", hash = "sha256:6a538862d33ce67d997429d14998310e1dbfda6cb7d9bbfbf799c4709847fece"},
+    {file = "about_time-4.2.1-py3-none-any.whl", hash = "sha256:8bbf4c75fe13cbd3d72f49a03b02c5c7dca32169b6d49117c257e7eb3eaee341"},
+]
+
+[[package]]
+name = "aiohappyeyeballs"
+version = "2.4.3"
+description = "Happy Eyeballs for asyncio"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"},
+    {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"},
+]
+
+[[package]]
+name = "aiohttp"
+version = "3.10.11"
+description = "Async http client/server framework (asyncio)"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"},
+    {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"},
+    {file = "aiohttp-3.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffbfde2443696345e23a3c597049b1dd43049bb65337837574205e7368472177"},
+    {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20b3d9e416774d41813bc02fdc0663379c01817b0874b932b81c7f777f67b217"},
+    {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b943011b45ee6bf74b22245c6faab736363678e910504dd7531a58c76c9015a"},
+    {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48bc1d924490f0d0b3658fe5c4b081a4d56ebb58af80a6729d4bd13ea569797a"},
+    {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e12eb3f4b1f72aaaf6acd27d045753b18101524f72ae071ae1c91c1cd44ef115"},
+    {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f14ebc419a568c2eff3c1ed35f634435c24ead2fe19c07426af41e7adb68713a"},
+    {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash =
"sha256:72b191cdf35a518bfc7ca87d770d30941decc5aaf897ec8b484eb5cc8c7706f3"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5ab2328a61fdc86424ee540d0aeb8b73bbcad7351fb7cf7a6546fc0bcffa0038"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa93063d4af05c49276cf14e419550a3f45258b6b9d1f16403e777f1addf4519"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:30283f9d0ce420363c24c5c2421e71a738a2155f10adbb1a11a4d4d6d2715cfc"}, + {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e5358addc8044ee49143c546d2182c15b4ac3a60be01c3209374ace05af5733d"}, + {file = "aiohttp-3.10.11-cp310-cp310-win32.whl", hash = "sha256:e1ffa713d3ea7cdcd4aea9cddccab41edf6882fa9552940344c44e59652e1120"}, + {file = "aiohttp-3.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:778cbd01f18ff78b5dd23c77eb82987ee4ba23408cbed233009fd570dda7e674"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:80ff08556c7f59a7972b1e8919f62e9c069c33566a6d28586771711e0eea4f07"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c8f96e9ee19f04c4914e4e7a42a60861066d3e1abf05c726f38d9d0a466e695"}, + {file = "aiohttp-3.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fb8601394d537da9221947b5d6e62b064c9a43e88a1ecd7414d21a1a6fba9c24"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea224cf7bc2d8856d6971cea73b1d50c9c51d36971faf1abc169a0d5f85a382"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db9503f79e12d5d80b3efd4d01312853565c05367493379df76d2674af881caa"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0f449a50cc33f0384f633894d8d3cd020e3ccef81879c6e6245c3c375c448625"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82052be3e6d9e0c123499127782a01a2b224b8af8c62ab46b3f6197035ad94e9"}, + {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20063c7acf1eec550c8eb098deb5ed9e1bb0521613b03bb93644b810986027ac"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:489cced07a4c11488f47aab1f00d0c572506883f877af100a38f1fedaa884c3a"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ea9b3bab329aeaa603ed3bf605f1e2a6f36496ad7e0e1aa42025f368ee2dc07b"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ca117819d8ad113413016cb29774b3f6d99ad23c220069789fc050267b786c16"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2dfb612dcbe70fb7cdcf3499e8d483079b89749c857a8f6e80263b021745c730"}, + {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9b615d3da0d60e7d53c62e22b4fd1c70f4ae5993a44687b011ea3a2e49051b8"}, + {file = "aiohttp-3.10.11-cp311-cp311-win32.whl", hash = "sha256:29103f9099b6068bbdf44d6a3d090e0a0b2be6d3c9f16a070dd9d0d910ec08f9"}, + {file = "aiohttp-3.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:236b28ceb79532da85d59aa9b9bf873b364e27a0acb2ceaba475dc61cffb6f3f"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7480519f70e32bfb101d71fb9a1f330fbd291655a4c1c922232a48c458c52710"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:f65267266c9aeb2287a6622ee2bb39490292552f9fbf851baabc04c9f84e048d"}, + {file = "aiohttp-3.10.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7400a93d629a0608dc1d6c55f1e3d6e07f7375745aaa8bd7f085571e4d1cee97"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f34b97e4b11b8d4eb2c3a4f975be626cc8af99ff479da7de49ac2c6d02d35725"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e7b825da878464a252ccff2958838f9caa82f32a8dbc334eb9b34a026e2c636"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9f92a344c50b9667827da308473005f34767b6a2a60d9acff56ae94f895f385"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f1ab987a27b83c5268a17218463c2ec08dbb754195113867a27b166cd6087"}, + {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1dc0f4ca54842173d03322793ebcf2c8cc2d34ae91cc762478e295d8e361e03f"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7ce6a51469bfaacff146e59e7fb61c9c23006495d11cc24c514a455032bcfa03"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:aad3cd91d484d065ede16f3cf15408254e2469e3f613b241a1db552c5eb7ab7d"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f4df4b8ca97f658c880fb4b90b1d1ec528315d4030af1ec763247ebfd33d8b9a"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2e4e18a0a2d03531edbc06c366954e40a3f8d2a88d2b936bbe78a0c75a3aab3e"}, + {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6ce66780fa1a20e45bc753cda2a149daa6dbf1561fc1289fa0c308391c7bc0a4"}, + {file = "aiohttp-3.10.11-cp312-cp312-win32.whl", hash = "sha256:a919c8957695ea4c0e7a3e8d16494e3477b86f33067478f43106921c2fef15bb"}, + {file = "aiohttp-3.10.11-cp312-cp312-win_amd64.whl", hash = "sha256:b5e29706e6389a2283a91611c91bf24f218962717c8f3b4e528ef529d112ee27"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:703938e22434d7d14ec22f9f310559331f455018389222eed132808cd8f44127"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9bc50b63648840854e00084c2b43035a62e033cb9b06d8c22b409d56eb098413"}, + {file = "aiohttp-3.10.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f0463bf8b0754bc744e1feb61590706823795041e63edf30118a6f0bf577461"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6c6dec398ac5a87cb3a407b068e1106b20ef001c344e34154616183fe684288"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcaf2d79104d53d4dcf934f7ce76d3d155302d07dae24dff6c9fffd217568067"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25fd5470922091b5a9aeeb7e75be609e16b4fba81cdeaf12981393fb240dd10e"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbde2ca67230923a42161b1f408c3992ae6e0be782dca0c44cb3206bf330dee1"}, + {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:249c8ff8d26a8b41a0f12f9df804e7c685ca35a207e2410adbd3e924217b9006"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:878ca6a931ee8c486a8f7b432b65431d095c522cbeb34892bee5be97b3481d0f"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8663f7777ce775f0413324be0d96d9730959b2ca73d9b7e2c2c90539139cbdd6"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6cd3f10b01f0c31481fba8d302b61603a2acb37b9d30e1d14e0f5a58b7b18a31"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e8d8aad9402d3aa02fdc5ca2fe68bcb9fdfe1f77b40b10410a94c7f408b664d"}, + {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:38e3c4f80196b4f6c3a85d134a534a56f52da9cb8d8e7af1b79a32eefee73a00"}, + {file = "aiohttp-3.10.11-cp313-cp313-win32.whl", hash = "sha256:fc31820cfc3b2863c6e95e14fcf815dc7afe52480b4dc03393c4873bb5599f71"}, + {file = "aiohttp-3.10.11-cp313-cp313-win_amd64.whl", hash = "sha256:4996ff1345704ffdd6d75fb06ed175938c133425af616142e7187f28dc75f14e"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:74baf1a7d948b3d640badeac333af581a367ab916b37e44cf90a0334157cdfd2"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:473aebc3b871646e1940c05268d451f2543a1d209f47035b594b9d4e91ce8339"}, + {file = "aiohttp-3.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c2f746a6968c54ab2186574e15c3f14f3e7f67aef12b761e043b33b89c5b5f95"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d110cabad8360ffa0dec8f6ec60e43286e9d251e77db4763a87dcfe55b4adb92"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0099c7d5d7afff4202a0c670e5b723f7718810000b4abcbc96b064129e64bc7"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0316e624b754dbbf8c872b62fe6dcb395ef20c70e59890dfa0de9eafccd2849d"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a5f7ab8baf13314e6b2485965cbacb94afff1e93466ac4d06a47a81c50f9cca"}, + {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c891011e76041e6508cbfc469dd1a8ea09bc24e87e4c204e05f150c4c455a5fa"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9208299251370ee815473270c52cd3f7069ee9ed348d941d574d1457d2c73e8b"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:459f0f32c8356e8125f45eeff0ecf2b1cb6db1551304972702f34cd9e6c44658"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:14cdc8c1810bbd4b4b9f142eeee23cda528ae4e57ea0923551a9af4820980e39"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:971aa438a29701d4b34e4943e91b5e984c3ae6ccbf80dd9efaffb01bd0b243a9"}, + {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9a309c5de392dfe0f32ee57fa43ed8fc6ddf9985425e84bd51ed66bb16bce3a7"}, + {file = "aiohttp-3.10.11-cp38-cp38-win32.whl", hash = "sha256:9ec1628180241d906a0840b38f162a3215114b14541f1a8711c368a8739a9be4"}, + {file = "aiohttp-3.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:9c6e0ffd52c929f985c7258f83185d17c76d4275ad22e90aa29f38e211aacbec"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc493a2e5d8dc79b2df5bec9558425bcd39aff59fc949810cbd0832e294b106"}, + {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b3e70f24e7d0405be2348da9d5a7836936bf3a9b4fd210f8c37e8d48bc32eca6"}, 
+ {file = "aiohttp-3.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968b8fb2a5eee2770eda9c7b5581587ef9b96fbdf8dcabc6b446d35ccc69df01"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deef4362af9493d1382ef86732ee2e4cbc0d7c005947bd54ad1a9a16dd59298e"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:686b03196976e327412a1b094f4120778c7c4b9cff9bce8d2fdfeca386b89829"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3bf6d027d9d1d34e1c2e1645f18a6498c98d634f8e373395221121f1c258ace8"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:099fd126bf960f96d34a760e747a629c27fb3634da5d05c7ef4d35ef4ea519fc"}, + {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c73c4d3dae0b4644bc21e3de546530531d6cdc88659cdeb6579cd627d3c206aa"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0c5580f3c51eea91559db3facd45d72e7ec970b04528b4709b1f9c2555bd6d0b"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fdf6429f0caabfd8a30c4e2eaecb547b3c340e4730ebfe25139779b9815ba138"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d97187de3c276263db3564bb9d9fad9e15b51ea10a371ffa5947a5ba93ad6777"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:0acafb350cfb2eba70eb5d271f55e08bd4502ec35e964e18ad3e7d34d71f7261"}, + {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c13ed0c779911c7998a58e7848954bd4d63df3e3575f591e321b19a2aec8df9f"}, + {file = "aiohttp-3.10.11-cp39-cp39-win32.whl", hash = "sha256:22b7c540c55909140f63ab4f54ec2c20d2635c0289cdd8006da46f3327f971b9"}, + {file = "aiohttp-3.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:7b26b1551e481012575dab8e3727b16fe7dd27eb2711d2e63ced7368756268fb"}, + {file = "aiohttp-3.10.11.tar.gz", hash = "sha256:9dc2b8f3dcab2e39e0fa309c8da50c3b55e6f34ab25f1a71d3288f24924d33a7"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.12.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "alive-progress" +version = "3.2.0" +description = "A new kind of Progress Bar, with real-time throughput, ETA, and very cool animations!" +optional = false +python-versions = "<4,>=3.9" +files = [ + {file = "alive-progress-3.2.0.tar.gz", hash = "sha256:ede29d046ff454fe56b941f686f89dd9389430c4a5b7658e445cb0b80e0e4deb"}, + {file = "alive_progress-3.2.0-py3-none-any.whl", hash = "sha256:0677929f8d3202572e9d142f08170b34dbbe256cc6d2afbf75ef187c7da964a8"}, +] + +[package.dependencies] +about-time = "4.2.1" +grapheme = "0.6.0" + +[[package]] +name = "amqp" +version = "5.2.0" +description = "Low-level AMQP client for Python (fork of amqplib)." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "amqp-5.2.0-py3-none-any.whl", hash = "sha256:827cb12fb0baa892aad844fd95258143bce4027fdac4fccddbc43330fd281637"}, + {file = "amqp-5.2.0.tar.gz", hash = "sha256:a1ecff425ad063ad42a486c902807d1482311481c8ad95a72694b2975e75f7fd"}, +] + +[package.dependencies] +vine = ">=5.0.0,<6.0.0" + +[[package]] +name = "anyio" +version = "4.6.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "asgiref" +version = "3.8.1" +description = "ASGI specs, helper code, and adapters" +optional = false +python-versions = ">=3.8" +files = [ + {file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"}, + {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, +] + +[package.extras] +tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] + +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "attrs" +version = "24.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, +] + +[package.extras] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] + +[[package]] +name = "authlib" +version = "1.3.2" +description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Authlib-1.3.2-py2.py3-none-any.whl", hash = "sha256:ede026a95e9f5cdc2d4364a52103f5405e75aa156357e831ef2bfd0bc5094dfc"}, + {file = "authlib-1.3.2.tar.gz", hash = "sha256:4b16130117f9eb82aa6eec97f6dd4673c3f960ac0283ccdae2897ee4bc030ba2"}, +] + +[package.dependencies] +cryptography = "*" + +[[package]] +name = "awsipranges" +version = "0.3.3" +description = "Work with the AWS IP address ranges in native Python." 
+optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "awsipranges-0.3.3-py3-none-any.whl", hash = "sha256:f3d7a54aeaf7fe310beb5d377a4034a63a51b72677ae6af3e0967bc4de7eedaf"}, + {file = "awsipranges-0.3.3.tar.gz", hash = "sha256:4f0b3f22a9dc1163c85b513bed812b6c92bdacd674e6a7b68252a3c25b99e2c0"}, +] + +[[package]] +name = "azure-common" +version = "1.1.28" +description = "Microsoft Azure Client Library for Python (Common)" +optional = false +python-versions = "*" +files = [ + {file = "azure-common-1.1.28.zip", hash = "sha256:4ac0cd3214e36b6a1b6a442686722a5d8cc449603aa833f3f0f40bda836704a3"}, + {file = "azure_common-1.1.28-py2.py3-none-any.whl", hash = "sha256:5c12d3dcf4ec20599ca6b0d3e09e86e146353d443e7fcc050c9a19c1f9df20ad"}, +] + +[[package]] +name = "azure-core" +version = "1.31.0" +description = "Microsoft Azure Core Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_core-1.31.0-py3-none-any.whl", hash = "sha256:22954de3777e0250029360ef31d80448ef1be13b80a459bff80ba7073379e2cd"}, + {file = "azure_core-1.31.0.tar.gz", hash = "sha256:656a0dd61e1869b1506b7c6a3b31d62f15984b1a573d6326f6aa2f3e4123284b"}, +] + +[package.dependencies] +requests = ">=2.21.0" +six = ">=1.11.0" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["aiohttp (>=3.0)"] + +[[package]] +name = "azure-identity" +version = "1.19.0" +description = "Microsoft Azure Identity Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_identity-1.19.0-py3-none-any.whl", hash = "sha256:e3f6558c181692d7509f09de10cca527c7dce426776454fb97df512a46527e81"}, + {file = "azure_identity-1.19.0.tar.gz", hash = "sha256:500144dc18197d7019b81501165d4fa92225f03778f17d7ca8a2a180129a9c83"}, +] + +[package.dependencies] +azure-core = ">=1.31.0" +cryptography = ">=2.5" +msal = ">=1.30.0" +msal-extensions = ">=1.2.0" +typing-extensions = ">=4.0.0" + +[[package]] +name = "azure-keyvault-keys" +version = "4.10.0" +description = "Microsoft Azure Key Vault Keys Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_keyvault_keys-4.10.0-py3-none-any.whl", hash = "sha256:210227e0061f641a79755f0e0bcbcf27bbfb4df630a933c43a99a29962283d0d"}, + {file = "azure_keyvault_keys-4.10.0.tar.gz", hash = "sha256:511206ae90aec1726a4d6ff5a92d754bd0c0f1e8751891368d30fb70b62955f1"}, +] + +[package.dependencies] +azure-core = ">=1.31.0" +cryptography = ">=2.1.4" +isodate = ">=0.6.1" +typing-extensions = ">=4.0.1" + +[[package]] +name = "azure-mgmt-applicationinsights" +version = "4.0.0" +description = "Microsoft Azure Application Insights Management Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-mgmt-applicationinsights-4.0.0.zip", hash = "sha256:50c3db05573e0cc2d56314a0556fb346ef05ec489ac000f4d720d92c6b647e06"}, + {file = "azure_mgmt_applicationinsights-4.0.0-py3-none-any.whl", hash = "sha256:2b1ffd9a0114974455795c73a3a5d17c849e32b961d707d2db393b99254b576f"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.3.2,<2.0.0" +isodate = ">=0.6.1,<1.0.0" + +[[package]] +name = "azure-mgmt-authorization" +version = "4.0.0" +description = "Microsoft Azure Authorization Management Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-mgmt-authorization-4.0.0.zip", hash = "sha256:69b85abc09ae64fc72975bd43431170d8c7eb5d166754b98aac5f3845de57dc4"}, + {file = "azure_mgmt_authorization-4.0.0-py3-none-any.whl", hash = 
"sha256:d8feeb3842e6ddf1a370963ca4f61fb6edc124e8997b807dd025bc9b2379cd1a"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.3.2,<2.0.0" +isodate = ">=0.6.1,<1.0.0" + +[[package]] +name = "azure-mgmt-compute" +version = "33.0.0" +description = "Microsoft Azure Compute Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure-mgmt-compute-33.0.0.tar.gz", hash = "sha256:a3cc0fe4f09c8e1d3523c1bfb92620dfe263a0a893b0ac13a33d7057e9ddddd2"}, + {file = "azure_mgmt_compute-33.0.0-py3-none-any.whl", hash = "sha256:155f8d78a1fdedcea1725fd12b85b2d87fbcb6b53f8e77451c644f45701e3bcf"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-mgmt-containerregistry" +version = "10.3.0" +description = "Microsoft Azure Container Registry Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-mgmt-containerregistry-10.3.0.tar.gz", hash = "sha256:ae21651855dfb19c42d91d6b3a965c6c611e23f8bc4bf7138835e652d2f918e3"}, + {file = "azure_mgmt_containerregistry-10.3.0-py3-none-any.whl", hash = "sha256:851e1c57f9bc4a3589c6b21fb627c11fd6cbb57a0388b7dfccd530ba3160805f"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.3.2,<2.0.0" +isodate = ">=0.6.1,<1.0.0" + +[[package]] +name = "azure-mgmt-containerservice" +version = "33.0.0" +description = "Microsoft Azure Container Service Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_mgmt_containerservice-33.0.0-py3-none-any.whl", hash = "sha256:5b36cb25075c9fc3070135b5a45debae9eca3183b00f8c21faddb37b2daf4a60"}, + {file = "azure_mgmt_containerservice-33.0.0.tar.gz", hash = "sha256:868583dcdb8a4905de03a84a9b7903d76a1cb59acd9c3736f02bc743b5047c9e"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-mgmt-core" +version = "1.4.0" +description = "Microsoft Azure Management Core Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-mgmt-core-1.4.0.zip", hash = "sha256:d195208340094f98e5a6661b781cde6f6a051e79ce317caabd8ff97030a9b3ae"}, + {file = "azure_mgmt_core-1.4.0-py3-none-any.whl", hash = "sha256:81071675f186a585555ef01816f2774d49c1c9024cb76e5720c3c0f6b337bb7d"}, +] + +[package.dependencies] +azure-core = ">=1.26.2,<2.0.0" + +[[package]] +name = "azure-mgmt-cosmosdb" +version = "9.6.0" +description = "Microsoft Azure Cosmos DB Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_mgmt_cosmosdb-9.6.0-py3-none-any.whl", hash = "sha256:02b4108867de58e0b89a206ee7b7588b439e1f6fef2377ce1979b803a0d02d5a"}, + {file = "azure_mgmt_cosmosdb-9.6.0.tar.gz", hash = "sha256:667c7d8a8f542b0e7972e63274af536ad985187e24a6cc2e3c8eef35560881fc"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-mgmt-keyvault" +version = "10.3.1" +description = "Microsoft Azure Key Vault Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure-mgmt-keyvault-10.3.1.tar.gz", hash = "sha256:34b92956aefbdd571cae5a03f7078e037d8087b2c00cfa6748835dc73abb5a30"}, + {file = "azure_mgmt_keyvault-10.3.1-py3-none-any.whl", hash = 
"sha256:a18a27a06551482d31f92bc43ac8b0846af02cd69511f80090865b4c5caa3c21"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-mgmt-monitor" +version = "6.0.2" +description = "Microsoft Azure Monitor Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-mgmt-monitor-6.0.2.tar.gz", hash = "sha256:5ffbf500e499ab7912b1ba6d26cef26480d9ae411532019bb78d72562196e07b"}, + {file = "azure_mgmt_monitor-6.0.2-py3-none-any.whl", hash = "sha256:fe4cf41e6680b74a228f81451dc5522656d599c6f343ecf702fc790fda9a357b"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.3.2,<2.0.0" +isodate = ">=0.6.1,<1.0.0" + +[[package]] +name = "azure-mgmt-network" +version = "28.0.0" +description = "Microsoft Azure Network Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_mgmt_network-28.0.0-py3-none-any.whl", hash = "sha256:2ee23c1f2ba75752187bd7f4c3e94ad172282cbf8153694feadc7886ef88493c"}, + {file = "azure_mgmt_network-28.0.0.tar.gz", hash = "sha256:40356d348ef4838324f19a41cd80340b4f8dd4ac2f0a18a4cbd5cc95ef2974f3"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-mgmt-rdbms" +version = "10.1.0" +description = "Microsoft Azure RDBMS Management Client Library for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "azure-mgmt-rdbms-10.1.0.zip", hash = "sha256:a87d401c876c84734cdd4888af551e4a1461b4b328d9816af60cb8ac5979f035"}, + {file = "azure_mgmt_rdbms-10.1.0-py3-none-any.whl", hash = "sha256:8eac17d1341a91d7ed914435941ba917b5ef1568acabc3e65653603966a7cc88"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.3.0,<2.0.0" +msrest = ">=0.6.21" + +[[package]] +name = "azure-mgmt-resource" +version = "23.2.0" +description = "Microsoft Azure Resource Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_mgmt_resource-23.2.0-py3-none-any.whl", hash = "sha256:7af2bca928ecd58e57ea7f7731d245f45e9d927036d82f1d30b96baa0c26b569"}, + {file = "azure_mgmt_resource-23.2.0.tar.gz", hash = "sha256:747b750df7af23ab30e53d3f36247ab0c16de1e267d666b1a5077c39a4292529"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-mgmt-security" +version = "7.0.0" +description = "Microsoft Azure Security Center Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure-mgmt-security-7.0.0.tar.gz", hash = "sha256:5912eed7e9d3758fdca8d26e1dc26b41943dc4703208a1184266e2c252e1ad66"}, + {file = "azure_mgmt_security-7.0.0-py3-none-any.whl", hash = "sha256:85a6d8b7a5cd74884a548ed53fed034449f54a9989edd64e9020c5837db96933"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" + +[[package]] +name = "azure-mgmt-sql" +version = "3.0.1" +description = "Microsoft Azure SQL Management Client Library for Python" +optional = false +python-versions = "*" +files = [ + {file = "azure-mgmt-sql-3.0.1.zip", hash = "sha256:129042cc011225e27aee6ef2697d585fa5722e5d1aeb0038af6ad2451a285457"}, + {file = "azure_mgmt_sql-3.0.1-py2.py3-none-any.whl", hash = 
"sha256:1d1dd940d4d41be4ee319aad626341251572a5bf4a2addec71779432d9a1381f"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.2.0,<2.0.0" +msrest = ">=0.6.21" + +[[package]] +name = "azure-mgmt-storage" +version = "21.2.1" +description = "Microsoft Azure Storage Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure-mgmt-storage-21.2.1.tar.gz", hash = "sha256:503a7ff9c31254092b0656445f5728bfdfda2d09d46a82e97019eaa9a1ecec64"}, + {file = "azure_mgmt_storage-21.2.1-py3-none-any.whl", hash = "sha256:f97df1fa39cde9dbacf2cd96c9cba1fc196932185e24853e276f74b18a0bd031"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" + +[[package]] +name = "azure-mgmt-subscription" +version = "3.1.1" +description = "Microsoft Azure Subscription Management Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-mgmt-subscription-3.1.1.zip", hash = "sha256:4e255b4ce9b924357bb8c5009b3c88a2014d3203b2495e2256fa027bf84e800e"}, + {file = "azure_mgmt_subscription-3.1.1-py3-none-any.whl", hash = "sha256:38d4574a8d47fa17e3587d756e296cb63b82ad8fb21cd8543bcee443a502bf48"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-mgmt-core = ">=1.3.2,<2.0.0" +msrest = ">=0.7.1" + +[[package]] +name = "azure-mgmt-web" +version = "7.3.1" +description = "Microsoft Azure Web Apps Management Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure-mgmt-web-7.3.1.tar.gz", hash = "sha256:87b771436bc99a7a8df59d0ad185b96879a06dce14764a06b3fc3dafa8fcb56b"}, + {file = "azure_mgmt_web-7.3.1-py3-none-any.whl", hash = "sha256:ccf881e3ab31c3fdbf9cbff32773d9c0006b5dcd621ea074d7ec89e51049fb72"}, +] + +[package.dependencies] +azure-common = ">=1.1" +azure-mgmt-core = ">=1.3.2" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[[package]] +name = "azure-storage-blob" +version = "12.24.0" +description = "Microsoft Azure Blob Storage Client Library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "azure_storage_blob-12.24.0-py3-none-any.whl", hash = "sha256:4f0bb4592ea79a2d986063696514c781c9e62be240f09f6397986e01755bc071"}, + {file = "azure_storage_blob-12.24.0.tar.gz", hash = "sha256:eaaaa1507c8c363d6e1d1342bd549938fdf1adec9b1ada8658c8f5bf3aea844e"}, +] + +[package.dependencies] +azure-core = ">=1.30.0" +cryptography = ">=2.1.4" +isodate = ">=0.6.1" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["azure-core[aio] (>=1.30.0)"] + +[[package]] +name = "bandit" +version = "1.7.9" +description = "Security oriented static analyser for python code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "bandit-1.7.9-py3-none-any.whl", hash = "sha256:52077cb339000f337fb25f7e045995c4ad01511e716e5daac37014b9752de8ec"}, + {file = "bandit-1.7.9.tar.gz", hash = "sha256:7c395a436743018f7be0a4cbb0a4ea9b902b6d87264ddecf8cfdc73b4f78ff61"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""} +PyYAML = ">=5.3.1" +rich = "*" +stevedore = ">=1.20.0" + +[package.extras] +baseline = ["GitPython (>=3.1.30)"] +sarif = ["jschema-to-python (>=1.2.3)", "sarif-om (>=1.0.4)"] +test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)"] +toml = ["tomli (>=1.1.0)"] +yaml = ["PyYAML"] + +[[package]] +name = "billiard" +version = "4.2.1" +description = "Python multiprocessing fork with improvements and bugfixes" +optional = false +python-versions = ">=3.7" +files = [ + {file = "billiard-4.2.1-py3-none-any.whl", hash = "sha256:40b59a4ac8806ba2c2369ea98d876bc6108b051c227baffd928c644d15d8f3cb"}, + {file = "billiard-4.2.1.tar.gz", hash = "sha256:12b641b0c539073fc8d3f5b8b7be998956665c4233c7c1fcd66a7e677c4fb36f"}, +] + +[[package]] +name = "blinker" +version = "1.8.2" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.8" +files = [ + {file = "blinker-1.8.2-py3-none-any.whl", hash = "sha256:1779309f71bf239144b9399d06ae925637cf6634cf6bd131104184531bf67c01"}, + {file = "blinker-1.8.2.tar.gz", hash = "sha256:8f77b09d3bf7c795e969e9486f39c2c5e9c39d4ee07424be2bc594ece9642d83"}, +] + +[[package]] +name = "boto3" +version = "1.35.60" +description = "The AWS SDK for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "boto3-1.35.60-py3-none-any.whl", hash = "sha256:a34d28de1a1f6ca6ec3edd05c26db16e422293d8f9dcd94f308059a434596753"}, + {file = "boto3-1.35.60.tar.gz", hash = "sha256:e573504c67c3e438fd4b0222119ed1a73b644c78eb3b6dee0b36a6c70ecf7677"}, +] + +[package.dependencies] +botocore = ">=1.35.60,<1.36.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.10.0,<0.11.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.35.60" +description = "Low-level, data-driven core of boto 3." +optional = false +python-versions = ">=3.8" +files = [ + {file = "botocore-1.35.60-py3-none-any.whl", hash = "sha256:ddccfc39a0a55ac0321191a36d29c2ea9be2c96ceefb3928dd3c91c79c494d50"}, + {file = "botocore-1.35.60.tar.gz", hash = "sha256:378f53037d817bed2c04a006b7319745e664030182211429c924647273b29bc9"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.22.0)"] + +[[package]] +name = "cachetools" +version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + +[[package]] +name = "celery" +version = "5.4.0" +description = "Distributed Task Queue." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "celery-5.4.0-py3-none-any.whl", hash = "sha256:369631eb580cf8c51a82721ec538684994f8277637edde2dfc0dacd73ed97f64"}, + {file = "celery-5.4.0.tar.gz", hash = "sha256:504a19140e8d3029d5acad88330c541d4c3f64c789d85f94756762d8bca7e706"}, +] + +[package.dependencies] +billiard = ">=4.2.0,<5.0" +click = ">=8.1.2,<9.0" +click-didyoumean = ">=0.3.0" +click-plugins = ">=1.1.1" +click-repl = ">=0.2.0" +kombu = ">=5.3.4,<6.0" +pytest-celery = {version = ">=1.0.0", extras = ["all"], optional = true, markers = "extra == \"pytest\""} +python-dateutil = ">=2.8.2" +tzdata = ">=2022.7" +vine = ">=5.1.0,<6.0" + +[package.extras] +arangodb = ["pyArango (>=2.0.2)"] +auth = ["cryptography (==42.0.5)"] +azureblockblob = ["azure-storage-blob (>=12.15.0)"] +brotli = ["brotli (>=1.0.0)", "brotlipy (>=0.7.0)"] +cassandra = ["cassandra-driver (>=3.25.0,<4)"] +consul = ["python-consul2 (==0.1.5)"] +cosmosdbsql = ["pydocumentdb (==2.3.5)"] +couchbase = ["couchbase (>=3.0.0)"] +couchdb = ["pycouchdb (==1.14.2)"] +django = ["Django (>=2.2.28)"] +dynamodb = ["boto3 (>=1.26.143)"] +elasticsearch = ["elastic-transport (<=8.13.0)", "elasticsearch (<=8.13.0)"] +eventlet = ["eventlet (>=0.32.0)"] +gcs = ["google-cloud-storage (>=2.10.0)"] +gevent = ["gevent (>=1.5.0)"] +librabbitmq = ["librabbitmq (>=2.0.0)"] +memcache = ["pylibmc (==1.6.3)"] +mongodb = ["pymongo[srv] (>=4.0.2)"] +msgpack = ["msgpack (==1.0.8)"] +pymemcache = ["python-memcached (>=1.61)"] +pyro = ["pyro4 (==4.82)"] +pytest = ["pytest-celery[all] (>=1.0.0)"] +redis = ["redis (>=4.5.2,!=4.5.5,<6.0.0)"] +s3 = ["boto3 (>=1.26.143)"] +slmq = ["softlayer-messaging (>=1.0.3)"] +solar = ["ephem (==4.1.5)"] +sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"] +sqs = ["boto3 (>=1.26.143)", "kombu[sqs] (>=5.3.4)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"] +tblib = ["tblib (>=1.3.0)", "tblib (>=1.5.0)"] +yaml = ["PyYAML (>=3.10)"] +zookeeper = ["kazoo (>=1.3.1)"] +zstd = ["zstandard (==0.22.0)"] + +[[package]] +name = "certifi" +version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash 
= "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", 
hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "click-didyoumean" +version = "0.3.1" +description = "Enables git-like *did-you-mean* feature in click" +optional = false +python-versions = ">=3.6.2" +files = [ + {file = "click_didyoumean-0.3.1-py3-none-any.whl", hash = "sha256:5c4bb6007cfea5f2fd6583a2fb6701a22a41eb98957e63d0fac41c10e7c3117c"}, + {file = "click_didyoumean-0.3.1.tar.gz", hash = "sha256:4f82fdff0dbe64ef8ab2279bd6aa3f6a99c3b28c05aa09cbfc07c9d7fbb5a463"}, +] + +[package.dependencies] +click = ">=7" + +[[package]] +name = "click-plugins" +version = "1.1.1" +description = "An extension module for click to enable registering CLI commands via setuptools entry-points." +optional = false +python-versions = "*" +files = [ + {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, + {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, +] + +[package.dependencies] +click = ">=4.0" + +[package.extras] +dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] + +[[package]] +name = "click-repl" +version = "0.3.0" +description = "REPL plugin for Click" +optional = false +python-versions = ">=3.6" +files = [ + {file = "click-repl-0.3.0.tar.gz", hash = "sha256:17849c23dba3d667247dc4defe1757fff98694e90fe37474f3feebb69ced26a9"}, + {file = "click_repl-0.3.0-py3-none-any.whl", hash = "sha256:fb7e06deb8da8de86180a33a9da97ac316751c094c6899382da7feeeeb51b812"}, +] + +[package.dependencies] +click = ">=7.0" +prompt-toolkit = ">=3.0.36" + +[package.extras] +testing = ["pytest (>=7.2.1)", "pytest-cov (>=4.0.0)", "tox (>=4.4.3)"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.5.4" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "coverage-7.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6cfb5a4f556bb51aba274588200a46e4dd6b505fb1a5f8c5ae408222eb416f99"}, + {file = "coverage-7.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2174e7c23e0a454ffe12267a10732c273243b4f2d50d07544a91198f05c48f47"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2214ee920787d85db1b6a0bd9da5f8503ccc8fcd5814d90796c2f2493a2f4d2e"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1137f46adb28e3813dec8c01fefadcb8c614f33576f672962e323b5128d9a68d"}, + {file = "coverage-7.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b385d49609f8e9efc885790a5a0e89f2e3ae042cdf12958b6034cc442de428d3"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b4a474f799456e0eb46d78ab07303286a84a3140e9700b9e154cfebc8f527016"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5cd64adedf3be66f8ccee418473c2916492d53cbafbfcff851cbec5a8454b136"}, + {file = "coverage-7.5.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e564c2cf45d2f44a9da56f4e3a26b2236504a496eb4cb0ca7221cd4cc7a9aca9"}, + {file = "coverage-7.5.4-cp310-cp310-win32.whl", hash = "sha256:7076b4b3a5f6d2b5d7f1185fde25b1e54eb66e647a1dfef0e2c2bfaf9b4c88c8"}, + {file = "coverage-7.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:018a12985185038a5b2bcafab04ab833a9a0f2c59995b3cec07e10074c78635f"}, + {file = "coverage-7.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:db14f552ac38f10758ad14dd7b983dbab424e731588d300c7db25b6f89e335b5"}, + {file = "coverage-7.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3257fdd8e574805f27bb5342b77bc65578e98cbc004a92232106344053f319ba"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a6612c99081d8d6134005b1354191e103ec9705d7ba2754e848211ac8cacc6b"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d45d3cbd94159c468b9b8c5a556e3f6b81a8d1af2a92b77320e887c3e7a5d080"}, + {file = "coverage-7.5.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed550e7442f278af76d9d65af48069f1fb84c9f745ae249c1a183c1e9d1b025c"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a892be37ca35eb5019ec85402c3371b0f7cda5ab5056023a7f13da0961e60da"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8192794d120167e2a64721d88dbd688584675e86e15d0569599257566dec9bf0"}, + {file = "coverage-7.5.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:820bc841faa502e727a48311948e0461132a9c8baa42f6b2b84a29ced24cc078"}, + {file = "coverage-7.5.4-cp311-cp311-win32.whl", hash = 
"sha256:6aae5cce399a0f065da65c7bb1e8abd5c7a3043da9dceb429ebe1b289bc07806"}, + {file = "coverage-7.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:d2e344d6adc8ef81c5a233d3a57b3c7d5181f40e79e05e1c143da143ccb6377d"}, + {file = "coverage-7.5.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:54317c2b806354cbb2dc7ac27e2b93f97096912cc16b18289c5d4e44fc663233"}, + {file = "coverage-7.5.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:042183de01f8b6d531e10c197f7f0315a61e8d805ab29c5f7b51a01d62782747"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6bb74ed465d5fb204b2ec41d79bcd28afccf817de721e8a807d5141c3426638"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3d45ff86efb129c599a3b287ae2e44c1e281ae0f9a9bad0edc202179bcc3a2e"}, + {file = "coverage-7.5.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5013ed890dc917cef2c9f765c4c6a8ae9df983cd60dbb635df8ed9f4ebc9f555"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1014fbf665fef86cdfd6cb5b7371496ce35e4d2a00cda501cf9f5b9e6fced69f"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3684bc2ff328f935981847082ba4fdc950d58906a40eafa93510d1b54c08a66c"}, + {file = "coverage-7.5.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:581ea96f92bf71a5ec0974001f900db495488434a6928a2ca7f01eee20c23805"}, + {file = "coverage-7.5.4-cp312-cp312-win32.whl", hash = "sha256:73ca8fbc5bc622e54627314c1a6f1dfdd8db69788f3443e752c215f29fa87a0b"}, + {file = "coverage-7.5.4-cp312-cp312-win_amd64.whl", hash = "sha256:cef4649ec906ea7ea5e9e796e68b987f83fa9a718514fe147f538cfeda76d7a7"}, + {file = "coverage-7.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdd31315fc20868c194130de9ee6bfd99755cc9565edff98ecc12585b90be882"}, + {file = "coverage-7.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:02ff6e898197cc1e9fa375581382b72498eb2e6d5fc0b53f03e496cfee3fac6d"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d05c16cf4b4c2fc880cb12ba4c9b526e9e5d5bb1d81313d4d732a5b9fe2b9d53"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5986ee7ea0795a4095ac4d113cbb3448601efca7f158ec7f7087a6c705304e4"}, + {file = "coverage-7.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df54843b88901fdc2f598ac06737f03d71168fd1175728054c8f5a2739ac3e4"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab73b35e8d109bffbda9a3e91c64e29fe26e03e49addf5b43d85fc426dde11f9"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:aea072a941b033813f5e4814541fc265a5c12ed9720daef11ca516aeacd3bd7f"}, + {file = "coverage-7.5.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:16852febd96acd953b0d55fc842ce2dac1710f26729b31c80b940b9afcd9896f"}, + {file = "coverage-7.5.4-cp38-cp38-win32.whl", hash = "sha256:8f894208794b164e6bd4bba61fc98bf6b06be4d390cf2daacfa6eca0a6d2bb4f"}, + {file = "coverage-7.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:e2afe743289273209c992075a5a4913e8d007d569a406ffed0bd080ea02b0633"}, + {file = "coverage-7.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b95c3a8cb0463ba9f77383d0fa8c9194cf91f64445a63fc26fb2327e1e1eb088"}, + {file = 
"coverage-7.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3d7564cc09dd91b5a6001754a5b3c6ecc4aba6323baf33a12bd751036c998be4"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44da56a2589b684813f86d07597fdf8a9c6ce77f58976727329272f5a01f99f7"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e16f3d6b491c48c5ae726308e6ab1e18ee830b4cdd6913f2d7f77354b33f91c8"}, + {file = "coverage-7.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbc5958cb471e5a5af41b0ddaea96a37e74ed289535e8deca404811f6cb0bc3d"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a04e990a2a41740b02d6182b498ee9796cf60eefe40cf859b016650147908029"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ddbd2f9713a79e8e7242d7c51f1929611e991d855f414ca9996c20e44a895f7c"}, + {file = "coverage-7.5.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b1ccf5e728ccf83acd313c89f07c22d70d6c375a9c6f339233dcf792094bcbf7"}, + {file = "coverage-7.5.4-cp39-cp39-win32.whl", hash = "sha256:56b4eafa21c6c175b3ede004ca12c653a88b6f922494b023aeb1e836df953ace"}, + {file = "coverage-7.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:65e528e2e921ba8fd67d9055e6b9f9e34b21ebd6768ae1c1723f4ea6ace1234d"}, + {file = "coverage-7.5.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:79b356f3dd5b26f3ad23b35c75dbdaf1f9e2450b6bcefc6d0825ea0aa3f86ca5"}, + {file = "coverage-7.5.4.tar.gz", hash = "sha256:a44963520b069e12789d0faea4e9fdb1e410cdc4aab89d94f7f55cbb7fef0353"}, +] + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "cron-descriptor" +version = "1.4.5" +description = "A Python library that converts cron expressions into human readable strings." +optional = false +python-versions = "*" +files = [ + {file = "cron_descriptor-1.4.5-py3-none-any.whl", hash = "sha256:736b3ae9d1a99bc3dbfc5b55b5e6e7c12031e7ba5de716625772f8b02dcd6013"}, + {file = "cron_descriptor-1.4.5.tar.gz", hash = "sha256:f51ce4ffc1d1f2816939add8524f206c376a42c87a5fca3091ce26725b3b1bca"}, +] + +[package.extras] +dev = ["polib"] + +[[package]] +name = "cryptography" +version = "43.0.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "dash" +version = "2.18.2" +description = "A Python framework for building reactive web-apps. Developed by Plotly." +optional = false +python-versions = ">=3.8" +files = [ + {file = "dash-2.18.2-py3-none-any.whl", hash = "sha256:0ce0479d1bc958e934630e2de7023b8a4558f23ce1f9f5a4b34b65eb3903a869"}, + {file = "dash-2.18.2.tar.gz", hash = "sha256:20e8404f73d0fe88ce2eae33c25bbc513cbe52f30d23a401fa5f24dbb44296c8"}, +] + +[package.dependencies] +dash-core-components = "2.0.0" +dash-html-components = "2.0.0" +dash-table = "5.0.0" +Flask = ">=1.0.4,<3.1" +importlib-metadata = "*" +nest-asyncio = "*" +plotly = ">=5.0.0" +requests = "*" +retrying = "*" +setuptools = "*" +typing-extensions = ">=4.1.1" +Werkzeug = "<3.1" + +[package.extras] +celery = ["celery[redis] (>=5.1.2)", "redis (>=3.5.3)"] +ci = ["black (==22.3.0)", "dash-dangerously-set-inner-html", "dash-flow-example (==0.0.5)", "flake8 (==7.0.0)", "flaky (==3.8.1)", "flask-talisman (==1.0.0)", "jupyterlab (<4.0.0)", "mimesis (<=11.1.0)", "mock (==4.0.3)", "numpy (<=1.26.3)", "openpyxl", "orjson (==3.10.3)", "pandas (>=1.4.0)", "pyarrow", "pylint (==3.0.3)", "pytest-mock", "pytest-rerunfailures", "pytest-sugar (==0.9.6)", "pyzmq (==25.1.2)", "xlrd (>=2.0.1)"] +compress = ["flask-compress"] +dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"] +diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"] +testing = ["beautifulsoup4 (>=4.8.2)", "cryptography", "dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"] + +[[package]] +name = "dash-bootstrap-components" +version = "1.6.0" +description = "Bootstrap themed components for use in Plotly Dash" +optional = false +python-versions = "<4,>=3.8" +files = [ + {file = "dash_bootstrap_components-1.6.0-py3-none-any.whl", hash = "sha256:97f0f47b38363f18863e1b247462229266ce12e1e171cfb34d3c9898e6e5cd1e"}, + {file = "dash_bootstrap_components-1.6.0.tar.gz", hash = "sha256:960a1ec9397574792f49a8241024fa3cecde0f5930c971a3fc81f016cbeb1095"}, +] + +[package.dependencies] +dash = ">=2.0.0" + +[package.extras] +pandas = ["numpy", "pandas"] + +[[package]] +name = 
"dash-core-components" +version = "2.0.0" +description = "Core component suite for Dash" +optional = false +python-versions = "*" +files = [ + {file = "dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346"}, + {file = "dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee"}, +] + +[[package]] +name = "dash-html-components" +version = "2.0.0" +description = "Vanilla HTML components for Dash" +optional = false +python-versions = "*" +files = [ + {file = "dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63"}, + {file = "dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50"}, +] + +[[package]] +name = "dash-table" +version = "5.0.0" +description = "Dash table" +optional = false +python-versions = "*" +files = [ + {file = "dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9"}, + {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"}, +] + +[[package]] +name = "debugpy" +version = "1.8.6" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "debugpy-1.8.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:30f467c5345d9dfdcc0afdb10e018e47f092e383447500f125b4e013236bf14b"}, + {file = "debugpy-1.8.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d73d8c52614432f4215d0fe79a7e595d0dd162b5c15233762565be2f014803b"}, + {file = "debugpy-1.8.6-cp310-cp310-win32.whl", hash = "sha256:e3e182cd98eac20ee23a00653503315085b29ab44ed66269482349d307b08df9"}, + {file = "debugpy-1.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:e3a82da039cfe717b6fb1886cbbe5c4a3f15d7df4765af857f4307585121c2dd"}, + {file = "debugpy-1.8.6-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:67479a94cf5fd2c2d88f9615e087fcb4fec169ec780464a3f2ba4a9a2bb79955"}, + {file = "debugpy-1.8.6-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb8653f6cbf1dd0a305ac1aa66ec246002145074ea57933978346ea5afdf70b"}, + {file = "debugpy-1.8.6-cp311-cp311-win32.whl", hash = "sha256:cdaf0b9691879da2d13fa39b61c01887c34558d1ff6e5c30e2eb698f5384cd43"}, + {file = "debugpy-1.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:43996632bee7435583952155c06881074b9a742a86cee74e701d87ca532fe833"}, + {file = "debugpy-1.8.6-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:db891b141fc6ee4b5fc6d1cc8035ec329cabc64bdd2ae672b4550c87d4ecb128"}, + {file = "debugpy-1.8.6-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:567419081ff67da766c898ccf21e79f1adad0e321381b0dfc7a9c8f7a9347972"}, + {file = "debugpy-1.8.6-cp312-cp312-win32.whl", hash = "sha256:c9834dfd701a1f6bf0f7f0b8b1573970ae99ebbeee68314116e0ccc5c78eea3c"}, + {file = "debugpy-1.8.6-cp312-cp312-win_amd64.whl", hash = "sha256:e4ce0570aa4aca87137890d23b86faeadf184924ad892d20c54237bcaab75d8f"}, + {file = "debugpy-1.8.6-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:df5dc9eb4ca050273b8e374a4cd967c43be1327eeb42bfe2f58b3cdfe7c68dcb"}, + {file = 
"debugpy-1.8.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a85707c6a84b0c5b3db92a2df685b5230dd8fb8c108298ba4f11dba157a615a"}, + {file = "debugpy-1.8.6-cp38-cp38-win32.whl", hash = "sha256:538c6cdcdcdad310bbefd96d7850be1cd46e703079cc9e67d42a9ca776cdc8a8"}, + {file = "debugpy-1.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:22140bc02c66cda6053b6eb56dfe01bbe22a4447846581ba1dd6df2c9f97982d"}, + {file = "debugpy-1.8.6-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:c1cef65cffbc96e7b392d9178dbfd524ab0750da6c0023c027ddcac968fd1caa"}, + {file = "debugpy-1.8.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1e60bd06bb3cc5c0e957df748d1fab501e01416c43a7bdc756d2a992ea1b881"}, + {file = "debugpy-1.8.6-cp39-cp39-win32.whl", hash = "sha256:f7158252803d0752ed5398d291dee4c553bb12d14547c0e1843ab74ee9c31123"}, + {file = "debugpy-1.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3358aa619a073b620cd0d51d8a6176590af24abcc3fe2e479929a154bf591b51"}, + {file = "debugpy-1.8.6-py2.py3-none-any.whl", hash = "sha256:b48892df4d810eff21d3ef37274f4c60d32cdcafc462ad5647239036b0f0649f"}, + {file = "debugpy-1.8.6.zip", hash = "sha256:c931a9371a86784cee25dec8d65bc2dc7a21f3f1552e3833d9ef8f919d22280a"}, +] + +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + +[[package]] +name = "detect_secrets" +version = "1.5.0" +description = "Tool for detecting secrets in the codebase" +optional = false +python-versions = "*" +files = [] +develop = false + +[package.dependencies] +pyyaml = "*" +requests = "*" + +[package.extras] +gibberish = ["gibberish-detector"] +word-list = ["pyahocorasick"] + +[package.source] +type = "git" +url = "https://github.com/Yelp/detect-secrets.git" +reference = "master" +resolved_reference = "462720710ec337300fab2b4f2290949c7ee141eb" + +[[package]] +name = "dill" +version = "0.3.9" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "django" +version = "5.1.1" +description = "A high-level Python web framework that encourages rapid development and clean, pragmatic design." 
+optional = false +python-versions = ">=3.10" +files = [ + {file = "Django-5.1.1-py3-none-any.whl", hash = "sha256:71603f27dac22a6533fb38d83072eea9ddb4017fead6f67f2562a40402d61c3f"}, + {file = "Django-5.1.1.tar.gz", hash = "sha256:021ffb7fdab3d2d388bc8c7c2434eb9c1f6f4d09e6119010bbb1694dda286bc2"}, +] + +[package.dependencies] +asgiref = ">=3.8.1,<4" +sqlparse = ">=0.3.1" +tzdata = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +argon2 = ["argon2-cffi (>=19.1.0)"] +bcrypt = ["bcrypt"] + +[[package]] +name = "django-celery-beat" +version = "2.7.0" +description = "Database-backed Periodic Tasks." +optional = false +python-versions = ">=3.8" +files = [ + {file = "django_celery_beat-2.7.0-py3-none-any.whl", hash = "sha256:851c680d8fbf608ca5fecd5836622beea89fa017bc2b3f94a5b8c648c32d84b1"}, + {file = "django_celery_beat-2.7.0.tar.gz", hash = "sha256:8482034925e09b698c05ad61c36ed2a8dbc436724a3fe119215193a4ca6dc967"}, +] + +[package.dependencies] +celery = ">=5.2.3,<6.0" +cron-descriptor = ">=1.2.32" +Django = ">=2.2,<5.2" +django-timezone-field = ">=5.0" +python-crontab = ">=2.3.4" +tzdata = "*" + +[[package]] +name = "django-celery-results" +version = "2.5.1" +description = "Celery result backends for Django." +optional = false +python-versions = "*" +files = [ + {file = "django_celery_results-2.5.1-py3-none-any.whl", hash = "sha256:0da4cd5ecc049333e4524a23fcfc3460dfae91aa0a60f1fae4b6b2889c254e01"}, + {file = "django_celery_results-2.5.1.tar.gz", hash = "sha256:3ecb7147f773f34d0381bac6246337ce4cf88a2ea7b82774ed48e518b67bb8fd"}, +] + +[package.dependencies] +celery = ">=5.2.7,<6.0" +Django = ">=3.2.18" + +[[package]] +name = "django-cors-headers" +version = "4.4.0" +description = "django-cors-headers is a Django application for handling the server headers required for Cross-Origin Resource Sharing (CORS)." +optional = false +python-versions = ">=3.8" +files = [ + {file = "django_cors_headers-4.4.0-py3-none-any.whl", hash = "sha256:5c6e3b7fe870876a1efdfeb4f433782c3524078fa0dc9e0195f6706ce7a242f6"}, + {file = "django_cors_headers-4.4.0.tar.gz", hash = "sha256:92cf4633e22af67a230a1456cb1b7a02bb213d6536d2dcb2a4a24092ea9cebc2"}, +] + +[package.dependencies] +asgiref = ">=3.6" +django = ">=3.2" + +[[package]] +name = "django-environ" +version = "0.11.2" +description = "A package that allows you to utilize 12factor inspired environment variables to configure your Django application." +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "django-environ-0.11.2.tar.gz", hash = "sha256:f32a87aa0899894c27d4e1776fa6b477e8164ed7f6b3e410a62a6d72caaf64be"}, + {file = "django_environ-0.11.2-py2.py3-none-any.whl", hash = "sha256:0ff95ab4344bfeff693836aa978e6840abef2e2f1145adff7735892711590c05"}, +] + +[package.extras] +develop = ["coverage[toml] (>=5.0a4)", "furo (>=2021.8.17b43,<2021.9.dev0)", "pytest (>=4.6.11)", "sphinx (>=3.5.0)", "sphinx-notfound-page"] +docs = ["furo (>=2021.8.17b43,<2021.9.dev0)", "sphinx (>=3.5.0)", "sphinx-notfound-page"] +testing = ["coverage[toml] (>=5.0a4)", "pytest (>=4.6.11)"] + +[[package]] +name = "django-filter" +version = "24.3" +description = "Django-filter is a reusable Django application for allowing users to filter querysets dynamically." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "django_filter-24.3-py3-none-any.whl", hash = "sha256:c4852822928ce17fb699bcfccd644b3574f1a2d80aeb2b4ff4f16b02dd49dc64"}, + {file = "django_filter-24.3.tar.gz", hash = "sha256:d8ccaf6732afd21ca0542f6733b11591030fa98669f8d15599b358e24a2cd9c3"}, +] + +[package.dependencies] +Django = ">=4.2" + +[[package]] +name = "django-guid" +version = "3.5.0" +description = "Middleware that enables single request-response cycle tracing by injecting a unique ID into project logs" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "django_guid-3.5.0-py3-none-any.whl", hash = "sha256:28f52cfeac47e8e22ea889a3845bc2b1c604dd842e495dadd44ad5184db72c76"}, + {file = "django_guid-3.5.0.tar.gz", hash = "sha256:5f32f70287e4f36addc79f29f2a7b2f56fc5f4e4cfb2023141525be8baa35d9e"}, +] + +[package.dependencies] +django = {version = ">=4.0,<6.0", markers = "python_version >= \"3.10\""} + +[[package]] +name = "django-postgres-extra" +version = "2.0.9rc11" +description = "Bringing all of PostgreSQL's awesomeness to Django." +optional = false +python-versions = ">=3.6" +files = [ + {file = "django_postgres_extra-2.0.9rc11-py3-none-any.whl", hash = "sha256:23fb08261963bebf6560a2bb248b2c404f9c5f650734472ff074977952efbd56"}, + {file = "django_postgres_extra-2.0.9rc11.tar.gz", hash = "sha256:a7738125c84d133d1dbbdeab66dd14e897e01284b5e1e02e588c84891c8b8ded"}, +] + +[package.dependencies] +Django = ">=2.0,<6.0" +python-dateutil = ">=2.8.0,<=3.0.0" + +[package.extras] +analysis = ["autoflake (==1.4)", "autopep8 (==1.6.0)", "black (==22.3.0)", "django-stubs (==1.16.0)", "django-stubs (==1.9.0)", "docformatter (==1.4)", "flake8 (==4.0.1)", "isort (==5.10.0)", "mypy (==0.971)", "mypy (==1.2.0)", "types-dj-database-url (==1.3.0.0)", "types-psycopg2 (==2.9.21.9)", "types-python-dateutil (==2.8.19.12)", "typing-extensions (==4.1.0)", "typing-extensions (==4.5.0)"] +docs = ["Sphinx (==2.2.0)", "docutils (<0.18)", "sphinx-rtd-theme (==0.4.3)"] +publish = ["build (==0.7.0)", "twine (==3.7.1)"] +test = ["coveralls (==3.3.0)", "dj-database-url (==0.5.0)", "freezegun (==1.1.0)", "psycopg2 (>=2.8.4,<3.0.0)", "pytest (==6.2.5)", "pytest-benchmark (==3.4.1)", "pytest-cov (==3.0.0)", "pytest-django (==4.4.0)", "pytest-freezegun (==0.4.2)", "pytest-lazy-fixture (==0.6.3)", "snapshottest (==0.6.0)", "tox (==3.24.4)"] + +[[package]] +name = "django-timezone-field" +version = "7.0" +description = "A Django app providing DB, form, and REST framework fields for zoneinfo and pytz timezone objects." +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "django_timezone_field-7.0-py3-none-any.whl", hash = "sha256:3232e7ecde66ba4464abb6f9e6b8cc739b914efb9b29dc2cf2eee451f7cc2acb"}, + {file = "django_timezone_field-7.0.tar.gz", hash = "sha256:aa6f4965838484317b7f08d22c0d91a53d64e7bbbd34264468ae83d4023898a7"}, +] + +[package.dependencies] +Django = ">=3.2,<6.0" + +[[package]] +name = "djangorestframework" +version = "3.15.2" +description = "Web APIs for Django, made easy." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "djangorestframework-3.15.2-py3-none-any.whl", hash = "sha256:2b8871b062ba1aefc2de01f773875441a961fefbf79f5eed1e32b2f096944b20"}, + {file = "djangorestframework-3.15.2.tar.gz", hash = "sha256:36fe88cd2d6c6bec23dca9804bab2ba5517a8bb9d8f47ebc68981b56840107ad"}, +] + +[package.dependencies] +django = ">=4.2" + +[[package]] +name = "djangorestframework-jsonapi" +version = "7.0.2" +description = "A Django REST framework API adapter for the JSON:API spec." +optional = false +python-versions = ">=3.8" +files = [ + {file = "djangorestframework-jsonapi-7.0.2.tar.gz", hash = "sha256:d6c72a2bee539f1093dd86620e862af2d1a0e60408e38a710146286dbde71d75"}, + {file = "djangorestframework_jsonapi-7.0.2-py2.py3-none-any.whl", hash = "sha256:be457adb50aac77eec8893048bf46ad6926dcd26204aa10965a1430610828d50"}, +] + +[package.dependencies] +django = ">=4.2" +djangorestframework = ">=3.14" +inflection = ">=0.5.0" + +[package.extras] +django-filter = ["django-filter (>=2.4)"] +django-polymorphic = ["django-polymorphic (>=3.0)"] +openapi = ["pyyaml (>=5.4)", "uritemplate (>=3.0.1)"] + +[[package]] +name = "djangorestframework-simplejwt" +version = "5.3.1" +description = "A minimal JSON Web Token authentication plugin for Django REST Framework" +optional = false +python-versions = ">=3.8" +files = [ + {file = "djangorestframework_simplejwt-5.3.1-py3-none-any.whl", hash = "sha256:381bc966aa46913905629d472cd72ad45faa265509764e20ffd440164c88d220"}, + {file = "djangorestframework_simplejwt-5.3.1.tar.gz", hash = "sha256:6c4bd37537440bc439564ebf7d6085e74c5411485197073f508ebdfa34bc9fae"}, +] + +[package.dependencies] +django = ">=3.2" +djangorestframework = ">=3.12" +pyjwt = ">=1.7.1,<3" + +[package.extras] +crypto = ["cryptography (>=3.3.1)"] +dev = ["Sphinx (>=1.6.5,<2)", "cryptography", "flake8", "freezegun", "ipython", "isort", "pep8", "pytest", "pytest-cov", "pytest-django", "pytest-watch", "pytest-xdist", "python-jose (==3.3.0)", "sphinx_rtd_theme (>=0.1.9)", "tox", "twine", "wheel"] +doc = ["Sphinx (>=1.6.5,<2)", "sphinx_rtd_theme (>=0.1.9)"] +lint = ["flake8", "isort", "pep8"] +python-jose = ["python-jose (==3.3.0)"] +test = ["cryptography", "freezegun", "pytest", "pytest-cov", "pytest-django", "pytest-xdist", "tox"] + +[[package]] +name = "dnspython" +version = "2.6.1" +description = "DNS toolkit" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, + {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=41)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=0.9.25)"] +idna = ["idna (>=3.6)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "docker" +version = "7.1.0" +description = "A Python library for the Docker Engine API." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0"}, + {file = "docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c"}, +] + +[package.dependencies] +pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} +requests = ">=2.26.0" +urllib3 = ">=1.26.0" + +[package.extras] +dev = ["coverage (==7.2.7)", "pytest (==7.4.2)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.1.0)", "ruff (==0.1.8)"] +docs = ["myst-parser (==0.18.0)", "sphinx (==5.1.1)"] +ssh = ["paramiko (>=2.4.3)"] +websockets = ["websocket-client (>=1.3.0)"] + +[[package]] +name = "dparse" +version = "0.6.4b0" +description = "A parser for Python dependency files" +optional = false +python-versions = ">=3.7" +files = [ + {file = "dparse-0.6.4b0-py3-none-any.whl", hash = "sha256:592ff183348b8a5ea0a18442a7965e29445d3a26063654ec2c7e8ef42cd5753c"}, + {file = "dparse-0.6.4b0.tar.gz", hash = "sha256:f8d49b41a527f3d16a269f854e6665245b325e50e41d2c213810cb984553e5c8"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +all = ["dparse[conda]", "dparse[pipenv]", "dparse[poetry]"] +conda = ["pyyaml"] +pipenv = ["pipenv"] +poetry = ["poetry"] + +[[package]] +name = "drf-extensions" +version = "0.7.1" +description = "Extensions for Django REST Framework" +optional = false +python-versions = "*" +files = [ + {file = "drf-extensions-0.7.1.tar.gz", hash = "sha256:90abfc11a2221e8daf4cd54457e41ed38cd71134678de9622e806193db027db1"}, + {file = "drf_extensions-0.7.1-py2.py3-none-any.whl", hash = "sha256:007910437e64aa1d5ad6fc47266a4ac4280e31761e6458eb30fcac7494ac7d4e"}, +] + +[package.dependencies] +djangorestframework = ">=3.9.3" + +[[package]] +name = "drf-nested-routers" +version = "0.94.1" +description = "Nested resources for the Django Rest Framework" +optional = false +python-versions = ">=3.8" +files = [ + {file = "drf-nested-routers-0.94.1.tar.gz", hash = "sha256:2b846385ed95c9f17bf4242db3b264ac826b5af00dda6c737d3fe7cc7bf2c7db"}, + {file = "drf_nested_routers-0.94.1-py2.py3-none-any.whl", hash = "sha256:3a8ec45a025c0f39188ec1ec415244beb875a6f4db87911a1f5a606d09b68c9f"}, +] + +[package.dependencies] +Django = ">=4.2" +djangorestframework = ">=3.14.0" + +[[package]] +name = "drf-spectacular" +version = "0.27.2" +description = "Sane and flexible OpenAPI 3 schema generation for Django REST framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "drf-spectacular-0.27.2.tar.gz", hash = "sha256:a199492f2163c4101055075ebdbb037d59c6e0030692fc83a1a8c0fc65929981"}, + {file = "drf_spectacular-0.27.2-py3-none-any.whl", hash = "sha256:b1c04bf8b2fbbeaf6f59414b4ea448c8787aba4d32f76055c3b13335cf7ec37b"}, +] + +[package.dependencies] +Django = ">=2.2" +djangorestframework = ">=3.10.3" +inflection = ">=0.3.1" +jsonschema = ">=2.6.0" +PyYAML = ">=5.1" +uritemplate = ">=2.0.0" + +[package.extras] +offline = ["drf-spectacular-sidecar"] +sidecar = ["drf-spectacular-sidecar"] + +[[package]] +name = "drf-spectacular-jsonapi" +version = "0.5.1" +description = "open api 3 schema generator for drf-json-api package based on drf-spectacular package." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "drf-spectacular-jsonapi-0.5.1.tar.gz", hash = "sha256:e45f87f3cce2692f4f546e0785d8fcc32c6b49770fff858065c267ae8f9cfde5"}, + {file = "drf_spectacular_jsonapi-0.5.1-py3-none-any.whl", hash = "sha256:abac728abd83e2544408cc900d682d532ca2088f2f9321d1c9101bcdfdabca78"}, +] + +[package.dependencies] +Django = ">=3.2" +djangorestframework = ">=3.13" +djangorestframework-jsonapi = ">=6.0.0" +drf-extensions = ">=0.7.1" +drf-spectacular = ">=0.25.0" + +[[package]] +name = "durationpy" +version = "0.9" +description = "Module for converting between datetime.timedelta and Go's Duration strings." +optional = false +python-versions = "*" +files = [ + {file = "durationpy-0.9-py3-none-any.whl", hash = "sha256:e65359a7af5cedad07fb77a2dd3f390f8eb0b74cb845589fa6c057086834dd38"}, + {file = "durationpy-0.9.tar.gz", hash = "sha256:fd3feb0a69a0057d582ef643c355c40d2fa1c942191f914d12203b1a01ac722a"}, +] + +[[package]] +name = "email-validator" +version = "2.2.0" +description = "A robust email address syntax and deliverability validation library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, + {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + +[[package]] +name = "execnet" +version = "2.1.1" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.8" +files = [ + {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, + {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] + +[[package]] +name = "flask" +version = "3.0.3" +description = "A simple framework for building complex web applications." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "flask-3.0.3-py3-none-any.whl", hash = "sha256:34e815dfaa43340d1d15a5c3a02b8476004037eb4840b34910c6e21679d288f3"}, + {file = "flask-3.0.3.tar.gz", hash = "sha256:ceb27b0af3823ea2737928a4d99d125a06175b8512c445cbd9a9ce200ef76842"}, +] + +[package.dependencies] +blinker = ">=1.6.2" +click = ">=8.1.3" +itsdangerous = ">=2.1.2" +Jinja2 = ">=3.1.2" +Werkzeug = ">=3.0.0" + +[package.extras] +async = ["asgiref (>=3.2)"] +dotenv = ["python-dotenv"] + +[[package]] +name = "freezegun" +version = "1.5.1" +description = "Let your Python tests travel through time" +optional = false +python-versions = ">=3.7" +files = [ + {file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, +] + +[package.dependencies] +python-dateutil = ">=2.7" + +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = 
"frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "google-api-core" +version = "2.20.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_core-2.20.0-py3-none-any.whl", hash = "sha256:ef0591ef03c30bb83f79b3d0575c3f31219001fc9c5cf37024d08310aeffed8a"}, + {file = "google_api_core-2.20.0.tar.gz", hash = "sha256:f74dff1889ba291a4b76c5079df0711810e2d9da81abfdc99957bc961c1eb28f"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || 
>4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-api-python-client" +version = "2.153.0" +description = "Google API Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_python_client-2.153.0-py2.py3-none-any.whl", hash = "sha256:6ff13bbfa92a57972e33ec3808e18309e5981b8ca1300e5da23bf2b4d6947384"}, + {file = "google_api_python_client-2.153.0.tar.gz", hash = "sha256:35cce8647f9c163fc04fb4d811fc91aae51954a2bdd74918decbe0e65d791dd2"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0" +google-auth = ">=1.32.0,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0.dev0" +google-auth-httplib2 = ">=0.2.0,<1.0.0" +httplib2 = ">=0.19.0,<1.dev0" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "2.35.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"}, + {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +description = "Google Authentication Library: httplib2 transport" +optional = false +python-versions = "*" +files = [ + {file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, + {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, +] + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.19.0" + +[[package]] +name = "googleapis-common-protos" +version = "1.65.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, + {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "grapheme" +version = "0.6.0" +description = "Unicode grapheme helpers" +optional = false +python-versions = "*" +files = [ + {file = "grapheme-0.6.0.tar.gz", hash = "sha256:44c2b9f21bbe77cfb05835fec230bd435954275267fea1858013b102f8603cca"}, +] + +[package.extras] +test = ["pytest", "sphinx", "sphinx-autobuild", "twine", "wheel"] + +[[package]] +name = "gunicorn" +version = 
"23.0.0" +description = "WSGI HTTP Server for UNIX" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d"}, + {file = "gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +eventlet = ["eventlet (>=0.24.1,!=0.36.0)"] +gevent = ["gevent (>=1.4.0)"] +setproctitle = ["setproctitle"] +testing = ["coverage", "eventlet", "gevent", "pytest", "pytest-cov"] +tornado = ["tornado (>=0.2)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h2" +version = "4.1.0" +description = "HTTP/2 State-Machine based protocol implementation" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, + {file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"}, +] + +[package.dependencies] +hpack = ">=4.0,<5" +hyperframe = ">=6.0,<7" + +[[package]] +name = "hpack" +version = "4.0.0" +description = "Pure-Python HPACK header compression" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"}, + {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"}, +] + +[[package]] +name = "httpcore" +version = "1.0.6" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httplib2" +version = "0.22.0" +description = "A comprehensive HTTP client library." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, + {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, +] + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "hyperframe" +version = "6.0.1" +description = "HTTP/2 framing layer for Python" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"}, + {file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"}, +] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.4.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "inflection" +version = "0.5.1" +description = "A port of Ruby on Rails inflector to Python" +optional = false +python-versions = ">=3.5" +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isodate" +version = "0.6.1" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = false +python-versions = "*" +files = [ + {file = 
"isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +description = "Safely pass data to untrusted environments and back." +optional = false +python-versions = ">=3.8" +files = [ + {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, + {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, +] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "jsonschema" +version = "4.23.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + 
+[[package]] +name = "kombu" +version = "5.4.2" +description = "Messaging library for Python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "kombu-5.4.2-py3-none-any.whl", hash = "sha256:14212f5ccf022fc0a70453bb025a1dcc32782a588c49ea866884047d66e14763"}, + {file = "kombu-5.4.2.tar.gz", hash = "sha256:eef572dd2fd9fc614b37580e3caeafdd5af46c1eff31e7fba89138cdb406f2cf"}, +] + +[package.dependencies] +amqp = ">=5.1.1,<6.0.0" +tzdata = {version = "*", markers = "python_version >= \"3.9\""} +vine = "5.1.0" + +[package.extras] +azureservicebus = ["azure-servicebus (>=7.10.0)"] +azurestoragequeues = ["azure-identity (>=1.12.0)", "azure-storage-queue (>=12.6.0)"] +confluentkafka = ["confluent-kafka (>=2.2.0)"] +consul = ["python-consul2 (==0.1.5)"] +librabbitmq = ["librabbitmq (>=2.0.0)"] +mongodb = ["pymongo (>=4.1.1)"] +msgpack = ["msgpack (==1.1.0)"] +pyro = ["pyro4 (==4.82)"] +qpid = ["qpid-python (>=0.26)", "qpid-tools (>=0.26)"] +redis = ["redis (>=4.5.2,!=4.5.5,!=5.0.2)"] +slmq = ["softlayer-messaging (>=1.0.3)"] +sqlalchemy = ["sqlalchemy (>=1.4.48,<2.1)"] +sqs = ["boto3 (>=1.26.143)", "pycurl (>=7.43.0.5)", "urllib3 (>=1.26.16)"] +yaml = ["PyYAML (>=3.10)"] +zookeeper = ["kazoo (>=2.8.0)"] + +[[package]] +name = "kubernetes" +version = "31.0.0" +description = "Kubernetes python client" +optional = false +python-versions = ">=3.6" +files = [ + {file = "kubernetes-31.0.0-py2.py3-none-any.whl", hash = "sha256:bf141e2d380c8520eada8b351f4e319ffee9636328c137aa432bc486ca1200e1"}, + {file = "kubernetes-31.0.0.tar.gz", hash = "sha256:28945de906c8c259c1ebe62703b56a03b714049372196f854105afe4e6d014c0"}, +] + +[package.dependencies] +certifi = ">=14.05.14" +durationpy = ">=0.7" +google-auth = ">=1.0.1" +oauthlib = ">=3.2.2" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4.1" +requests = "*" +requests-oauthlib = "*" +six = ">=1.9.0" +urllib3 = ">=1.24.2" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" + +[package.extras] +adal = ["adal (>=1.0.2)"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = 
"MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "microsoft-kiota-abstractions" +version = "1.6.2" +description = "Core abstractions for kiota generated libraries in Python" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "microsoft_kiota_abstractions-1.6.2-py3-none-any.whl", hash = "sha256:8c2c777748e80f17dba3809b5d149585d9918198f0f94125e87432f7165ba80e"}, + {file = "microsoft_kiota_abstractions-1.6.2.tar.gz", hash = "sha256:dec30f0fb427a051003e94b5c6fcf266f4702ecbd9d6961e3966124b9cbe41bf"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.27.0" +opentelemetry-sdk = ">=1.27.0" +std-uritemplate = ">=2.0.0" + +[[package]] +name = "microsoft-kiota-authentication-azure" +version = "1.1.0" +description = "Authentication provider for Kiota using Azure Identity" +optional = false +python-versions = "*" +files = [ + {file = "microsoft_kiota_authentication_azure-1.1.0-py2.py3-none-any.whl", hash = "sha256:22ecbf7e89120aca525cb3f89576617e6c6601b20b06ac720a8e66175698cf84"}, + {file = "microsoft_kiota_authentication_azure-1.1.0.tar.gz", hash = "sha256:8940084a3c1c25d1cde1d3b193ef7164dc6323e3b8200e2f695cbc937797bac6"}, +] + +[package.dependencies] +aiohttp = ">=3.8.0" +azure-core = ">=1.21.1" +microsoft-kiota-abstractions = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.20.0" +opentelemetry-sdk = ">=1.20.0" + +[[package]] +name = "microsoft-kiota-http" +version = "1.3.3" +description = "Kiota http request adapter implementation for httpx library" +optional = false +python-versions = "*" +files = [ + {file = "microsoft_kiota_http-1.3.3-py2.py3-none-any.whl", hash = "sha256:21109a34140bf42e18855b7cf983939b891ae30739f21a9ce045c3a715f325fd"}, + {file = "microsoft_kiota_http-1.3.3.tar.gz", hash = "sha256:0b40f37c6c158c2e5b2dffa963a7fc342d368c1a64b8cca08631ba19d0ff94a9"}, +] + +[package.dependencies] +httpx = {version = ">=0.23.0", extras = ["http2"]} +microsoft-kiota_abstractions = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.20.0" +opentelemetry-sdk = ">=1.20.0" + +[[package]] +name = "microsoft-kiota-serialization-form" +version = "0.1.1" +description = "Implementation of Kiota Serialization Interfaces for 
URI-Form encoded serialization" +optional = false +python-versions = "*" +files = [ + {file = "microsoft_kiota_serialization_form-0.1.1-py2.py3-none-any.whl", hash = "sha256:7a2da956edd3329ff74ec1a34eded910fd9cda57ce8c71f3797e359adc9993f7"}, + {file = "microsoft_kiota_serialization_form-0.1.1.tar.gz", hash = "sha256:f72dd50081250d1e49111682eccf620d3c2e335195d50c096b35ec5a5ef59a54"}, +] + +[package.dependencies] +microsoft-kiota_abstractions = ">=1.0.0,<2.0.0" +pendulum = ">=3.0.0" + +[[package]] +name = "microsoft-kiota-serialization-json" +version = "1.3.3" +description = "Implementation of Kiota Serialization interfaces for JSON" +optional = false +python-versions = "*" +files = [ + {file = "microsoft_kiota_serialization_json-1.3.3-py2.py3-none-any.whl", hash = "sha256:3003385e0277c5e92cdbe4227f1dd4a5e3715bbcf329ce949af986d886fc73f7"}, + {file = "microsoft_kiota_serialization_json-1.3.3.tar.gz", hash = "sha256:348fd75ec6f9993b79172018cb986ecbaf73255d7930b89ad68e648b22b579aa"}, +] + +[package.dependencies] +microsoft-kiota_abstractions = ">=1.0.0,<2.0.0" +pendulum = ">=3.0.0b1" + +[[package]] +name = "microsoft-kiota-serialization-multipart" +version = "0.1.0" +description = "Implementation of Kiota Serialization Interfaces for Multipart serialization" +optional = false +python-versions = "*" +files = [ + {file = "microsoft_kiota_serialization_multipart-0.1.0-py2.py3-none-any.whl", hash = "sha256:ef183902e77807806b8a181cdde53ba5bc04c6c9bdb2f7d80f8bad5d720e0015"}, + {file = "microsoft_kiota_serialization_multipart-0.1.0.tar.gz", hash = "sha256:14e89e92582e6630ddbc70ac67b70bf189dacbfc41a96d3e1d10339e86c8dde5"}, +] + +[package.dependencies] +microsoft-kiota_abstractions = ">=1.0.0,<2.0.0" + +[[package]] +name = "microsoft-kiota-serialization-text" +version = "1.0.0" +description = "Implementation of Kiota Serialization interfaces for text/plain" +optional = false +python-versions = "*" +files = [ + {file = "microsoft_kiota_serialization_text-1.0.0-py2.py3-none-any.whl", hash = "sha256:1d3789e012b603e059a36cc675d1fd08cb81e0dde423d970c0af2eabce9c0d43"}, + {file = "microsoft_kiota_serialization_text-1.0.0.tar.gz", hash = "sha256:c3dd3f409b1c4f4963bd1e41d51b65f7e53e852130bb441d79b77dad88ee76ed"}, +] + +[package.dependencies] +microsoft-kiota_abstractions = ">=1.0.0,<2.0.0" +python-dateutil = ">=2.8.2" + +[[package]] +name = "msal" +version = "1.31.0" +description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." +optional = false +python-versions = ">=3.7" +files = [ + {file = "msal-1.31.0-py3-none-any.whl", hash = "sha256:96bc37cff82ebe4b160d5fc0f1196f6ca8b50e274ecd0ec5bf69c438514086e7"}, + {file = "msal-1.31.0.tar.gz", hash = "sha256:2c4f189cf9cc8f00c80045f66d39b7c0f3ed45873fd3d1f2af9f22db2e12ff4b"}, +] + +[package.dependencies] +cryptography = ">=2.5,<46" +PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} +requests = ">=2.0.0,<3" + +[package.extras] +broker = ["pymsalruntime (>=0.14,<0.18)", "pymsalruntime (>=0.17,<0.18)"] + +[[package]] +name = "msal-extensions" +version = "1.2.0" +description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "msal_extensions-1.2.0-py3-none-any.whl", hash = "sha256:cf5ba83a2113fa6dc011a254a72f1c223c88d7dfad74cc30617c4679a417704d"}, + {file = "msal_extensions-1.2.0.tar.gz", hash = "sha256:6f41b320bfd2933d631a215c91ca0dd3e67d84bd1a2f50ce917d5874ec646bef"}, +] + +[package.dependencies] +msal = ">=1.29,<2" +portalocker = ">=1.4,<3" + +[[package]] +name = "msgraph-core" +version = "1.1.5" +description = "Core component of the Microsoft Graph Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "msgraph_core-1.1.5-py3-none-any.whl", hash = "sha256:c6f2ba5c2d8ae31e7a2aec5161ec695a79066be5c7584845d4057cb4e785ed0d"}, + {file = "msgraph_core-1.1.5.tar.gz", hash = "sha256:0af2e6fe0d802bea8fe7e4d74381a4299e900d4b3886f148e88db2fff24e3926"}, +] + +[package.dependencies] +httpx = {version = ">=0.23.0", extras = ["http2"]} +microsoft-kiota-abstractions = ">=1.0.0,<2.0.0" +microsoft-kiota-authentication-azure = ">=1.0.0,<2.0.0" +microsoft-kiota-http = ">=1.0.0,<2.0.0" + +[package.extras] +dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"] + +[[package]] +name = "msgraph-sdk" +version = "1.12.0" +description = "The Microsoft Graph Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "msgraph_sdk-1.12.0-py3-none-any.whl", hash = "sha256:ac298b546b240391b0e407379d039db32862a56d6fe15cf7c5f7e77631fc6771"}, + {file = "msgraph_sdk-1.12.0.tar.gz", hash = "sha256:fbb5a8a9f6eed89b496f207eb35b6b4cfc7fefa75608aeef07477a3b2276d4fa"}, +] + +[package.dependencies] +azure-identity = ">=1.12.0" +microsoft-kiota-abstractions = ">=1.3.0,<2.0.0" +microsoft-kiota-authentication-azure = ">=1.0.0,<2.0.0" +microsoft-kiota-http = ">=1.0.0,<2.0.0" +microsoft-kiota-serialization-form = ">=0.1.0" +microsoft-kiota-serialization-json = ">=1.3.0,<2.0.0" +microsoft-kiota-serialization-multipart = ">=0.1.0" +microsoft-kiota-serialization-text = ">=1.0.0,<2.0.0" +msgraph_core = ">=1.0.0" + +[package.extras] +dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"] + +[[package]] +name = "msrest" +version = "0.7.1" +description = "AutoRest swagger generator Python client runtime." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "msrest-0.7.1-py3-none-any.whl", hash = "sha256:21120a810e1233e5e6cc7fe40b474eeb4ec6f757a15d7cf86702c369f9567c32"}, + {file = "msrest-0.7.1.zip", hash = "sha256:6e7661f46f3afd88b75667b7187a92829924446c7ea1d169be8c4bb7eeb788b9"}, +] + +[package.dependencies] +azure-core = ">=1.24.0" +certifi = ">=2017.4.17" +isodate = ">=0.6.0" +requests = ">=2.16,<3.0" +requests-oauthlib = ">=0.5.0" + +[package.extras] +async = ["aiodns", "aiohttp (>=3.0)"] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = 
"multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[[package]] +name = "mypy" +version = "1.10.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + +[[package]] +name = "numpy" +version = "2.0.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"}, + {file = 
"numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"}, + {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"}, + {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"}, + {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"}, + {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"}, + {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"}, + {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"}, + {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"}, + {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"}, + {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "opentelemetry-api" +version = "1.27.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, + {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=8.4.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.27.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, + {file = 
"opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, +] + +[package.dependencies] +opentelemetry-api = "1.27.0" +opentelemetry-semantic-conventions = "0.48b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.48b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, + {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.27.0" + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pandas" +version = "2.2.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = 
"pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = 
"pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "pbr" +version = "6.1.0" +description = "Python Build Reasonableness" +optional = false +python-versions = ">=2.6" +files = [ + {file = "pbr-6.1.0-py2.py3-none-any.whl", hash = "sha256:a776ae228892d8013649c0aeccbb3d5f99ee15e005a4cbb7e61d55a067b28a2a"}, + {file = "pbr-6.1.0.tar.gz", hash = "sha256:788183e382e3d1d7707db08978239965e8b9e4e5ed42669bf4758186734d5f24"}, +] + +[[package]] +name = "pendulum" +version = "3.0.0" +description = "Python 
datetimes made easy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pendulum-3.0.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2cf9e53ef11668e07f73190c805dbdf07a1939c3298b78d5a9203a86775d1bfd"}, + {file = "pendulum-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fb551b9b5e6059377889d2d878d940fd0bbb80ae4810543db18e6f77b02c5ef6"}, + {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c58227ac260d5b01fc1025176d7b31858c9f62595737f350d22124a9a3ad82d"}, + {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60fb6f415fea93a11c52578eaa10594568a6716602be8430b167eb0d730f3332"}, + {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b69f6b4dbcb86f2c2fe696ba991e67347bcf87fe601362a1aba6431454b46bde"}, + {file = "pendulum-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:138afa9c373ee450ede206db5a5e9004fd3011b3c6bbe1e57015395cd076a09f"}, + {file = "pendulum-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:83d9031f39c6da9677164241fd0d37fbfc9dc8ade7043b5d6d62f56e81af8ad2"}, + {file = "pendulum-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0c2308af4033fa534f089595bcd40a95a39988ce4059ccd3dc6acb9ef14ca44a"}, + {file = "pendulum-3.0.0-cp310-none-win_amd64.whl", hash = "sha256:9a59637cdb8462bdf2dbcb9d389518c0263799189d773ad5c11db6b13064fa79"}, + {file = "pendulum-3.0.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3725245c0352c95d6ca297193192020d1b0c0f83d5ee6bb09964edc2b5a2d508"}, + {file = "pendulum-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6c035f03a3e565ed132927e2c1b691de0dbf4eb53b02a5a3c5a97e1a64e17bec"}, + {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:597e66e63cbd68dd6d58ac46cb7a92363d2088d37ccde2dae4332ef23e95cd00"}, + {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99a0f8172e19f3f0c0e4ace0ad1595134d5243cf75985dc2233e8f9e8de263ca"}, + {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:77d8839e20f54706aed425bec82a83b4aec74db07f26acd039905d1237a5e1d4"}, + {file = "pendulum-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afde30e8146292b059020fbc8b6f8fd4a60ae7c5e6f0afef937bbb24880bdf01"}, + {file = "pendulum-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:660434a6fcf6303c4efd36713ca9212c753140107ee169a3fc6c49c4711c2a05"}, + {file = "pendulum-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dee9e5a48c6999dc1106eb7eea3e3a50e98a50651b72c08a87ee2154e544b33e"}, + {file = "pendulum-3.0.0-cp311-none-win_amd64.whl", hash = "sha256:d4cdecde90aec2d67cebe4042fd2a87a4441cc02152ed7ed8fb3ebb110b94ec4"}, + {file = "pendulum-3.0.0-cp311-none-win_arm64.whl", hash = "sha256:773c3bc4ddda2dda9f1b9d51fe06762f9200f3293d75c4660c19b2614b991d83"}, + {file = "pendulum-3.0.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:409e64e41418c49f973d43a28afe5df1df4f1dd87c41c7c90f1a63f61ae0f1f7"}, + {file = "pendulum-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a38ad2121c5ec7c4c190c7334e789c3b4624798859156b138fcc4d92295835dc"}, + {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fde4d0b2024b9785f66b7f30ed59281bd60d63d9213cda0eb0910ead777f6d37"}, + {file = 
"pendulum-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b2c5675769fb6d4c11238132962939b960fcb365436b6d623c5864287faa319"}, + {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8af95e03e066826f0f4c65811cbee1b3123d4a45a1c3a2b4fc23c4b0dff893b5"}, + {file = "pendulum-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2165a8f33cb15e06c67070b8afc87a62b85c5a273e3aaa6bc9d15c93a4920d6f"}, + {file = "pendulum-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ad5e65b874b5e56bd942546ea7ba9dd1d6a25121db1c517700f1c9de91b28518"}, + {file = "pendulum-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17fe4b2c844bbf5f0ece69cfd959fa02957c61317b2161763950d88fed8e13b9"}, + {file = "pendulum-3.0.0-cp312-none-win_amd64.whl", hash = "sha256:78f8f4e7efe5066aca24a7a57511b9c2119f5c2b5eb81c46ff9222ce11e0a7a5"}, + {file = "pendulum-3.0.0-cp312-none-win_arm64.whl", hash = "sha256:28f49d8d1e32aae9c284a90b6bb3873eee15ec6e1d9042edd611b22a94ac462f"}, + {file = "pendulum-3.0.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d4e2512f4e1a4670284a153b214db9719eb5d14ac55ada5b76cbdb8c5c00399d"}, + {file = "pendulum-3.0.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:3d897eb50883cc58d9b92f6405245f84b9286cd2de6e8694cb9ea5cb15195a32"}, + {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e169cc2ca419517f397811bbe4589cf3cd13fca6dc38bb352ba15ea90739ebb"}, + {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f17c3084a4524ebefd9255513692f7e7360e23c8853dc6f10c64cc184e1217ab"}, + {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:826d6e258052715f64d05ae0fc9040c0151e6a87aae7c109ba9a0ed930ce4000"}, + {file = "pendulum-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2aae97087872ef152a0c40e06100b3665d8cb86b59bc8471ca7c26132fccd0f"}, + {file = "pendulum-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac65eeec2250d03106b5e81284ad47f0d417ca299a45e89ccc69e36130ca8bc7"}, + {file = "pendulum-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a5346d08f3f4a6e9e672187faa179c7bf9227897081d7121866358af369f44f9"}, + {file = "pendulum-3.0.0-cp37-none-win_amd64.whl", hash = "sha256:235d64e87946d8f95c796af34818c76e0f88c94d624c268693c85b723b698aa9"}, + {file = "pendulum-3.0.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:6a881d9c2a7f85bc9adafcfe671df5207f51f5715ae61f5d838b77a1356e8b7b"}, + {file = "pendulum-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d7762d2076b9b1cb718a6631ad6c16c23fc3fac76cbb8c454e81e80be98daa34"}, + {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e8e36a8130819d97a479a0e7bf379b66b3b1b520e5dc46bd7eb14634338df8c"}, + {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7dc843253ac373358ffc0711960e2dd5b94ab67530a3e204d85c6e8cb2c5fa10"}, + {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a78ad3635d609ceb1e97d6aedef6a6a6f93433ddb2312888e668365908c7120"}, + {file = "pendulum-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a137e9e0d1f751e60e67d11fc67781a572db76b2296f7b4d44554761049d6"}, + {file = "pendulum-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:c95984037987f4a457bb760455d9ca80467be792236b69d0084f228a8ada0162"}, + {file = "pendulum-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d29c6e578fe0f893766c0d286adbf0b3c726a4e2341eba0917ec79c50274ec16"}, + {file = "pendulum-3.0.0-cp38-none-win_amd64.whl", hash = "sha256:deaba8e16dbfcb3d7a6b5fabdd5a38b7c982809567479987b9c89572df62e027"}, + {file = "pendulum-3.0.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b11aceea5b20b4b5382962b321dbc354af0defe35daa84e9ff3aae3c230df694"}, + {file = "pendulum-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a90d4d504e82ad236afac9adca4d6a19e4865f717034fc69bafb112c320dcc8f"}, + {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:825799c6b66e3734227756fa746cc34b3549c48693325b8b9f823cb7d21b19ac"}, + {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad769e98dc07972e24afe0cff8d365cb6f0ebc7e65620aa1976fcfbcadc4c6f3"}, + {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6fc26907eb5fb8cc6188cc620bc2075a6c534d981a2f045daa5f79dfe50d512"}, + {file = "pendulum-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c717eab1b6d898c00a3e0fa7781d615b5c5136bbd40abe82be100bb06df7a56"}, + {file = "pendulum-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3ddd1d66d1a714ce43acfe337190be055cdc221d911fc886d5a3aae28e14b76d"}, + {file = "pendulum-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:822172853d7a9cf6da95d7b66a16c7160cb99ae6df55d44373888181d7a06edc"}, + {file = "pendulum-3.0.0-cp39-none-win_amd64.whl", hash = "sha256:840de1b49cf1ec54c225a2a6f4f0784d50bd47f68e41dc005b7f67c7d5b5f3ae"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b1f74d1e6ffe5d01d6023870e2ce5c2191486928823196f8575dcc786e107b1"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:729e9f93756a2cdfa77d0fc82068346e9731c7e884097160603872686e570f07"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e586acc0b450cd21cbf0db6bae386237011b75260a3adceddc4be15334689a9a"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22e7944ffc1f0099a79ff468ee9630c73f8c7835cd76fdb57ef7320e6a409df4"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fa30af36bd8e50686846bdace37cf6707bdd044e5cb6e1109acbad3277232e04"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:440215347b11914ae707981b9a57ab9c7b6983ab0babde07063c6ee75c0dc6e7"}, + {file = "pendulum-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:314c4038dc5e6a52991570f50edb2f08c339debdf8cea68ac355b32c4174e820"}, + {file = "pendulum-3.0.0-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5acb1d386337415f74f4d1955c4ce8d0201978c162927d07df8eb0692b2d8533"}, + {file = "pendulum-3.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a789e12fbdefaffb7b8ac67f9d8f22ba17a3050ceaaa635cd1cc4645773a4b1e"}, + {file = "pendulum-3.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:860aa9b8a888e5913bd70d819306749e5eb488e6b99cd6c47beb701b22bdecf5"}, + {file = "pendulum-3.0.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5ebc65ea033ef0281368217fbf59f5cb05b338ac4dd23d60959c7afcd79a60a0"}, + {file = 
"pendulum-3.0.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9fef18ab0386ef6a9ac7bad7e43ded42c83ff7ad412f950633854f90d59afa8"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1c134ba2f0571d0b68b83f6972e2307a55a5a849e7dac8505c715c531d2a8795"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:385680812e7e18af200bb9b4a49777418c32422d05ad5a8eb85144c4a285907b"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eec91cd87c59fb32ec49eb722f375bd58f4be790cae11c1b70fac3ee4f00da0"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4386bffeca23c4b69ad50a36211f75b35a4deb6210bdca112ac3043deb7e494a"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dfbcf1661d7146d7698da4b86e7f04814221081e9fe154183e34f4c5f5fa3bf8"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:04a1094a5aa1daa34a6b57c865b25f691848c61583fb22722a4df5699f6bf74c"}, + {file = "pendulum-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5b0ec85b9045bd49dd3a3493a5e7ddfd31c36a2a60da387c419fa04abcaecb23"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0a15b90129765b705eb2039062a6daf4d22c4e28d1a54fa260892e8c3ae6e157"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:bb8f6d7acd67a67d6fedd361ad2958ff0539445ef51cbe8cd288db4306503cd0"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd69b15374bef7e4b4440612915315cc42e8575fcda2a3d7586a0d88192d0c88"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc00f8110db6898360c53c812872662e077eaf9c75515d53ecc65d886eec209a"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:83a44e8b40655d0ba565a5c3d1365d27e3e6778ae2a05b69124db9e471255c4a"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1a3604e9fbc06b788041b2a8b78f75c243021e0f512447806a6d37ee5214905d"}, + {file = "pendulum-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:92c307ae7accebd06cbae4729f0ba9fa724df5f7d91a0964b1b972a22baa482b"}, + {file = "pendulum-3.0.0.tar.gz", hash = "sha256:5d034998dea404ec31fae27af6b22cff1708f830a1ed7353be4d1019bb9f584e"}, +] + +[package.dependencies] +python-dateutil = ">=2.6" +tzdata = ">=2020.1" + +[package.extras] +test = ["time-machine (>=2.6.0)"] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "plotly" +version = "5.24.1" +description = "An open-source, interactive data visualization library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, + {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, +] + +[package.dependencies] +packaging = "*" +tenacity = ">=6.2.0" + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "portalocker" +version = "2.10.1" +description = "Wraps the portalocker recipe for easy usage" +optional = false +python-versions = ">=3.8" +files = [ + {file = "portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf"}, + {file = "portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f"}, +] + +[package.dependencies] +pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} + +[package.extras] +docs = ["sphinx (>=1.7.1)"] +redis = ["redis"] +tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.48" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "5.28.2" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"}, + {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"}, + {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"}, + {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"}, + {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"}, + {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"}, + {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"}, + {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"}, + {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"}, + {file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"}, + {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"}, +] + +[[package]] +name = "prowler" +version = "4.6.0" +description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks." 
+optional = false +python-versions = ">=3.9,<3.13" +files = [] +develop = false + +[package.dependencies] +alive-progress = "3.2.0" +awsipranges = "0.3.3" +azure-identity = "1.19.0" +azure-keyvault-keys = "4.10.0" +azure-mgmt-applicationinsights = "4.0.0" +azure-mgmt-authorization = "4.0.0" +azure-mgmt-compute = "33.0.0" +azure-mgmt-containerregistry = "10.3.0" +azure-mgmt-containerservice = "33.0.0" +azure-mgmt-cosmosdb = "9.6.0" +azure-mgmt-keyvault = "10.3.1" +azure-mgmt-monitor = "6.0.2" +azure-mgmt-network = "28.0.0" +azure-mgmt-rdbms = "10.1.0" +azure-mgmt-resource = "23.2.0" +azure-mgmt-security = "7.0.0" +azure-mgmt-sql = "3.0.1" +azure-mgmt-storage = "21.2.1" +azure-mgmt-subscription = "3.1.1" +azure-mgmt-web = "7.3.1" +azure-storage-blob = "12.24.0" +boto3 = "1.35.60" +botocore = "1.35.60" +colorama = "0.4.6" +cryptography = "43.0.1" +dash = "2.18.2" +dash-bootstrap-components = "1.6.0" +detect-secrets = "1.5.0" +google-api-python-client = "2.153.0" +google-auth-httplib2 = ">=0.1,<0.3" +jsonschema = "4.23.0" +kubernetes = "31.0.0" +microsoft-kiota-abstractions = "1.6.2" +msgraph-sdk = "1.12.0" +numpy = "2.0.2" +pandas = "2.2.3" +py-ocsf-models = "0.2.0" +pydantic = "1.10.18" +python-dateutil = "^2.9.0.post0" +pytz = "2024.2" +schema = "0.7.7" +shodan = "1.31.0" +slack-sdk = "3.33.3" +tabulate = "0.9.0" +tzlocal = "5.2" + +[package.source] +type = "git" +url = "https://github.com/prowler-cloud/prowler.git" +reference = "master" +resolved_reference = "8be83fc632445cd25eeb90ed20257716b673cead" + +[[package]] +name = "psutil" +version = "6.0.0" +description = "Cross-platform lib for process and system monitoring in Python." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, + {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, + {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, + {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, + {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = 
"sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, + {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, + {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, + {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, + {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, + {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "psycopg2-binary" +version = "2.9.9" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"}, + {file = 
"psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, + {file = 
"psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", 
hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"}, +] + +[[package]] +name = "py-ocsf-models" +version = "0.2.0" +description = "This is a Python implementation of the OCSF models. The models are used to represent the data of the OCSF Schema defined in https://schema.ocsf.io/." 
+optional = false +python-versions = "<3.13,>=3.9" +files = [ + {file = "py_ocsf_models-0.2.0-py3-none-any.whl", hash = "sha256:ac75fd21077694b343ebaad3479194db113c274879b114277560ff287d5cd7b5"}, + {file = "py_ocsf_models-0.2.0.tar.gz", hash = "sha256:3e12648d05329e6776a0e6b1ffea87a3eb60aa7d8cb2c4afd69e5724f443ce03"}, +] + +[package.dependencies] +cryptography = "43.0.1" +email-validator = "2.2.0" +pydantic = "1.10.18" + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pycurl" +version = "7.45.3" +description = "PycURL -- A Python Interface To The cURL library" +optional = false +python-versions = ">=3.5" +files = [ + {file = "pycurl-7.45.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86f66d334deaaab20a576fb785587566081407adc703318203fe26e43277ef12"}, + {file = "pycurl-7.45.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:205983e87d6aa0b6e93ec7320060de44efaa905ecc5d13f70cbe38c65684c5c4"}, + {file = "pycurl-7.45.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbd4a6b8654b779089c5a44af1c65c1419c2cd60718780df6d8f354eb35d6d55"}, + {file = "pycurl-7.45.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:5ebc6a0ac60c371a9efaf7d55dec5820f76fdafb43a3be1e390011339dc329ae"}, + {file = "pycurl-7.45.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:2facab1c35600088cb82b5b093bd700bfbd1e3191deab24f7d1803d9dc5b76fc"}, + {file = "pycurl-7.45.3-cp310-cp310-win32.whl", hash = "sha256:7cfca02d70579853041063e53ca713d31161b8831b98d4f68c3554dc0448beec"}, + {file = "pycurl-7.45.3-cp310-cp310-win_amd64.whl", hash = "sha256:8451e8475051f16eb4776380384699cb8ddd10ea8410bcbfaee5a6fc4c046de6"}, + {file = "pycurl-7.45.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1610cc45b5bc8b39bc18b981d0473e59ef41226ee467eaa8fbfc7276603ef5af"}, + {file = "pycurl-7.45.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c854885398410fa6e88fc29f7a420a3c13b88bae9b4e10a804437b582e24f58b"}, + {file = "pycurl-7.45.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:921c9db0c3128481954f625b3b1bc10c730100aa944d54643528f716676439ee"}, + {file = "pycurl-7.45.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:483f3aa5d1bc8cff5657ad96f68e1d89281f971a7b6aa93408a31e3199981ea9"}, + {file = 
"pycurl-7.45.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1e0d32d6ed3a7ba13dbbd3a6fb50ca76c40c70e6bc6fe347f90677478d3422c7"}, + {file = "pycurl-7.45.3-cp311-cp311-win32.whl", hash = "sha256:beaaa4450e23d41dd0c2f2f47a4f8a171210271543550c2c556090c7eeea88f5"}, + {file = "pycurl-7.45.3-cp311-cp311-win_amd64.whl", hash = "sha256:dd33fd9de8907a6275c70113124aeb7eea672c1324f5d5423f203738b341697d"}, + {file = "pycurl-7.45.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0c41a172d5e8a5cdd8328cc8134f47b2a57960ac677f7cda8520eaa9fbe7d990"}, + {file = "pycurl-7.45.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13006b62c157bb4483c58e1abdced6df723c9399255a4f5f6bb7f8e425106679"}, + {file = "pycurl-7.45.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27f4c5c20c86a9a823677316724306fb1ce3b25ec568efd52026dc6c563e5b29"}, + {file = "pycurl-7.45.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:c2c246bc29e8762ff4c8a833ac5b4da4c797d16ab138286e8aec9b0c0a0da2d4"}, + {file = "pycurl-7.45.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3d07c5daef2d0d85949e32ec254ee44232bb57febb0634194379dd14d1ff4f87"}, + {file = "pycurl-7.45.3-cp312-cp312-win32.whl", hash = "sha256:9f7afe5ef0e4750ac4515baebc251ee94aaefe5de6e2e8a24668473128d69904"}, + {file = "pycurl-7.45.3-cp312-cp312-win_amd64.whl", hash = "sha256:3648ed9a57a6b704673faeab3dc64d1469cc69f2bc1ed8227ffa0f84e147c500"}, + {file = "pycurl-7.45.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c0915ea139f66a289edc4f9de10cb45078af1bb950491c5612969864236a2e7e"}, + {file = "pycurl-7.45.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43c5e61a58783ddf78ef84949f6bb6e52e092a13ec67678e9a9e21071ecf5b80"}, + {file = "pycurl-7.45.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bf613844a1647fe3d2bba1f5c9c96a62a85280123a57a8a0c8d2f37d518bc10a"}, + {file = "pycurl-7.45.3-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:936afd9c5ff7fe7457065e878a279811787778f472f9a4e8c5df79e7728358e2"}, + {file = "pycurl-7.45.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:dbf816a6d0cb71e7fd06609246bbea4eaf100649d9decf49e4eb329594f70be7"}, + {file = "pycurl-7.45.3-cp38-cp38-win32.whl", hash = "sha256:2c8a2ce568193f9f84763717d8961cec0db4ec1aa08c6bcf4d90da5eb72bec86"}, + {file = "pycurl-7.45.3-cp38-cp38-win_amd64.whl", hash = "sha256:80ac7c17e69ca6b76ccccb4255f7c29a2a36e5b69eb10c2adba82135d43afe8c"}, + {file = "pycurl-7.45.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fa7751b614d9aa82d7a0f49ca90924c29c6cedf85a2f8687fb6a772dbfe48711"}, + {file = "pycurl-7.45.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b129e9ee07f80b4af957607917af46ab517b0c4e746692f6d9e50e973edba8d8"}, + {file = "pycurl-7.45.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a0f920582b8713ca87d5a288a7532607bc4454275d733fc880650d602dbe3c67"}, + {file = "pycurl-7.45.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c7c13e4268550cde14a6f4743cc8bd8c035d4cd36514d58eff70276d68954b6f"}, + {file = "pycurl-7.45.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:0f0e1251a608ffd75fc502f4014442e554c67d3d7a1b0a839c35efb6ad2f8bf8"}, + {file = "pycurl-7.45.3-cp39-cp39-win32.whl", hash = "sha256:51a40a56c58e63dac6145829f9e9bd66e5867a9f0741bcb9ffefab619851d44f"}, + {file = "pycurl-7.45.3-cp39-cp39-win_amd64.whl", hash = "sha256:e08a06802c8c8a9d04cf3319f9230ec09062c55d2550bd48f8ada1df1431adcf"}, + {file = "pycurl-7.45.3.tar.gz", hash = "sha256:8c2471af9079ad798e1645ec0b0d3d4223db687379d17dd36a70637449f81d6b"}, +] + +[[package]] +name = "pydantic" +version = 
"1.10.18" +description = "Data validation and settings management using python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-1.10.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e405ffcc1254d76bb0e760db101ee8916b620893e6edfbfee563b3c6f7a67c02"}, + {file = "pydantic-1.10.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e306e280ebebc65040034bff1a0a81fd86b2f4f05daac0131f29541cafd80b80"}, + {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11d9d9b87b50338b1b7de4ebf34fd29fdb0d219dc07ade29effc74d3d2609c62"}, + {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b661ce52c7b5e5f600c0c3c5839e71918346af2ef20062705ae76b5c16914cab"}, + {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c20f682defc9ef81cd7eaa485879ab29a86a0ba58acf669a78ed868e72bb89e0"}, + {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c5ae6b7c8483b1e0bf59e5f1843e4fd8fd405e11df7de217ee65b98eb5462861"}, + {file = "pydantic-1.10.18-cp310-cp310-win_amd64.whl", hash = "sha256:74fe19dda960b193b0eb82c1f4d2c8e5e26918d9cda858cbf3f41dd28549cb70"}, + {file = "pydantic-1.10.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72fa46abace0a7743cc697dbb830a41ee84c9db8456e8d77a46d79b537efd7ec"}, + {file = "pydantic-1.10.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ef0fe7ad7cbdb5f372463d42e6ed4ca9c443a52ce544472d8842a0576d830da5"}, + {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a00e63104346145389b8e8f500bc6a241e729feaf0559b88b8aa513dd2065481"}, + {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae6fa2008e1443c46b7b3a5eb03800121868d5ab6bc7cda20b5df3e133cde8b3"}, + {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9f463abafdc92635da4b38807f5b9972276be7c8c5121989768549fceb8d2588"}, + {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3445426da503c7e40baccefb2b2989a0c5ce6b163679dd75f55493b460f05a8f"}, + {file = "pydantic-1.10.18-cp311-cp311-win_amd64.whl", hash = "sha256:467a14ee2183bc9c902579bb2f04c3d3dac00eff52e252850509a562255b2a33"}, + {file = "pydantic-1.10.18-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:efbc8a7f9cb5fe26122acba1852d8dcd1e125e723727c59dcd244da7bdaa54f2"}, + {file = "pydantic-1.10.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24a4a159d0f7a8e26bf6463b0d3d60871d6a52eac5bb6a07a7df85c806f4c048"}, + {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b74be007703547dc52e3c37344d130a7bfacca7df112a9e5ceeb840a9ce195c7"}, + {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcb20d4cb355195c75000a49bb4a31d75e4295200df620f454bbc6bdf60ca890"}, + {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:46f379b8cb8a3585e3f61bf9ae7d606c70d133943f339d38b76e041ec234953f"}, + {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbfbca662ed3729204090c4d09ee4beeecc1a7ecba5a159a94b5a4eb24e3759a"}, + {file = "pydantic-1.10.18-cp312-cp312-win_amd64.whl", hash = "sha256:c6d0a9f9eccaf7f438671a64acf654ef0d045466e63f9f68a579e2383b63f357"}, + {file = "pydantic-1.10.18-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:3d5492dbf953d7d849751917e3b2433fb26010d977aa7a0765c37425a4026ff1"}, + {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe734914977eed33033b70bfc097e1baaffb589517863955430bf2e0846ac30f"}, + {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15fdbe568beaca9aacfccd5ceadfb5f1a235087a127e8af5e48df9d8a45ae85c"}, + {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c3e742f62198c9eb9201781fbebe64533a3bbf6a76a91b8d438d62b813079dbc"}, + {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:19a3bd00b9dafc2cd7250d94d5b578edf7a0bd7daf102617153ff9a8fa37871c"}, + {file = "pydantic-1.10.18-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce3fcf75b2bae99aa31bd4968de0474ebe8c8258a0110903478bd83dfee4e3b"}, + {file = "pydantic-1.10.18-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:335a32d72c51a313b33fa3a9b0fe283503272ef6467910338e123f90925f0f03"}, + {file = "pydantic-1.10.18-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:34a3613c7edb8c6fa578e58e9abe3c0f5e7430e0fc34a65a415a1683b9c32d9a"}, + {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9ee4e6ca1d9616797fa2e9c0bfb8815912c7d67aca96f77428e316741082a1b"}, + {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23e8ec1ce4e57b4f441fc91e3c12adba023fedd06868445a5b5f1d48f0ab3682"}, + {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:44ae8a3e35a54d2e8fa88ed65e1b08967a9ef8c320819a969bfa09ce5528fafe"}, + {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5389eb3b48a72da28c6e061a247ab224381435256eb541e175798483368fdd3"}, + {file = "pydantic-1.10.18-cp38-cp38-win_amd64.whl", hash = "sha256:069b9c9fc645474d5ea3653788b544a9e0ccd3dca3ad8c900c4c6eac844b4620"}, + {file = "pydantic-1.10.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80b982d42515632eb51f60fa1d217dfe0729f008e81a82d1544cc392e0a50ddf"}, + {file = "pydantic-1.10.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:aad8771ec8dbf9139b01b56f66386537c6fe4e76c8f7a47c10261b69ad25c2c9"}, + {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941a2eb0a1509bd7f31e355912eb33b698eb0051730b2eaf9e70e2e1589cae1d"}, + {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65f7361a09b07915a98efd17fdec23103307a54db2000bb92095457ca758d485"}, + {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6951f3f47cb5ca4da536ab161ac0163cab31417d20c54c6de5ddcab8bc813c3f"}, + {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a4c5eec138a9b52c67f664c7d51d4c7234c5ad65dd8aacd919fb47445a62c86"}, + {file = "pydantic-1.10.18-cp39-cp39-win_amd64.whl", hash = "sha256:49e26c51ca854286bffc22b69787a8d4063a62bf7d83dc21d44d2ff426108518"}, + {file = "pydantic-1.10.18-py3-none-any.whl", hash = "sha256:06a189b81ffc52746ec9c8c007f16e5167c8b0a696e1a726369327e3db7b2a82"}, + {file = "pydantic-1.10.18.tar.gz", hash = "sha256:baebdff1907d1d96a139c25136a9bb7d17e118f133a76a2ef3b845e831e3403a"}, +] + +[package.dependencies] +typing-extensions = ">=4.2.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pygments" +version = "2.18.0" +description = 
"Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyjwt" +version = "2.9.0" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"}, + {file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"}, +] + +[package.dependencies] +cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pylint" +version = "3.2.5" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.5-py3-none-any.whl", hash = "sha256:32cd6c042b5004b8e857d727708720c54a676d1e22917cf1a2df9b4d4868abd6"}, + {file = "pylint-3.2.5.tar.gz", hash = "sha256:e9b7171e242dcc6ebd0aaa7540481d1a72860748a0a7816b8fe6cf6c80a6fe7e"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomlkit = ">=0.10.1" + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyparsing" +version = "3.1.4" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, + {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pytest" +version = "8.2.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, + {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2.0" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-celery" +version = "1.1.3" +description = "Pytest plugin for Celery" +optional = false 
+python-versions = "<4.0,>=3.8" +files = [ + {file = "pytest_celery-1.1.3-py3-none-any.whl", hash = "sha256:4cdb5f658dc472509e8be71f745d26bcb8246397661534f5709d2a55edc43286"}, + {file = "pytest_celery-1.1.3.tar.gz", hash = "sha256:ac7eee546b4d9fb5c742eaaece98187f1f5e5f5622fbaa8e7729bb46923c54fc"}, +] + +[package.dependencies] +boto3 = {version = "*", optional = true, markers = "extra == \"all\" or extra == \"sqs\""} +botocore = {version = "*", optional = true, markers = "extra == \"all\" or extra == \"sqs\""} +celery = "*" +debugpy = ">=1.8.5,<2.0.0" +docker = ">=7.1.0,<8.0.0" +psutil = ">=6.0.0" +pycurl = {version = "*", optional = true, markers = "sys_platform != \"win32\" and platform_python_implementation == \"CPython\" and (extra == \"all\" or extra == \"sqs\")"} +pytest-docker-tools = ">=3.1.3" +python-memcached = {version = "*", optional = true, markers = "extra == \"all\" or extra == \"memcached\""} +redis = {version = "*", optional = true, markers = "extra == \"all\" or extra == \"redis\""} +setuptools = ">=75.1.0" +tenacity = ">=9.0.0" +urllib3 = {version = "*", optional = true, markers = "extra == \"all\" or extra == \"sqs\""} + +[package.extras] +all = ["boto3", "botocore", "pycurl", "python-memcached", "redis", "urllib3"] +memcached = ["python-memcached"] +redis = ["redis"] +sqs = ["boto3", "botocore", "pycurl", "urllib3"] + +[[package]] +name = "pytest-cov" +version = "5.0.0" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pytest-django" +version = "4.8.0" +description = "A Django plugin for pytest." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-django-4.8.0.tar.gz", hash = "sha256:5d054fe011c56f3b10f978f41a8efb2e5adfc7e680ef36fb571ada1f24779d90"}, + {file = "pytest_django-4.8.0-py3-none-any.whl", hash = "sha256:ca1ddd1e0e4c227cf9e3e40a6afc6d106b3e70868fd2ac5798a22501271cd0c7"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx", "sphinx-rtd-theme"] +testing = ["Django", "django-configurations (>=2.0)"] + +[[package]] +name = "pytest-docker-tools" +version = "3.1.3" +description = "Docker integration tests for pytest" +optional = false +python-versions = ">=3.7.0,<4.0.0" +files = [ + {file = "pytest_docker_tools-3.1.3-py3-none-any.whl", hash = "sha256:63e659043160f41d89f94ea42616102594bcc85682aac394fcbc14f14cd1b189"}, + {file = "pytest_docker_tools-3.1.3.tar.gz", hash = "sha256:c7e28841839d67b3ac80ad7b345b953701d5ae61ffda97586114244292aeacc0"}, +] + +[package.dependencies] +docker = ">=4.3.1" +pytest = ">=6.0.1" + +[[package]] +name = "pytest-env" +version = "1.1.3" +description = "pytest plugin that allows you to add environment variables." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_env-1.1.3-py3-none-any.whl", hash = "sha256:aada77e6d09fcfb04540a6e462c58533c37df35fa853da78707b17ec04d17dfc"}, + {file = "pytest_env-1.1.3.tar.gz", hash = "sha256:fcd7dc23bb71efd3d35632bde1bbe5ee8c8dc4489d6617fb010674880d96216b"}, +] + +[package.dependencies] +pytest = ">=7.4.3" + +[package.extras] +test = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "pytest-mock (>=3.12)"] + +[[package]] +name = "pytest-randomly" +version = "3.15.0" +description = "Pytest plugin to randomly order tests and control random.seed." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_randomly-3.15.0-py3-none-any.whl", hash = "sha256:0516f4344b29f4e9cdae8bce31c4aeebf59d0b9ef05927c33354ff3859eeeca6"}, + {file = "pytest_randomly-3.15.0.tar.gz", hash = "sha256:b908529648667ba5e54723088edd6f82252f540cc340d748d1fa985539687047"}, +] + +[package.dependencies] +pytest = "*" + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, +] + +[package.dependencies] +execnet = ">=2.1" +pytest = ">=7.0.0" + +[package.extras] +psutil = ["psutil (>=3.0)"] +setproctitle = ["setproctitle"] +testing = ["filelock"] + +[[package]] +name = "python-crontab" +version = "3.2.0" +description = "Python Crontab API" +optional = false +python-versions = "*" +files = [ + {file = "python_crontab-3.2.0-py3-none-any.whl", hash = "sha256:82cb9b6a312d41ff66fd3caf3eed7115c28c195bfb50711bc2b4b9592feb9fe5"}, + {file = "python_crontab-3.2.0.tar.gz", hash = "sha256:40067d1dd39ade3460b2ad8557c7651514cd3851deffff61c5c60e1227c5c36b"}, +] + +[package.dependencies] +python-dateutil = "*" + +[package.extras] +cron-description = ["cron-descriptor"] +cron-schedule = ["croniter"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-memcached" +version = "1.62" +description = "Pure python memcached client" +optional = false +python-versions = "*" +files = [ + {file = "python-memcached-1.62.tar.gz", hash = "sha256:0285470599b7f593fbf3bec084daa1f483221e68c1db2cf1d846a9f7c2655103"}, + {file = "python_memcached-1.62-py2.py3-none-any.whl", hash = "sha256:1bdd8d2393ff53e80cd5e9442d750e658e0b35c3eebb3211af137303e3b729d1"}, +] + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pywin32" 
+version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "redis" +version = "5.1.0" +description = "Python client for Redis database and key-value store" +optional = false +python-versions = ">=3.8" +files = [ + {file = "redis-5.1.0-py3-none-any.whl", hash = "sha256:fd4fccba0d7f6aa48c58a78d76ddb4afc698f5da4a2c1d03d916e4fd7ab88cdd"}, + {file = "redis-5.1.0.tar.gz", hash = "sha256:b756df1e4a3858fcc0ef861f3fc53623a96c41e2b1f5304e09e0fe758d333d40"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} + +[package.extras] +hiredis = 
["hiredis (>=3.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"] + +[[package]] +name = "referencing" +version = "0.35.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-file" +version = "2.1.0" +description = "File transport adapter for Requests" +optional = false +python-versions = "*" +files = [ + {file = "requests_file-2.1.0-py2.py3-none-any.whl", hash = "sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c"}, + {file = "requests_file-2.1.0.tar.gz", hash = "sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658"}, +] + +[package.dependencies] +requests = ">=1.0.0" + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=3.4" +files = [ + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "retrying" +version = "1.3.4" +description = "Retrying" +optional = false +python-versions = "*" +files = [ + {file = "retrying-1.3.4-py3-none-any.whl", hash = "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35"}, + {file = "retrying-1.3.4.tar.gz", hash = "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e"}, +] + +[package.dependencies] +six = ">=1.7.0" + +[[package]] +name = "rich" +version = "13.9.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "rich-13.9.1-py3-none-any.whl", hash = "sha256:b340e739f30aa58921dc477b8adaa9ecdb7cecc217be01d93730ee1bc8aa83be"}, + {file = "rich-13.9.1.tar.gz", hash = "sha256:097cffdf85db1babe30cc7deba5ab3a29e1b9885047dab24c57e9a7f8a9c1466"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rpds-py" +version = "0.20.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, + {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"}, + {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = 
"sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"}, + {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"}, + {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"}, + {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"}, + {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"}, + {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"}, + {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"}, + {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"}, + {file = 
"rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"}, + {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"}, + {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"}, + {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"}, + {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"}, + {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"}, + {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"}, + {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"}, + {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"}, + {file = 
"rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"}, + {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"}, + {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"}, + {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"}, + {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"}, + {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"}, + {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"}, + {file = 
"rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"}, + {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, +] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "ruamel-yaml" +version = "0.18.6" +description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, +] + +[package.dependencies] +"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.13\""} + +[package.extras] +docs = ["mercurial (>5.7)", "ryd"] +jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.8" +description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" +optional = false +python-versions = ">=3.6" +files = [ + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, + {file = 
"ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, + {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, 
+ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, + {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, +] + +[[package]] +name = "ruff" +version = "0.5.0" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"}, + {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"}, + {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"}, + {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"}, + {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"}, + {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"}, + {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"}, + {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"}, + {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"}, + {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"}, + {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"}, + {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"}, +] + +[[package]] +name = "s3transfer" +version = "0.10.2" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">=3.8" +files = [ + {file = "s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69"}, + {file = "s3transfer-0.10.2.tar.gz", hash = "sha256:0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6"}, +] + +[package.dependencies] +botocore = ">=1.33.2,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] + +[[package]] +name = "safety" +version = "3.2.3" +description = "Checks installed dependencies for known vulnerabilities and licenses." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "safety-3.2.3-py3-none-any.whl", hash = "sha256:cda1e91749f610337a18b7f21f78267c127e44ebbbbcbbd419c83284279a5024"}, + {file = "safety-3.2.3.tar.gz", hash = "sha256:414154934f1727daf8a6473493944fecb380540c3f00875dc1ae377382f7d83f"}, +] + +[package.dependencies] +Authlib = ">=1.2.0" +Click = ">=8.0.2" +dparse = ">=0.6.4b0" +jinja2 = ">=3.1.0" +marshmallow = ">=3.15.0" +packaging = ">=21.0" +pydantic = ">=1.10.12" +requests = "*" +rich = "*" +"ruamel.yaml" = ">=0.17.21" +safety-schemas = ">=0.0.2" +setuptools = ">=65.5.1" +typer = "*" +typing-extensions = ">=4.7.1" +urllib3 = ">=1.26.5" + +[package.extras] +github = ["pygithub (>=1.43.3)"] +gitlab = ["python-gitlab (>=1.3.0)"] +spdx = ["spdx-tools (>=0.8.2)"] + +[[package]] +name = "safety-schemas" +version = "0.0.5" +description = "Schemas for Safety tools" +optional = false +python-versions = ">=3.7" +files = [ + {file = "safety_schemas-0.0.5-py3-none-any.whl", hash = "sha256:6ac9eb71e60f0d4e944597c01dd48d6d8cd3d467c94da4aba3702a05a3a6ab4f"}, + {file = "safety_schemas-0.0.5.tar.gz", hash = "sha256:0de5fc9a53d4423644a8ce9a17a2e474714aa27e57f3506146e95a41710ff104"}, +] + +[package.dependencies] +dparse = ">=0.6.4b0" +packaging = ">=21.0" +pydantic = "*" +ruamel-yaml = ">=0.17.21" +typing-extensions = ">=4.7.1" + +[[package]] +name = "schema" +version = "0.7.7" +description = "Simple data validation library" +optional = false +python-versions = "*" +files = [ + {file = "schema-0.7.7-py2.py3-none-any.whl", hash = "sha256:5d976a5b50f36e74e2157b47097b60002bd4d42e65425fcc9c9befadb4255dde"}, + {file = "schema-0.7.7.tar.gz", hash = "sha256:7da553abd2958a19dc2547c388cde53398b39196175a9be59ea1caf5ab0a1807"}, +] + +[[package]] +name = "setuptools" +version = "75.1.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, + {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] + +[[package]] +name = "shellingham" +version 
= "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "shodan" +version = "1.31.0" +description = "Python library and command-line utility for Shodan (https://developer.shodan.io)" +optional = false +python-versions = "*" +files = [ + {file = "shodan-1.31.0.tar.gz", hash = "sha256:c73275386ea02390e196c35c660706a28dd4d537c5a21eb387ab6236fac251f6"}, +] + +[package.dependencies] +click = "*" +click-plugins = "*" +colorama = "*" +requests = ">=2.2.1" +tldextract = "*" +XlsxWriter = "*" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "slack-sdk" +version = "3.33.3" +description = "The Slack API Platform SDK for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "slack_sdk-3.33.3-py2.py3-none-any.whl", hash = "sha256:0515fb93cd03b18de61f876a8304c4c3cef4dd3c2a3bad62d7394d2eb5a3c8e6"}, + {file = "slack_sdk-3.33.3.tar.gz", hash = "sha256:4cc44c9ffe4bb28a01fbe3264c2f466c783b893a4eca62026ab845ec7c176ff1"}, +] + +[package.extras] +optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=9.1,<14)"] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "sqlparse" +version = "0.5.1" +description = "A non-validating SQL parser." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "sqlparse-0.5.1-py3-none-any.whl", hash = "sha256:773dcbf9a5ab44a090f3441e2180efe2560220203dc2f8c0b0fa141e18b505e4"}, + {file = "sqlparse-0.5.1.tar.gz", hash = "sha256:bb6b4df465655ef332548e24f08e205afc81b9ab86cb1c45657a7ff173a3a00e"}, +] + +[package.extras] +dev = ["build", "hatch"] +doc = ["sphinx"] + +[[package]] +name = "std-uritemplate" +version = "2.0.0" +description = "std-uritemplate implementation for Python" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "std_uritemplate-2.0.0-py3-none-any.whl", hash = "sha256:b69a80b707b93095075066005155eebd9172ec1ac7601992e2f9e4ec380f5a7b"}, + {file = "std_uritemplate-2.0.0.tar.gz", hash = "sha256:be88c8a4a89fee6768237f594c537d9347d96d7e68b8ad12200a3f11a0f6398f"}, +] + +[[package]] +name = "stevedore" +version = "5.3.0" +description = "Manage dynamic plugins for Python applications" +optional = false +python-versions = ">=3.8" +files = [ + {file = "stevedore-5.3.0-py3-none-any.whl", hash = "sha256:1efd34ca08f474dad08d9b19e934a22c68bb6fe416926479ba29e5013bcc8f78"}, + {file = "stevedore-5.3.0.tar.gz", hash = "sha256:9a64265f4060312828151c204efbe9b7a9852a0d9228756344dbc7e4023e375a"}, +] + +[package.dependencies] +pbr = ">=2.0.0" + +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +name = "tenacity" +version = "9.0.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tldextract" +version = "5.1.2" +description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "tldextract-5.1.2-py3-none-any.whl", hash = "sha256:4dfc4c277b6b97fa053899fcdb892d2dc27295851ab5fac4e07797b6a21b2e46"}, + {file = "tldextract-5.1.2.tar.gz", hash = "sha256:c9e17f756f05afb5abac04fe8f766e7e70f9fe387adb1859f0f52408ee060200"}, +] + +[package.dependencies] +filelock = ">=3.0.8" +idna = "*" +requests = ">=2.1.0" +requests-file = ">=1.4" + +[package.extras] +release = ["build", "twine"] +testing = ["black", "mypy", "pytest", "pytest-gitignore", "pytest-mock", "responses", "ruff", "syrupy", "tox", "types-filelock", "types-requests"] + +[[package]] +name = "tomlkit" +version = "0.13.2" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, +] + +[[package]] +name = "typer" +version = "0.12.5" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.7" +files = [ + {file = "typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b"}, + {file = "typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722"}, +] + +[package.dependencies] +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" +typing-extensions = ">=3.7.4.3" + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "tzdata" +version = "2024.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, +] + +[[package]] +name = "tzlocal" +version = "5.2" +description = "tzinfo object for the local timezone" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tzlocal-5.2-py3-none-any.whl", hash = "sha256:49816ef2fe65ea8ac19d19aa7a1ae0551c834303d5014c6d5a62e4cbda8047b8"}, + {file = "tzlocal-5.2.tar.gz", hash = "sha256:8d399205578f1a9342816409cc1e46a93ebd5755e39ea2d85334bea911bf0e6e"}, +] + +[package.dependencies] +tzdata = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +devenv = ["check-manifest", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] + +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + +[[package]] +name = "urllib3" +version = "2.2.3" 
+description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uuid6" +version = "2024.7.10" +description = "New time-based UUID formats which are suited for use as a database key" +optional = false +python-versions = ">=3.8" +files = [ + {file = "uuid6-2024.7.10-py3-none-any.whl", hash = "sha256:93432c00ba403751f722829ad21759ff9db051dea140bf81493271e8e4dd18b7"}, + {file = "uuid6-2024.7.10.tar.gz", hash = "sha256:2d29d7f63f593caaeea0e0d0dd0ad8129c9c663b29e19bdf882e864bedf18fb0"}, +] + +[[package]] +name = "vine" +version = "5.1.0" +description = "Python promises." +optional = false +python-versions = ">=3.6" +files = [ + {file = "vine-5.1.0-py3-none-any.whl", hash = "sha256:40fdf3c48b2cfe1c38a49e9ae2da6fda88e4794c810050a728bd7413811fb1dc"}, + {file = "vine-5.1.0.tar.gz", hash = "sha256:8b62e981d35c41049211cf62a0a1242d8c1ee9bd15bb196ce38aefd6799e61e0"}, +] + +[[package]] +name = "vulture" +version = "2.11" +description = "Find dead code" +optional = false +python-versions = ">=3.8" +files = [ + {file = "vulture-2.11-py2.py3-none-any.whl", hash = "sha256:12d745f7710ffbf6aeb8279ba9068a24d4e52e8ed333b8b044035c9d6b823aba"}, + {file = "vulture-2.11.tar.gz", hash = "sha256:f0fbb60bce6511aad87ee0736c502456737490a82d919a44e6d92262cb35f1c2"}, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "websocket-client" +version = "1.8.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = "werkzeug" +version = "3.0.6" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.6-py3-none-any.whl", hash = "sha256:1bc0c2310d2fbb07b1dd1105eba2f7af72f322e1e455f2f93c993bee8c8a5f17"}, + {file = "werkzeug-3.0.6.tar.gz", hash = "sha256:a8dd59d4de28ca70471a34cba79bed5f7ef2e036a76b3ab0835474246eb41f8d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "xlsxwriter" +version = "3.2.0" +description = "A Python module for creating Excel XLSX files." +optional = false +python-versions = ">=3.6" +files = [ + {file = "XlsxWriter-3.2.0-py3-none-any.whl", hash = "sha256:ecfd5405b3e0e228219bcaf24c2ca0915e012ca9464a14048021d21a995d490e"}, + {file = "XlsxWriter-3.2.0.tar.gz", hash = "sha256:9977d0c661a72866a61f9f7a809e25ebbb0fb7036baa3b9fe74afcfca6b3cb8c"}, +] + +[[package]] +name = "yarl" +version = "1.13.1" +description = "Yet another URL library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "yarl-1.13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:82e692fb325013a18a5b73a4fed5a1edaa7c58144dc67ad9ef3d604eccd451ad"}, + {file = "yarl-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df4e82e68f43a07735ae70a2d84c0353e58e20add20ec0af611f32cd5ba43fb4"}, + {file = "yarl-1.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec9dd328016d8d25702a24ee274932aebf6be9787ed1c28d021945d264235b3c"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5820bd4178e6a639b3ef1db8b18500a82ceab6d8b89309e121a6859f56585b05"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86c438ce920e089c8c2388c7dcc8ab30dfe13c09b8af3d306bcabb46a053d6f7"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3de86547c820e4f4da4606d1c8ab5765dd633189791f15247706a2eeabc783ae"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca53632007c69ddcdefe1e8cbc3920dd88825e618153795b57e6ebcc92e752a"}, + {file = "yarl-1.13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4ee1d240b84e2f213565f0ec08caef27a0e657d4c42859809155cf3a29d1735"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c49f3e379177f4477f929097f7ed4b0622a586b0aa40c07ac8c0f8e40659a1ac"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5c5e32fef09ce101fe14acd0f498232b5710effe13abac14cd95de9c274e689e"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab9524e45ee809a083338a749af3b53cc7efec458c3ad084361c1dbf7aaf82a2"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:b1481c048fe787f65e34cb06f7d6824376d5d99f1231eae4778bbe5c3831076d"}, + {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:31497aefd68036d8e31bfbacef915826ca2e741dbb97a8d6c7eac66deda3b606"}, + {file = "yarl-1.13.1-cp310-cp310-win32.whl", hash = "sha256:1fa56f34b2236f5192cb5fceba7bbb09620e5337e0b6dfe2ea0ddbd19dd5b154"}, + {file = "yarl-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:1bbb418f46c7f7355084833051701b2301092e4611d9e392360c3ba2e3e69f88"}, + {file = "yarl-1.13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:216a6785f296169ed52cd7dcdc2612f82c20f8c9634bf7446327f50398732a51"}, + {file = "yarl-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:40c6e73c03a6befb85b72da213638b8aaa80fe4136ec8691560cf98b11b8ae6e"}, + {file = "yarl-1.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:2430cf996113abe5aee387d39ee19529327205cda975d2b82c0e7e96e5fdabdc"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fb4134cc6e005b99fa29dbc86f1ea0a298440ab6b07c6b3ee09232a3b48f495"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:309c104ecf67626c033845b860d31594a41343766a46fa58c3309c538a1e22b2"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f90575e9fe3aae2c1e686393a9689c724cd00045275407f71771ae5d690ccf38"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d2e1626be8712333a9f71270366f4a132f476ffbe83b689dd6dc0d114796c74"}, + {file = "yarl-1.13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b66c87da3c6da8f8e8b648878903ca54589038a0b1e08dde2c86d9cd92d4ac9"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cf1ad338620249f8dd6d4b6a91a69d1f265387df3697ad5dc996305cf6c26fb2"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9915300fe5a0aa663c01363db37e4ae8e7c15996ebe2c6cce995e7033ff6457f"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:703b0f584fcf157ef87816a3c0ff868e8c9f3c370009a8b23b56255885528f10"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1d8e3ca29f643dd121f264a7c89f329f0fcb2e4461833f02de6e39fef80f89da"}, + {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7055bbade838d68af73aea13f8c86588e4bcc00c2235b4b6d6edb0dbd174e246"}, + {file = "yarl-1.13.1-cp311-cp311-win32.whl", hash = "sha256:a3442c31c11088e462d44a644a454d48110f0588de830921fd201060ff19612a"}, + {file = "yarl-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:81bad32c8f8b5897c909bf3468bf601f1b855d12f53b6af0271963ee67fff0d2"}, + {file = "yarl-1.13.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f452cc1436151387d3d50533523291d5f77c6bc7913c116eb985304abdbd9ec9"}, + {file = "yarl-1.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9cec42a20eae8bebf81e9ce23fb0d0c729fc54cf00643eb251ce7c0215ad49fe"}, + {file = "yarl-1.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d959fe96e5c2712c1876d69af0507d98f0b0e8d81bee14cfb3f6737470205419"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8c837ab90c455f3ea8e68bee143472ee87828bff19ba19776e16ff961425b57"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94a993f976cdcb2dc1b855d8b89b792893220db8862d1a619efa7451817c836b"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b2442a415a5f4c55ced0fade7b72123210d579f7d950e0b5527fc598866e62c"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fdbf0418489525231723cdb6c79e7738b3cbacbaed2b750cb033e4ea208f220"}, + {file = "yarl-1.13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b7f6e699304717fdc265a7e1922561b02a93ceffdaefdc877acaf9b9f3080b8"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bcd5bf4132e6a8d3eb54b8d56885f3d3a38ecd7ecae8426ecf7d9673b270de43"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2a93a4557f7fc74a38ca5a404abb443a242217b91cd0c4840b1ebedaad8919d4"}, + {file = 
"yarl-1.13.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:22b739f99c7e4787922903f27a892744189482125cc7b95b747f04dd5c83aa9f"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2db874dd1d22d4c2c657807562411ffdfabec38ce4c5ce48b4c654be552759dc"}, + {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4feaaa4742517eaceafcbe74595ed335a494c84634d33961214b278126ec1485"}, + {file = "yarl-1.13.1-cp312-cp312-win32.whl", hash = "sha256:bbf9c2a589be7414ac4a534d54e4517d03f1cbb142c0041191b729c2fa23f320"}, + {file = "yarl-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:d07b52c8c450f9366c34aa205754355e933922c79135125541daae6cbf31c799"}, + {file = "yarl-1.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:95c6737f28069153c399d875317f226bbdea939fd48a6349a3b03da6829fb550"}, + {file = "yarl-1.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cd66152561632ed4b2a9192e7f8e5a1d41e28f58120b4761622e0355f0fe034c"}, + {file = "yarl-1.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6a2acde25be0cf9be23a8f6cbd31734536a264723fca860af3ae5e89d771cd71"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18595e6a2ee0826bf7dfdee823b6ab55c9b70e8f80f8b77c37e694288f5de1"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a31d21089894942f7d9a8df166b495101b7258ff11ae0abec58e32daf8088813"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45f209fb4bbfe8630e3d2e2052535ca5b53d4ce2d2026bed4d0637b0416830da"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f722f30366474a99745533cc4015b1781ee54b08de73260b2bbe13316079851"}, + {file = "yarl-1.13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3bf60444269345d712838bb11cc4eadaf51ff1a364ae39ce87a5ca8ad3bb2c8"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:942c80a832a79c3707cca46bd12ab8aa58fddb34b1626d42b05aa8f0bcefc206"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:44b07e1690f010c3c01d353b5790ec73b2f59b4eae5b0000593199766b3f7a5c"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:396e59b8de7e4d59ff5507fb4322d2329865b909f29a7ed7ca37e63ade7f835c"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:3bb83a0f12701c0b91112a11148b5217617982e1e466069d0555be9b372f2734"}, + {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c92b89bffc660f1274779cb6fbb290ec1f90d6dfe14492523a0667f10170de26"}, + {file = "yarl-1.13.1-cp313-cp313-win32.whl", hash = "sha256:269c201bbc01d2cbba5b86997a1e0f73ba5e2f471cfa6e226bcaa7fd664b598d"}, + {file = "yarl-1.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:1d0828e17fa701b557c6eaed5edbd9098eb62d8838344486248489ff233998b8"}, + {file = "yarl-1.13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8be8cdfe20787e6a5fcbd010f8066227e2bb9058331a4eccddec6c0db2bb85b2"}, + {file = "yarl-1.13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08d7148ff11cb8e886d86dadbfd2e466a76d5dd38c7ea8ebd9b0e07946e76e4b"}, + {file = "yarl-1.13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4afdf84610ca44dcffe8b6c22c68f309aff96be55f5ea2fa31c0c225d6b83e23"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d0d12fe78dcf60efa205e9a63f395b5d343e801cf31e5e1dda0d2c1fb618073d"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298c1eecfd3257aa16c0cb0bdffb54411e3e831351cd69e6b0739be16b1bdaa8"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c14c16831b565707149c742d87a6203eb5597f4329278446d5c0ae7a1a43928e"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a9bacedbb99685a75ad033fd4de37129449e69808e50e08034034c0bf063f99"}, + {file = "yarl-1.13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:658e8449b84b92a4373f99305de042b6bd0d19bf2080c093881e0516557474a5"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:373f16f38721c680316a6a00ae21cc178e3a8ef43c0227f88356a24c5193abd6"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:45d23c4668d4925688e2ea251b53f36a498e9ea860913ce43b52d9605d3d8177"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f7917697bcaa3bc3e83db91aa3a0e448bf5cde43c84b7fc1ae2427d2417c0224"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5989a38ba1281e43e4663931a53fbf356f78a0325251fd6af09dd03b1d676a09"}, + {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:11b3ca8b42a024513adce810385fcabdd682772411d95bbbda3b9ed1a4257644"}, + {file = "yarl-1.13.1-cp38-cp38-win32.whl", hash = "sha256:dcaef817e13eafa547cdfdc5284fe77970b891f731266545aae08d6cce52161e"}, + {file = "yarl-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:7addd26594e588503bdef03908fc207206adac5bd90b6d4bc3e3cf33a829f57d"}, + {file = "yarl-1.13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a0ae6637b173d0c40b9c1462e12a7a2000a71a3258fa88756a34c7d38926911c"}, + {file = "yarl-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:576365c9f7469e1f6124d67b001639b77113cfd05e85ce0310f5f318fd02fe85"}, + {file = "yarl-1.13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78f271722423b2d4851cf1f4fa1a1c4833a128d020062721ba35e1a87154a049"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d74f3c335cfe9c21ea78988e67f18eb9822f5d31f88b41aec3a1ec5ecd32da5"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1891d69a6ba16e89473909665cd355d783a8a31bc84720902c5911dbb6373465"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb382fd7b4377363cc9f13ba7c819c3c78ed97c36a82f16f3f92f108c787cbbf"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c8854b9f80693d20cec797d8e48a848c2fb273eb6f2587b57763ccba3f3bd4b"}, + {file = "yarl-1.13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbf2c3f04ff50f16404ce70f822cdc59760e5e2d7965905f0e700270feb2bbfc"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fb9f59f3848edf186a76446eb8bcf4c900fe147cb756fbbd730ef43b2e67c6a7"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ef9b85fa1bc91c4db24407e7c4da93a5822a73dd4513d67b454ca7064e8dc6a3"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:098b870c18f1341786f290b4d699504e18f1cd050ed179af8123fd8232513424"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:8c723c91c94a3bc8033dd2696a0f53e5d5f8496186013167bddc3fb5d9df46a3"}, + {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:44a4c40a6f84e4d5955b63462a0e2a988f8982fba245cf885ce3be7618f6aa7d"}, + {file = "yarl-1.13.1-cp39-cp39-win32.whl", hash = "sha256:84bbcdcf393139f0abc9f642bf03f00cac31010f3034faa03224a9ef0bb74323"}, + {file = "yarl-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:fc2931ac9ce9c61c9968989ec831d3a5e6fcaaff9474e7cfa8de80b7aff5a093"}, + {file = "yarl-1.13.1-py3-none-any.whl", hash = "sha256:6a5185ad722ab4dd52d5fb1f30dcc73282eb1ed494906a92d1a228d3f89607b0"}, + {file = "yarl-1.13.1.tar.gz", hash = "sha256:ec8cfe2295f3e5e44c51f57272afbd69414ae629ec7c6b27f5a410efc78b70a0"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[[package]] +name = "zipp" +version = "3.20.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.11,<3.13" +content-hash = "b9ad538d2d86a404dd212731bd1eb6555b6a72a232870f5da24da6b6bc7aa040" diff --git a/api/pyproject.toml b/api/pyproject.toml new file mode 100644 index 0000000000..b4550fbae5 --- /dev/null +++ b/api/pyproject.toml @@ -0,0 +1,55 @@ +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core"] + +[tool.poetry] +authors = ["Prowler Team"] +description = "Prowler's API (Django/DRF)" +license = "AGPL-3.0" +name = "prowler-api" +package-mode = false +version = "1.0.0" + +[tool.poetry.dependencies] +celery = {extras = ["pytest"], version = "^5.4.0"} +django = "5.1.1" +django-celery-beat = "^2.7.0" +django-celery-results = "^2.5.1" +django-cors-headers = "4.4.0" +django-environ = "0.11.2" +django-filter = "24.3" +django-guid = "3.5.0" +django-postgres-extra = "^2.0.8" +djangorestframework = "3.15.2" +djangorestframework-jsonapi = "7.0.2" +djangorestframework-simplejwt = "^5.3.1" +drf-nested-routers = "^0.94.1" +drf-spectacular = "0.27.2" +drf-spectacular-jsonapi = "0.5.1" +gunicorn = "23.0.0" +prowler = {git = "https://github.com/prowler-cloud/prowler.git", branch = "master"} +psycopg2-binary = "2.9.9" +pytest-celery = {extras = ["redis"], version = "^1.0.1"} +# Needed for prowler compatibility +python = ">=3.11,<3.13" +uuid6 = "2024.7.10" + +[tool.poetry.group.dev.dependencies] +bandit = "1.7.9" +coverage = "7.5.4" +docker = "7.1.0" +freezegun = "1.5.1" +mypy = "1.10.1" +pylint = "3.2.5" +pytest = "8.2.2" +pytest-cov = "5.0.0" +pytest-django = "4.8.0" +pytest-env = "1.1.3" +pytest-randomly = "3.15.0" +pytest-xdist = "3.6.1" +ruff = "0.5.0" +safety = "3.2.3" +vulture = "2.11" + +[tool.poetry.scripts] +celery = "src.backend.config.settings.celery" diff --git a/api/src/__init__.py b/api/src/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff 
--git a/api/src/backend/__init__.py b/api/src/backend/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/src/backend/api/__init__.py b/api/src/backend/api/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/src/backend/api/admin.py b/api/src/backend/api/admin.py new file mode 100644 index 0000000000..4185d360e9 --- /dev/null +++ b/api/src/backend/api/admin.py @@ -0,0 +1,3 @@ +# from django.contrib import admin + +# Register your models here. diff --git a/api/src/backend/api/apps.py b/api/src/backend/api/apps.py new file mode 100644 index 0000000000..bfd78e30c7 --- /dev/null +++ b/api/src/backend/api/apps.py @@ -0,0 +1,12 @@ +from django.apps import AppConfig + + +class ApiConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "api" + + def ready(self): + from api import signals # noqa: F401 + from api.compliance import load_prowler_compliance + + load_prowler_compliance() diff --git a/api/src/backend/api/base_views.py b/api/src/backend/api/base_views.py new file mode 100644 index 0000000000..05cb174680 --- /dev/null +++ b/api/src/backend/api/base_views.py @@ -0,0 +1,103 @@ +import uuid + +from django.db import transaction, connection +from rest_framework import permissions +from rest_framework.exceptions import NotAuthenticated +from rest_framework.filters import SearchFilter +from rest_framework_json_api import filters +from rest_framework_json_api.serializers import ValidationError +from rest_framework_json_api.views import ModelViewSet +from rest_framework_simplejwt.authentication import JWTAuthentication + +from api.filters import CustomDjangoFilterBackend + + +class BaseViewSet(ModelViewSet): + authentication_classes = [JWTAuthentication] + permission_classes = [permissions.IsAuthenticated] + filter_backends = [ + filters.QueryParameterValidationFilter, + filters.OrderingFilter, + CustomDjangoFilterBackend, + SearchFilter, + ] + + filterset_fields = [] + search_fields = [] + + ordering_fields = "__all__" + ordering = ["id"] + + def get_queryset(self): + raise NotImplementedError + + +class BaseRLSViewSet(BaseViewSet): + def dispatch(self, request, *args, **kwargs): + with transaction.atomic(): + return super().dispatch(request, *args, **kwargs) + + def initial(self, request, *args, **kwargs): + # Ideally, this logic would be in the `.setup()` method but DRF view sets don't call it + # https://docs.djangoproject.com/en/5.1/ref/class-based-views/base/#django.views.generic.base.View.setup + if request.auth is None: + raise NotAuthenticated + + tenant_id = request.auth.get("tenant_id") + if tenant_id is None: + raise NotAuthenticated("Tenant ID is not present in token") + + try: + uuid.UUID(tenant_id) + except ValueError: + raise ValidationError("Tenant ID must be a valid UUID") + + with connection.cursor() as cursor: + cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);") + self.request.tenant_id = tenant_id + return super().initial(request, *args, **kwargs) + + def get_serializer_context(self): + context = super().get_serializer_context() + context["tenant_id"] = self.request.tenant_id + return context + + +class BaseTenantViewset(BaseViewSet): + def dispatch(self, request, *args, **kwargs): + with transaction.atomic(): + return super().dispatch(request, *args, **kwargs) + + def initial(self, request, *args, **kwargs): + user_id = str(request.user.id) + + with connection.cursor() as cursor: + cursor.execute(f"SELECT set_config('api.user_id', '{user_id}', TRUE);") + return 
super().initial(request, *args, **kwargs) + + +class BaseUserViewset(BaseViewSet): + def dispatch(self, request, *args, **kwargs): + with transaction.atomic(): + return super().dispatch(request, *args, **kwargs) + + def initial(self, request, *args, **kwargs): + # TODO refactor after improving RLS on users + if request.stream is not None and request.stream.method == "POST": + return super().initial(request, *args, **kwargs) + if request.auth is None: + raise NotAuthenticated + + tenant_id = request.auth.get("tenant_id") + if tenant_id is None: + raise NotAuthenticated("Tenant ID is not present in token") + + try: + uuid.UUID(tenant_id) + except ValueError: + raise ValidationError("Tenant ID must be a valid UUID") + + with connection.cursor() as cursor: + cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);") + self.request.tenant_id = tenant_id + return super().initial(request, *args, **kwargs) diff --git a/api/src/backend/api/compliance.py b/api/src/backend/api/compliance.py new file mode 100644 index 0000000000..ae1b89a4a9 --- /dev/null +++ b/api/src/backend/api/compliance.py @@ -0,0 +1,209 @@ +from types import MappingProxyType + +from prowler.lib.check.compliance_models import Compliance +from prowler.lib.check.models import CheckMetadata + +from api.models import Provider + +PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE = {} +PROWLER_CHECKS = {} + + +def get_prowler_provider_checks(provider_type: Provider.ProviderChoices): + """ + Retrieve all check IDs for the specified provider type. + + This function fetches the check metadata for the given cloud provider + and returns an iterable of check IDs. + + Args: + provider_type (Provider.ProviderChoices): The provider type + (e.g., 'aws', 'azure') for which to retrieve check IDs. + + Returns: + Iterable[str]: An iterable of check IDs associated with the specified provider type. + """ + return CheckMetadata.get_bulk(provider_type).keys() + + +def get_prowler_provider_compliance(provider_type: Provider.ProviderChoices) -> dict: + """ + Retrieve the Prowler compliance data for a specified provider type. + + This function fetches the compliance frameworks and their associated + requirements for the given cloud provider. + + Args: + provider_type (Provider.ProviderChoices): The provider type + (e.g., 'aws', 'azure') for which to retrieve compliance data. + + Returns: + dict: A dictionary mapping compliance framework names to their respective + Compliance objects for the specified provider. + """ + return Compliance.get_bulk(provider_type) + + +def load_prowler_compliance(): + """ + Load and initialize the Prowler compliance data and checks for all provider types. + + This function retrieves compliance data for all supported provider types, + generates a compliance overview template, and populates the global variables + `PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE` and `PROWLER_CHECKS` with read-only mappings + of the compliance templates and checks, respectively. + """ + global PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE + global PROWLER_CHECKS + + prowler_compliance = { + provider_type: get_prowler_provider_compliance(provider_type) + for provider_type in Provider.ProviderChoices.values + } + template = generate_compliance_overview_template(prowler_compliance) + PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE = MappingProxyType(template) + PROWLER_CHECKS = MappingProxyType(load_prowler_checks(prowler_compliance)) + + +def load_prowler_checks(prowler_compliance): + """ + Generate a mapping of checks to the compliance frameworks that include them. 
+ + This function processes the provided compliance data and creates a dictionary + mapping each provider type to a dictionary where each check ID maps to a set + of compliance names that include that check. + + Args: + prowler_compliance (dict): The compliance data for all provider types, + as returned by `get_prowler_provider_compliance`. + + Returns: + dict: A nested dictionary where the first-level keys are provider types, + and the values are dictionaries mapping check IDs to sets of compliance names. + """ + checks = {} + for provider_type in Provider.ProviderChoices.values: + checks[provider_type] = { + check_id: set() for check_id in get_prowler_provider_checks(provider_type) + } + for compliance_name, compliance_data in prowler_compliance[ + provider_type + ].items(): + for requirement in compliance_data.Requirements: + for check in requirement.Checks: + try: + checks[provider_type][check].add(compliance_name) + except KeyError: + continue + return checks + + +def generate_scan_compliance( + compliance_overview, provider_type: str, check_id: str, status: str +): + """ + Update the compliance overview with the status of a specific check. + + This function updates the compliance overview by setting the status of the given check + within all compliance frameworks and requirements that include it. It then updates the + requirement status to 'FAIL' if any of its checks have failed, and adjusts the counts + of passed and failed requirements in the compliance overview. + + Args: + compliance_overview (dict): The compliance overview data structure to update. + provider_type (str): The provider type (e.g., 'aws', 'azure') associated with the check. + check_id (str): The identifier of the check whose status is being updated. + status (str): The status of the check (e.g., 'PASS', 'FAIL', 'MUTED'). + + Returns: + None: This function modifies the compliance_overview in place. + """ + for compliance_id in PROWLER_CHECKS[provider_type][check_id]: + for requirement in compliance_overview[compliance_id]["requirements"].values(): + if check_id in requirement["checks"]: + requirement["checks"][check_id] = status + requirement["checks_status"][status.lower()] += 1 + + if requirement["status"] != "FAIL" and any( + value == "FAIL" for value in requirement["checks"].values() + ): + requirement["status"] = "FAIL" + compliance_overview[compliance_id]["requirements_status"]["passed"] -= 1 + compliance_overview[compliance_id]["requirements_status"]["failed"] += 1 + + +def generate_compliance_overview_template(prowler_compliance: dict): + """ + Generate a compliance overview template for all provider types. + + This function creates a nested dictionary structure representing the compliance + overview template for each provider type, compliance framework, and requirement. + It initializes the status of all checks and requirements, and calculates initial + counts for requirements status. + + Args: + prowler_compliance (dict): The compliance data for all provider types, + as returned by `get_prowler_provider_compliance`. + + Returns: + dict: A nested dictionary representing the compliance overview template, + structured by provider type and compliance framework. 
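+
+    Example (illustrative shape only; actual framework names, descriptions and
+    counts depend on the compliance data that is loaded):
+
+        template["aws"]["cis_1.4_aws"] = {
+            "framework": "CIS-AWS",
+            "version": "1.4",
+            "provider": "aws",
+            "description": "...",
+            "requirements": {...},
+            "requirements_status": {"passed": 10, "failed": 0, "manual": 2},
+            "total_requirements": 12,
+        }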
+ """ + template = {} + for provider_type in Provider.ProviderChoices.values: + provider_compliance = template.setdefault(provider_type, {}) + compliance_data_dict = prowler_compliance[provider_type] + + for compliance_name, compliance_data in compliance_data_dict.items(): + compliance_requirements = {} + requirements_status = {"passed": 0, "failed": 0, "manual": 0} + total_requirements = 0 + + for requirement in compliance_data.Requirements: + total_requirements += 1 + total_checks = len(requirement.Checks) + checks_dict = {check: None for check in requirement.Checks} + + # Build requirement dictionary + requirement_dict = { + "name": requirement.Name or requirement.Id, + "description": requirement.Description, + "attributes": [ + dict(attribute) for attribute in requirement.Attributes + ], + "checks": checks_dict, + "checks_status": { + "pass": 0, + "fail": 0, + "manual": 0, + "total": total_checks, + }, + "status": "PASS", + } + + # Update requirements status + if total_checks == 0: + requirements_status["manual"] += 1 + + # Add requirement to compliance requirements + compliance_requirements[requirement.Id] = requirement_dict + + # Calculate pending requirements + pending_requirements = total_requirements - requirements_status["manual"] + requirements_status["passed"] = pending_requirements + + # Build compliance dictionary + compliance_dict = { + "framework": compliance_data.Framework, + "version": compliance_data.Version, + "provider": provider_type, + "description": compliance_data.Description, + "requirements": compliance_requirements, + "requirements_status": requirements_status, + "total_requirements": total_requirements, + } + + # Add compliance to provider compliance + provider_compliance[compliance_name] = compliance_dict + + return template diff --git a/api/src/backend/api/db_router.py b/api/src/backend/api/db_router.py new file mode 100644 index 0000000000..f3a986ac20 --- /dev/null +++ b/api/src/backend/api/db_router.py @@ -0,0 +1,18 @@ +class MainRouter: + default_db = "default" + admin_db = "admin" + + def db_for_read(self, model, **hints): # noqa: F841 + model_table_name = model._meta.db_table + if model_table_name.startswith("django_"): + return self.admin_db + return None + + def db_for_write(self, model, **hints): # noqa: F841 + model_table_name = model._meta.db_table + if model_table_name.startswith("django_"): + return self.admin_db + return None + + def allow_migrate(self, db, app_label, model_name=None, **hints): # noqa: F841 + return db == self.admin_db diff --git a/api/src/backend/api/db_utils.py b/api/src/backend/api/db_utils.py new file mode 100644 index 0000000000..d90c2c2340 --- /dev/null +++ b/api/src/backend/api/db_utils.py @@ -0,0 +1,271 @@ +import secrets +from contextlib import contextmanager +from datetime import datetime, timezone, timedelta + +from django.conf import settings +from django.contrib.auth.models import BaseUserManager +from django.db import models, transaction, connection +from psycopg2 import connect as psycopg2_connect +from psycopg2.extensions import new_type, register_type, register_adapter, AsIs + +DB_USER = settings.DATABASES["default"]["USER"] if not settings.TESTING else "test" +DB_PASSWORD = ( + settings.DATABASES["default"]["PASSWORD"] if not settings.TESTING else "test" +) +DB_PROWLER_USER = ( + settings.DATABASES["prowler_user"]["USER"] if not settings.TESTING else "test" +) +DB_PROWLER_PASSWORD = ( + settings.DATABASES["prowler_user"]["PASSWORD"] if not settings.TESTING else "test" +) +TASK_RUNNER_DB_TABLE = 
"django_celery_results_taskresult" +POSTGRES_TENANT_VAR = "api.tenant_id" +POSTGRES_USER_VAR = "api.user_id" + + +@contextmanager +def psycopg_connection(database_alias: str): + psycopg2_connection = None + try: + admin_db = settings.DATABASES[database_alias] + + psycopg2_connection = psycopg2_connect( + dbname=admin_db["NAME"], + user=admin_db["USER"], + password=admin_db["PASSWORD"], + host=admin_db["HOST"], + port=admin_db["PORT"], + ) + yield psycopg2_connection + finally: + if psycopg2_connection is not None: + psycopg2_connection.close() + + +@contextmanager +def tenant_transaction(tenant_id: str): + with transaction.atomic(): + with connection.cursor() as cursor: + cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);") + yield cursor + + +class CustomUserManager(BaseUserManager): + def create_user(self, email, password=None, **extra_fields): + if not email: + raise ValueError("The email field must be set") + email = self.normalize_email(email) + user = self.model(email=email, **extra_fields) + user.set_password(password) + user.save(using=self._db) + return user + + def get_by_natural_key(self, email): + return self.get(email__iexact=email) + + +def enum_to_choices(enum_class): + """ + This function converts a Python Enum to a list of tuples, where the first element is the value and the second element is the name. + + It's for use with Django's `choices` attribute, which expects a list of tuples. + """ + return [(item.value, item.name.replace("_", " ").title()) for item in enum_class] + + +def one_week_from_now(): + """ + Return a datetime object with a date one week from now. + """ + return datetime.now(timezone.utc) + timedelta(days=7) + + +def generate_random_token(length: int = 14, symbols: str | None = None) -> str: + """ + Generate a random token with the specified length. 
+ """ + _symbols = "23456789ABCDEFGHJKMNPQRSTVWXYZ" + return "".join(secrets.choice(symbols or _symbols) for _ in range(length)) + + +# Postgres Enums + + +class PostgresEnumMigration: + def __init__(self, enum_name: str, enum_values: tuple): + self.enum_name = enum_name + self.enum_values = enum_values + + def create_enum_type(self, apps, schema_editor): # noqa: F841 + string_enum_values = ", ".join([f"'{value}'" for value in self.enum_values]) + with schema_editor.connection.cursor() as cursor: + cursor.execute( + f"CREATE TYPE {self.enum_name} AS ENUM ({string_enum_values});" + ) + + def drop_enum_type(self, apps, schema_editor): # noqa: F841 + with schema_editor.connection.cursor() as cursor: + cursor.execute(f"DROP TYPE {self.enum_name};") + + +class PostgresEnumField(models.Field): + def __init__(self, enum_type_name, *args, **kwargs): + self.enum_type_name = enum_type_name + super().__init__(*args, **kwargs) + + def db_type(self, connection): + return self.enum_type_name + + def from_db_value(self, value, expression, connection): # noqa: F841 + return value + + def to_python(self, value): + if isinstance(value, EnumType): + return value.value + return value + + def get_prep_value(self, value): + if isinstance(value, EnumType): + return value.value + return value + + +class EnumType: + def __init__(self, value): + self.value = value + + def __str__(self): + return self.value + + +def enum_adapter(enum_obj): + return AsIs(f"'{enum_obj.value}'::{enum_obj.__class__.enum_type_name}") + + +def get_enum_oid(connection, enum_type_name: str): + with connection.cursor() as cursor: + cursor.execute("SELECT oid FROM pg_type WHERE typname = %s;", (enum_type_name,)) + result = cursor.fetchone() + if result is None: + raise ValueError(f"Enum type '{enum_type_name}' not found") + return result[0] + + +def register_enum(apps, schema_editor, enum_class): # noqa: F841 + with psycopg_connection(schema_editor.connection.alias) as connection: + enum_oid = get_enum_oid(connection, enum_class.enum_type_name) + enum_instance = new_type( + (enum_oid,), + enum_class.enum_type_name, + lambda value, cur: value, # noqa: F841 + ) + register_type(enum_instance, connection) + register_adapter(enum_class, enum_adapter) + + +# Postgres enum definition for member role + + +class MemberRoleEnum(EnumType): + enum_type_name = "member_role" + + +class MemberRoleEnumField(PostgresEnumField): + def __init__(self, *args, **kwargs): + super().__init__("member_role", *args, **kwargs) + + +# Postgres enum definition for Provider.provider + + +class ProviderEnum(EnumType): + enum_type_name = "provider" + + +class ProviderEnumField(PostgresEnumField): + def __init__(self, *args, **kwargs): + super().__init__("provider", *args, **kwargs) + + +# Postgres enum definition for Scan.type + + +class ScanTriggerEnum(EnumType): + enum_type_name = "scan_trigger" + + +class ScanTriggerEnumField(PostgresEnumField): + def __init__(self, *args, **kwargs): + super().__init__("scan_trigger", *args, **kwargs) + + +# Postgres enum definition for state + + +class StateEnum(EnumType): + enum_type_name = "state" + + +class StateEnumField(PostgresEnumField): + def __init__(self, *args, **kwargs): + super().__init__("state", *args, **kwargs) + + +# Postgres enum definition for Finding.Delta + + +class FindingDeltaEnum(EnumType): + enum_type_name = "finding_delta" + + +class FindingDeltaEnumField(PostgresEnumField): + def __init__(self, *args, **kwargs): + super().__init__("finding_delta", *args, **kwargs) + + +# Postgres enum definition for Severity + 
+
+class SeverityEnum(EnumType):
+    enum_type_name = "severity"
+
+
+class SeverityEnumField(PostgresEnumField):
+    def __init__(self, *args, **kwargs):
+        super().__init__("severity", *args, **kwargs)
+
+
+# Postgres enum definition for Status
+
+
+class StatusEnum(EnumType):
+    enum_type_name = "status"
+
+
+class StatusEnumField(PostgresEnumField):
+    def __init__(self, *args, **kwargs):
+        super().__init__("status", *args, **kwargs)
+
+
+# Postgres enum definition for Provider secrets type
+
+
+class ProviderSecretTypeEnum(EnumType):
+    enum_type_name = "provider_secret_type"
+
+
+class ProviderSecretTypeEnumField(PostgresEnumField):
+    def __init__(self, *args, **kwargs):
+        super().__init__("provider_secret_type", *args, **kwargs)
+
+
+# Postgres enum definition for Invitation state
+
+
+class InvitationStateEnum(EnumType):
+    enum_type_name = "invitation_state"
+
+
+class InvitationStateEnumField(PostgresEnumField):
+    def __init__(self, *args, **kwargs):
+        super().__init__("invitation_state", *args, **kwargs)
diff --git a/api/src/backend/api/decorators.py b/api/src/backend/api/decorators.py
new file mode 100644
index 0000000000..f30cb2458e
--- /dev/null
+++ b/api/src/backend/api/decorators.py
@@ -0,0 +1,52 @@
+from functools import wraps
+
+from django.db import connection, transaction
+
+
+def set_tenant(func):
+    """
+    Decorator to set the tenant context for a Celery task based on the provided tenant_id.
+
+    This decorator extracts the `tenant_id` from the task's keyword arguments
+    and uses it to set the tenant context for the current database session.
+    The `tenant_id` is then removed from the kwargs before the task function
+    is executed. If `tenant_id` is not provided, a KeyError is raised.
+
+    Args:
+        func (function): The Celery task function to be decorated.
+
+    Raises:
+        KeyError: If `tenant_id` is not found in the task's keyword arguments.
+
+    Returns:
+        function: The wrapped function with tenant context set.
+
+    Example:
+        # This decorator MUST be the last one in the decorator chain
+
+        @shared_task
+        @set_tenant
+        def some_task(arg1, **kwargs):
+            # Task logic here
+            pass
+
+        # When calling the task
+        some_task.delay(arg1, tenant_id="1234-abcd-5678")
+
+        # The tenant context will be set before the task logic executes.
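+
+    Note:
+        `set_config(..., TRUE)` makes the setting local to the current transaction,
+        and the wrapper runs inside `transaction.atomic`, so the tenant context
+        expires automatically when the task's transaction ends.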
+ """ + + @wraps(func) + @transaction.atomic + def wrapper(*args, **kwargs): + try: + tenant_id = kwargs.pop("tenant_id") + except KeyError: + raise KeyError("This task requires the tenant_id") + + with connection.cursor() as cursor: + cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);") + + return func(*args, **kwargs) + + return wrapper diff --git a/api/src/backend/api/exceptions.py b/api/src/backend/api/exceptions.py new file mode 100644 index 0000000000..12bc788d68 --- /dev/null +++ b/api/src/backend/api/exceptions.py @@ -0,0 +1,45 @@ +from django.core.exceptions import ValidationError as django_validation_error +from rest_framework import status +from rest_framework.exceptions import APIException +from rest_framework_json_api.exceptions import exception_handler +from rest_framework_json_api.serializers import ValidationError +from rest_framework_simplejwt.exceptions import TokenError, InvalidToken + + +class ModelValidationError(ValidationError): + def __init__( + self, + detail: str | None = None, + code: str | None = None, + pointer: str | None = None, + status_code: int = 400, + ): + super().__init__( + detail=[ + { + "detail": detail, + "status": str(status_code), + "source": {"pointer": pointer}, + "code": code, + } + ] + ) + + +class InvitationTokenExpiredException(APIException): + status_code = status.HTTP_410_GONE + default_detail = "The invitation token has expired and is no longer valid." + default_code = "token_expired" + + +def custom_exception_handler(exc, context): + if isinstance(exc, django_validation_error): + if hasattr(exc, "error_dict"): + exc = ValidationError(exc.message_dict) + else: + exc = ValidationError(detail=exc.messages[0], code=exc.code) + elif isinstance(exc, (TokenError, InvalidToken)): + exc.detail["messages"] = [ + message_item["message"] for message_item in exc.detail["messages"] + ] + return exception_handler(exc, context) diff --git a/api/src/backend/api/filters.py b/api/src/backend/api/filters.py new file mode 100644 index 0000000000..2ed175c5d7 --- /dev/null +++ b/api/src/backend/api/filters.py @@ -0,0 +1,484 @@ +from datetime import date, datetime, timezone + +from django.conf import settings +from django.db.models import Q +from django_filters.rest_framework import ( + BaseInFilter, + FilterSet, + BooleanFilter, + CharFilter, + UUIDFilter, + DateFilter, + ChoiceFilter, +) +from rest_framework_json_api.django_filters.backends import DjangoFilterBackend +from rest_framework_json_api.serializers import ValidationError + +from api.db_utils import ( + ProviderEnumField, + FindingDeltaEnumField, + StatusEnumField, + SeverityEnumField, + InvitationStateEnumField, +) +from api.models import ( + User, + Membership, + Provider, + ProviderGroup, + Resource, + ResourceTag, + Scan, + Task, + StateChoices, + Finding, + SeverityChoices, + StatusChoices, + ProviderSecret, + Invitation, + ComplianceOverview, +) +from api.rls import Tenant +from api.uuid_utils import ( + datetime_to_uuid7, + uuid7_start, + uuid7_end, + uuid7_range, + transform_into_uuid7, +) +from api.v1.serializers import TaskBase + + +class CustomDjangoFilterBackend(DjangoFilterBackend): + def to_html(self, _request, _queryset, _view): + """Override this method to use the Browsable API in dev environments. + + This disables the HTML render for the default filter. 
+ """ + return None + + +class UUIDInFilter(BaseInFilter, UUIDFilter): + pass + + +class CharInFilter(BaseInFilter, CharFilter): + pass + + +class ChoiceInFilter(BaseInFilter, ChoiceFilter): + pass + + +class TenantFilter(FilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + updated_at = DateFilter(field_name="updated_at", lookup_expr="date") + + class Meta: + model = Tenant + fields = { + "name": ["exact", "icontains"], + "inserted_at": ["date", "gte", "lte"], + "updated_at": ["gte", "lte"], + } + + +class MembershipFilter(FilterSet): + date_joined = DateFilter(field_name="date_joined", lookup_expr="date") + role = ChoiceFilter(choices=Membership.RoleChoices.choices) + + class Meta: + model = Membership + fields = { + "tenant": ["exact"], + "role": ["exact"], + "date_joined": ["date", "gte", "lte"], + } + + +class ProviderFilter(FilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + updated_at = DateFilter(field_name="updated_at", lookup_expr="date") + connected = BooleanFilter() + provider = ChoiceFilter(choices=Provider.ProviderChoices.choices) + + class Meta: + model = Provider + fields = { + "provider": ["exact", "in"], + "id": ["exact", "in"], + "uid": ["exact", "icontains", "in"], + "alias": ["exact", "icontains", "in"], + "inserted_at": ["gte", "lte"], + "updated_at": ["gte", "lte"], + } + filter_overrides = { + ProviderEnumField: { + "filter_class": CharFilter, + }, + } + + +class ProviderRelationshipFilterSet(FilterSet): + provider_type = ChoiceFilter( + choices=Provider.ProviderChoices.choices, field_name="provider__provider" + ) + provider_type__in = ChoiceInFilter( + choices=Provider.ProviderChoices.choices, field_name="provider__provider" + ) + provider_uid = CharFilter(field_name="provider__uid", lookup_expr="exact") + provider_uid__in = CharInFilter(field_name="provider__uid", lookup_expr="in") + provider_uid__icontains = CharFilter( + field_name="provider__uid", lookup_expr="icontains" + ) + provider_alias = CharFilter(field_name="provider__alias", lookup_expr="exact") + provider_alias__in = CharInFilter(field_name="provider__alias", lookup_expr="in") + provider_alias__icontains = CharFilter( + field_name="provider__alias", lookup_expr="icontains" + ) + + +class ProviderGroupFilter(FilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + updated_at = DateFilter(field_name="updated_at", lookup_expr="date") + + class Meta: + model = ProviderGroup + fields = { + "id": ["exact", "in"], + "name": ["exact", "in"], + "inserted_at": ["gte", "lte"], + "updated_at": ["gte", "lte"], + } + + +class ScanFilter(ProviderRelationshipFilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + completed_at = DateFilter(field_name="completed_at", lookup_expr="date") + started_at = DateFilter(field_name="started_at", lookup_expr="date") + trigger = ChoiceFilter(choices=Scan.TriggerChoices.choices) + + class Meta: + model = Scan + fields = { + "provider": ["exact", "in"], + "name": ["exact", "icontains"], + "started_at": ["gte", "lte"], + "trigger": ["exact"], + } + + +class TaskFilter(FilterSet): + name = CharFilter(field_name="task_runner_task__task_name", lookup_expr="exact") + name__icontains = CharFilter( + field_name="task_runner_task__task_name", lookup_expr="icontains" + ) + state = ChoiceFilter( + choices=StateChoices.choices, method="filter_state", lookup_expr="exact" + ) + task_state_inverse_mapping_values = { + v: k for k, v in TaskBase.state_mapping.items() + } 
+
+    def filter_state(self, queryset, name, value):
+        if value not in StateChoices:
+            raise ValidationError(
+                f"Invalid state value: '{value}'. Valid values are: "
+                f"{', '.join(StateChoices)}"
+            )
+
+        return queryset.filter(
+            task_runner_task__status=self.task_state_inverse_mapping_values[value]
+        )
+
+    class Meta:
+        model = Task
+        fields = []
+
+
+class ResourceTagFilter(FilterSet):
+    class Meta:
+        model = ResourceTag
+        fields = {
+            "key": ["exact", "icontains"],
+            "value": ["exact", "icontains"],
+        }
+        search = ["text_search"]
+
+
+class ResourceFilter(ProviderRelationshipFilterSet):
+    tag_key = CharFilter(method="filter_tag_key")
+    tag_value = CharFilter(method="filter_tag_value")
+    tag = CharFilter(method="filter_tag")
+    tags = CharFilter(method="filter_tag")
+    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
+    updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
+
+    class Meta:
+        model = Resource
+        fields = {
+            "provider": ["exact", "in"],
+            "uid": ["exact", "icontains"],
+            "name": ["exact", "icontains"],
+            "region": ["exact", "icontains", "in"],
+            "service": ["exact", "icontains", "in"],
+            "type": ["exact", "icontains", "in"],
+            "inserted_at": ["gte", "lte"],
+            "updated_at": ["gte", "lte"],
+        }
+
+    def filter_tag_key(self, queryset, name, value):
+        return queryset.filter(Q(tags__key=value) | Q(tags__key__icontains=value))
+
+    def filter_tag_value(self, queryset, name, value):
+        return queryset.filter(Q(tags__value=value) | Q(tags__value__icontains=value))
+
+    def filter_tag(self, queryset, name, value):
+        # We won't know what the user wants to filter on just based on the value,
+        # and we don't want to build special filtering logic for every possible
+        # provider tag spec, so we'll just do a full text search
+        return queryset.filter(tags__text_search=value)
+
+
+class FindingFilter(FilterSet):
+    # Provider filters are resolved through the finding's related scan
+    provider = UUIDFilter(field_name="scan__provider__id", lookup_expr="exact")
+    provider__in = UUIDInFilter(field_name="scan__provider__id", lookup_expr="in")
+    provider_type = ChoiceFilter(
+        choices=Provider.ProviderChoices.choices, field_name="scan__provider__provider"
+    )
+    provider_type__in = ChoiceInFilter(
+        choices=Provider.ProviderChoices.choices, field_name="scan__provider__provider"
+    )
+    provider_uid = CharFilter(field_name="scan__provider__uid", lookup_expr="exact")
+    provider_uid__in = CharInFilter(field_name="scan__provider__uid", lookup_expr="in")
+    provider_uid__icontains = CharFilter(
+        field_name="scan__provider__uid", lookup_expr="icontains"
+    )
+    provider_alias = CharFilter(field_name="scan__provider__alias", lookup_expr="exact")
+    provider_alias__in = CharInFilter(
+        field_name="scan__provider__alias", lookup_expr="in"
+    )
+    provider_alias__icontains = CharFilter(
+        field_name="scan__provider__alias", lookup_expr="icontains"
+    )
+
+    updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
+
+    uid = CharFilter(field_name="uid")
+    delta = ChoiceFilter(choices=Finding.DeltaChoices.choices)
+    status = ChoiceFilter(choices=StatusChoices.choices)
+    severity = ChoiceFilter(choices=SeverityChoices)
+    impact = ChoiceFilter(choices=SeverityChoices)
+
+    resources = UUIDInFilter(field_name="resource__id", lookup_expr="in")
+
+    region = CharFilter(field_name="resources__region")
+    region__in = CharInFilter(field_name="resources__region", lookup_expr="in")
+    region__icontains = CharFilter(
+        field_name="resources__region", lookup_expr="icontains"
+    )
+
+    service =
CharFilter(field_name="resources__service") + service__in = CharInFilter(field_name="resources__service", lookup_expr="in") + service__icontains = CharFilter( + field_name="resources__service", lookup_expr="icontains" + ) + + resource_uid = CharFilter(field_name="resources__uid") + resource_uid__in = CharInFilter(field_name="resources__uid", lookup_expr="in") + resource_uid__icontains = CharFilter( + field_name="resources__uid", lookup_expr="icontains" + ) + + resource_name = CharFilter(field_name="resources__name") + resource_name__in = CharInFilter(field_name="resources__name", lookup_expr="in") + resource_name__icontains = CharFilter( + field_name="resources__name", lookup_expr="icontains" + ) + + resource_type = CharFilter(field_name="resources__type") + resource_type__in = CharInFilter(field_name="resources__type", lookup_expr="in") + resource_type__icontains = CharFilter( + field_name="resources__type", lookup_expr="icontains" + ) + + scan = UUIDFilter(method="filter_scan_id") + scan__in = UUIDInFilter(method="filter_scan_id_in") + + inserted_at = DateFilter(method="filter_inserted_at", lookup_expr="date") + inserted_at__date = DateFilter(method="filter_inserted_at", lookup_expr="date") + inserted_at__gte = DateFilter(method="filter_inserted_at_gte") + inserted_at__lte = DateFilter(method="filter_inserted_at_lte") + + class Meta: + model = Finding + fields = { + "id": ["exact", "in"], + "uid": ["exact", "in"], + "scan": ["exact", "in"], + "delta": ["exact", "in"], + "status": ["exact", "in"], + "severity": ["exact", "in"], + "impact": ["exact", "in"], + "check_id": ["exact", "in", "icontains"], + "inserted_at": ["date", "gte", "lte"], + "updated_at": ["gte", "lte"], + } + filter_overrides = { + FindingDeltaEnumField: { + "filter_class": CharFilter, + }, + StatusEnumField: { + "filter_class": CharFilter, + }, + SeverityEnumField: { + "filter_class": CharFilter, + }, + } + + # Convert filter values to UUIDv7 values for use with partitioning + def filter_scan_id(self, queryset, name, value): + try: + value_uuid = transform_into_uuid7(value) + start = uuid7_start(value_uuid) + end = uuid7_end(value_uuid, settings.FINDINGS_TABLE_PARTITION_MONTHS) + except ValidationError as validation_error: + detail = str(validation_error.detail[0]) + raise ValidationError( + [ + { + "detail": detail, + "status": 400, + "source": {"pointer": "/data/relationships/scan"}, + "code": "invalid", + } + ] + ) + + return ( + queryset.filter(id__gte=start) + .filter(id__lt=end) + .filter(scan__id=value_uuid) + ) + + def filter_scan_id_in(self, queryset, name, value): + try: + uuid_list = [ + transform_into_uuid7(value_uuid) + for value_uuid in value + if value_uuid is not None + ] + + start, end = uuid7_range(uuid_list) + except ValidationError as validation_error: + detail = str(validation_error.detail[0]) + raise ValidationError( + [ + { + "detail": detail, + "status": 400, + "source": {"pointer": "/data/relationships/scan"}, + "code": "invalid", + } + ] + ) + if start == end: + return queryset.filter(id__gte=start).filter(scan__id__in=uuid_list) + else: + return ( + queryset.filter(id__gte=start) + .filter(id__lt=end) + .filter(scan__id__in=uuid_list) + ) + + def filter_inserted_at(self, queryset, name, value): + value = self.maybe_date_to_datetime(value) + start = uuid7_start(datetime_to_uuid7(value)) + + return queryset.filter(id__gte=start).filter(inserted_at__date=value) + + def filter_inserted_at_gte(self, queryset, name, value): + value = self.maybe_date_to_datetime(value) + start = 
uuid7_start(datetime_to_uuid7(value)) + + return queryset.filter(id__gte=start).filter(inserted_at__gte=value) + + def filter_inserted_at_lte(self, queryset, name, value): + value = self.maybe_date_to_datetime(value) + end = uuid7_start(datetime_to_uuid7(value)) + + return queryset.filter(id__lte=end).filter(inserted_at__lte=value) + + @staticmethod + def maybe_date_to_datetime(value): + dt = value + if isinstance(value, date): + dt = datetime.combine(value, datetime.min.time(), tzinfo=timezone.utc) + return dt + + +class ProviderSecretFilter(FilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + updated_at = DateFilter(field_name="updated_at", lookup_expr="date") + provider = UUIDFilter(field_name="provider__id", lookup_expr="exact") + + class Meta: + model = ProviderSecret + fields = { + "name": ["exact", "icontains"], + } + + +class InvitationFilter(FilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + updated_at = DateFilter(field_name="updated_at", lookup_expr="date") + expires_at = DateFilter(field_name="expires_at", lookup_expr="date") + state = ChoiceFilter(choices=Invitation.State.choices) + state__in = ChoiceInFilter(choices=Invitation.State.choices, lookup_expr="in") + + class Meta: + model = Invitation + fields = { + "email": ["exact", "icontains"], + "inserted_at": ["date", "gte", "lte"], + "updated_at": ["date", "gte", "lte"], + "expires_at": ["date", "gte", "lte"], + "inviter": ["exact"], + } + filter_overrides = { + InvitationStateEnumField: { + "filter_class": CharFilter, + } + } + + +class UserFilter(FilterSet): + date_joined = DateFilter(field_name="date_joined", lookup_expr="date") + + class Meta: + model = User + fields = { + "name": ["exact", "icontains"], + "email": ["exact", "icontains"], + "company_name": ["exact", "icontains"], + "date_joined": ["date", "gte", "lte"], + "is_active": ["exact"], + } + + +class ComplianceOverviewFilter(FilterSet): + inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date") + provider_type = ChoiceFilter(choices=Provider.ProviderChoices.choices) + provider_type__in = ChoiceInFilter(choices=Provider.ProviderChoices.choices) + scan_id = UUIDFilter(field_name="scan__id") + + class Meta: + model = ComplianceOverview + fields = { + "inserted_at": ["date", "gte", "lte"], + "compliance_id": ["exact", "icontains"], + "framework": ["exact", "iexact", "icontains"], + "version": ["exact", "icontains"], + "region": ["exact", "icontains", "in"], + } diff --git a/api/src/backend/api/fixtures/dev/0_dev_users.json b/api/src/backend/api/fixtures/dev/0_dev_users.json new file mode 100644 index 0000000000..87ce06239f --- /dev/null +++ b/api/src/backend/api/fixtures/dev/0_dev_users.json @@ -0,0 +1,28 @@ +[ + { + "model": "api.user", + "pk": "8b38e2eb-6689-4f1e-a4ba-95b275130200", + "fields": { + "password": "pbkdf2_sha256$720000$vA62S78kog2c2ytycVQdke$Fp35GVLLMyy5fUq3krSL9I02A+ocQ+RVa4S22LIAO5s=", + "last_login": null, + "name": "Devie Prowlerson", + "email": "dev@prowler.com", + "company_name": "Prowler Developers", + "is_active": true, + "date_joined": "2024-09-17T09:04:20.850Z" + } + }, + { + "model": "api.user", + "pk": "b6493a3a-c997-489b-8b99-278bf74de9f6", + "fields": { + "password": "pbkdf2_sha256$720000$vA62S78kog2c2ytycVQdke$Fp35GVLLMyy5fUq3krSL9I02A+ocQ+RVa4S22LIAO5s=", + "last_login": null, + "name": "Devietoo Prowlerson", + "email": "dev2@prowler.com", + "company_name": "Prowler Developers", + "is_active": true, + "date_joined": "2024-09-18T09:04:20.850Z" + } + } 
+] diff --git a/api/src/backend/api/fixtures/dev/1_dev_tenants.json b/api/src/backend/api/fixtures/dev/1_dev_tenants.json new file mode 100644 index 0000000000..dd89b100d4 --- /dev/null +++ b/api/src/backend/api/fixtures/dev/1_dev_tenants.json @@ -0,0 +1,50 @@ +[ + { + "model": "api.tenant", + "pk": "12646005-9067-4d2a-a098-8bb378604362", + "fields": { + "inserted_at": "2024-03-21T23:00:00Z", + "updated_at": "2024-03-21T23:00:00Z", + "name": "Tenant1" + } + }, + { + "model": "api.tenant", + "pk": "0412980b-06e3-436a-ab98-3c9b1d0333d3", + "fields": { + "inserted_at": "2024-03-21T23:00:00Z", + "updated_at": "2024-03-21T23:00:00Z", + "name": "Tenant2" + } + }, + { + "model": "api.membership", + "pk": "2b0db93a-7e0b-4edf-a851-ea448676b7eb", + "fields": { + "user": "8b38e2eb-6689-4f1e-a4ba-95b275130200", + "tenant": "0412980b-06e3-436a-ab98-3c9b1d0333d3", + "role": "owner", + "date_joined": "2024-09-19T11:03:59.712Z" + } + }, + { + "model": "api.membership", + "pk": "797d7cee-abc9-4598-98bb-4bf4bfb97f27", + "fields": { + "user": "8b38e2eb-6689-4f1e-a4ba-95b275130200", + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "role": "owner", + "date_joined": "2024-09-19T11:02:59.712Z" + } + }, + { + "model": "api.membership", + "pk": "dea37563-7009-4dcf-9f18-25efb41462a7", + "fields": { + "user": "b6493a3a-c997-489b-8b99-278bf74de9f6", + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "role": "member", + "date_joined": "2024-09-19T11:03:59.712Z" + } + } +] diff --git a/api/src/backend/api/fixtures/dev/2_dev_providers.json b/api/src/backend/api/fixtures/dev/2_dev_providers.json new file mode 100644 index 0000000000..a4d73950cc --- /dev/null +++ b/api/src/backend/api/fixtures/dev/2_dev_providers.json @@ -0,0 +1,177 @@ +[ + { + "model": "api.provider", + "pk": "37b065f8-26b0-4218-a665-0b23d07b27d9", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-01T17:20:27.050Z", + "updated_at": "2024-08-01T17:20:27.050Z", + "provider": "gcp", + "uid": "a12322-test321", + "alias": "gcp_testing_2", + "connected": null, + "connection_last_checked_at": null, + "metadata": {} + } + }, + { + "model": "api.provider", + "pk": "8851db6b-42e5-4533-aa9e-30a32d67e875", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-01T17:19:42.453Z", + "updated_at": "2024-08-01T17:19:42.453Z", + "provider": "gcp", + "uid": "a12345-test123", + "alias": "gcp_testing_1", + "connected": null, + "connection_last_checked_at": null, + "metadata": {} + } + }, + { + "model": "api.provider", + "pk": "b85601a8-4b45-4194-8135-03fb980ef428", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-01T17:19:09.556Z", + "updated_at": "2024-08-01T17:19:09.556Z", + "provider": "aws", + "uid": "123456789020", + "alias": "aws_testing_2", + "connected": null, + "connection_last_checked_at": null, + "metadata": {} + } + }, + { + "model": "api.provider", + "pk": "baa7b895-8bac-4f47-b010-4226d132856e", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-01T17:20:16.962Z", + "updated_at": "2024-08-01T17:20:16.962Z", + "provider": "gcp", + "uid": "a12322-test123", + "alias": "gcp_testing_3", + "connected": null, + "connection_last_checked_at": null, + "metadata": {} + } + }, + { + "model": "api.provider", + "pk": "d7c7ea89-d9af-423b-a364-1290dcad5a01", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-01T17:18:58.132Z", + "updated_at": 
"2024-08-01T17:18:58.132Z", + "provider": "aws", + "uid": "123456789015", + "alias": "aws_testing_1", + "connected": null, + "connection_last_checked_at": null, + "metadata": {} + } + }, + { + "model": "api.provider", + "pk": "1b59e032-3eb6-4694-93a5-df84cd9b3ce2", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-06T16:03:26.176Z", + "updated_at": "2024-08-06T16:03:26.176Z", + "provider": "azure", + "uid": "8851db6b-42e5-4533-aa9e-30a32d67e875", + "alias": "azure_testing", + "connected": null, + "connection_last_checked_at": null, + "metadata": {}, + "scanner_args": {} + } + }, + { + "model": "api.provider", + "pk": "26e55a24-cb2c-4cef-ac87-6f91fddb2c97", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-08-06T16:03:07.037Z", + "updated_at": "2024-08-06T16:03:07.037Z", + "provider": "kubernetes", + "uid": "kubernetes-test-12345", + "alias": "k8s_testing", + "connected": null, + "connection_last_checked_at": null, + "metadata": {}, + "scanner_args": {} + } + }, + { + "model": "api.provider", + "pk": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:45:26.352Z", + "updated_at": "2024-10-18T11:16:23.533Z", + "provider": "aws", + "uid": "106908755759", + "alias": "real testing aws provider", + "connected": true, + "connection_last_checked_at": "2024-10-18T11:16:23.503Z", + "metadata": {}, + "scanner_args": {} + } + }, + { + "model": "api.providersecret", + "pk": "11491b47-75ae-4f71-ad8d-3e630a72182e", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-11T08:03:05.026Z", + "updated_at": "2024-10-11T08:04:47.033Z", + "name": "GCP static secrets", + "secret_type": "static", + "_secret": 
"Z0FBQUFBQm5DTndmZW9KakRZUHM2UHhQN2V3RzN0QmM1cERham8yMHp5cnVTT0lzdGFyS1FuVmJXUlpYSGsyU0cxR3RMMEdQYXlYMUVsaWtqLU1OZWlaVUp6OFREYlotZTVBY3BuTlZYbm9YcUJydzAxV2p5dkpLamI1Y2tUYzA0MmJUNWxsNTBRM0E1SDRCa0pPQWVlb05YU3dfeUhkLTRmOEh3dGczOGh1ZGhQcVdZdVAtYmtoSWlwNXM4VGFoVmF3dno2X1hrbk5GZjZTWjVuWEdEZUFXeHJSQjEzbTlVakhNdzYyWTdiVEpvUEc2MTNpRzUtczhEank1eGI0b3MyMlAyaGN6dlByZmtUWHByaDNUYWFqYS1tYnNBUkRKTzBacFNSRjFuVmd5bUtFUEJhd1ZVS1ZDd2xSUV9PaEtLTnc0XzVkY2lhM01WTjQwaWdJSk9wNUJSXzQ4RUNQLXFPNy1VdzdPYkZyWkVkU3RyQjVLTS1MVHN0R3k4THNKZ2NBNExaZnl3Q1EwN2dwNGRsUXptMjB0LXUzTUpzTDE2Q1hmS0ZSN2g1ZjBPeV8taFoxNUwxc2FEcktXX0dCM1IzeUZTTHNiTmNxVXBvNWViZTJScUVWV2VYTFQ4UHlid21PY1A0UjdNMGtERkZCd0lLMlJENDMzMVZUM09DQ0twd1N3VHlZd09XLUctOWhYcFJIR1p5aUlZeEUzejc2dWRYdGNsd0xOODNqRUFEczhSTWNtWU0tdFZ1ZTExaHNHUVYtd0Zxdld1LTdKVUNINzlZTGdHODhKeVVpQmRZMHRUNTJRRWhwS1F1Y3I2X2Iwc0c1NHlXSVRLZWxreEt0dVRnOTZFMkptU2VMS1dWXzdVOVRzMUNUWXM2aFlxVDJXdGo3d2cxSVZGWlI2ZWhIZzZBcEl4bEJ6UnVHc0RYWVNHcjFZUHI5ZUYyWG9rSlo0QUVSUkFCX3h2UmtJUTFzVXJUZ25vTmk2VzdoTTNta05ucmNfTi0yR1ZxN1E2MnZJOVVKOGxmMXMzdHMxVndmSVhQbUItUHgtMVpVcHJwMU5JVHJLb0Y1aHV5OEEwS0kzQkEtcFJkdkRnWGxmZnprNFhndWg1TmQyd09yTFdTRmZ3d2ZvZFUtWXp4a2VYb3JjckFIcE13MDUzX0RHSnlzM0N2ZE5IRzJzMXFMc0k4MDRyTHdLZFlWOG9SaFF0LU43Ynd6VFlEcVNvdFZ0emJEVk10aEp4dDZFTFNFNzk0UUo2WTlVLWRGYm1fanZHaFZreHBIMmtzVjhyS0xPTk9fWHhiVTJHQXZwVlVuY3JtSjFUYUdHQzhEaHFNZXhwUHBmY0kxaUVrOHo4a0FYOTdpZVJDbFRvdFlQeWo3eFZHX1ZMZ1Myc3prU3o2c3o2eXNja1U4N0Y1T0d1REVjZFRGNTByUkgyemVCSjlQYkY2bmJ4YTZodHB0cUNzd2xZcENycUdsczBIaEZPbG1jVUlqNlM2cEE3aGpVaWswTzBDLVFGUHM5UHhvM09saWNtaDhaNVlsc3FZdktKeWlheDF5OGhTODE2N3JWamdTZG5Fa3JSQ2ZUSEVfRjZOZXdreXRZLTBZRFhleVFFeC1YUzc0cWhYeEhobGxvdnZ3Rm15WFlBWXp0dm1DeTA5eExLeEFRRXVRSXBXdTNEaWdZZ3JDenItdDhoZlFiTzI0SGZ1c01FR1FNaFVweVBKR1YxWGRUMW1Mc2JVdW9raWR6UHk2ZTBnS05pV3oyZVBjREdkY3k4ZHZPUWE5S281MkJRSHF3NnpTclZ5bl90bk1wUEh6Tkp5dXlDcE5paWRqcVhxRFVObWIzRldWOGJ2aC1CRHZpbFZrb0hjNGpCMm5POGRiS2lETUpMLUVfQlhCdTZPLW9USW1LTFlTSF9zRUJYZ1NKeFFEQjNOR215ZXJDbkFndmcxWl9rWlk9", + "provider": "8851db6b-42e5-4533-aa9e-30a32d67e875" + } + }, + { + "model": "api.providersecret", + "pk": "40191ad5-d8c2-40a9-826d-241397626b68", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-10T11:11:44.515Z", + "updated_at": "2024-10-11T07:59:56.102Z", + "name": "AWS static secrets", + "secret_type": "static", + "_secret": 
"Z0FBQUFBQm5DTnI4Y1RyV19UWEJzc3kzQUExcU5tdlQzbFVLeDdZMWd1MzkwWkl2UF9oZGhiVEJHVWpSMXV4MjYyN3g2OVpvNVpkQUQ3S0VGaGdQLTFhQWE3MkpWZUt2cnVhODc4d3FpY3FVZkpwdHJzNUJPeFRwZ3N4bGpPZTlkNWRNdFlwTHU3aTNWR3JjSzJwLWRITHdfQWpXb1F0c1l3bVFxbnFrTEpPTGgxcnF1VUprSzZ5dGRQU2VGYmZhTTlwbVpsNFBNWlFhVW9RbjJyYnZ5N0oweE5kV0ZEaUdpUUpNVExOa3oyQ2dNREVSenJ0TEFZc0RrRWpXNUhyMmtybGNLWDVOR0FabEl4QVR1bkZyb2hBLWc1MFNIekVyeXI0SmVreHBjRnJ1YUlVdXpVbW9JZkk0aEgxYlM1VGhSRlhtcS14YzdTYUhXR2xodElmWjZuNUVwaHozX1RVTG1QWHdPZWd4clNHYnAyOTBsWEl5UU83RGxZb0RKWjdadjlsTmJtSHQ0Yl9uaDJoODB0QV9sWmFYbFAxcjA1bmhNVlNqc2xEeHlvcUJFbVZvY250ZENnMnZLT1psb1JDclB3WVR6NGdZb2pzb3U4Ny04QlB0UTZub0dMOXZEUTZEcVJhZldCWEZZSDdLTy02UVZqck5zVTZwS3pObGlOejNJeHUzbFRabFM2V2xaekZVRjZtX3VzZlplendnOWQzT01WMFd3ejNadHVlTFlqRGR2dk5Da29zOFYwOUdOaEc4OHhHRnJFMmJFMk12VDNPNlBBTGlsXy13cUM1QkVYb0o1Z2U4ZXJnWXpZdm1sWjA5bzQzb2NFWC1xbmIycGZRbGtCaGNaOWlkX094UUNNampwbkZoREctNWI4QnZRaE8zM3BEQ1BwNzA1a3BzOGczZXdIM2s1NHFGN1ZTbmJhZkc4RVdfM0ZIZU5udTBYajd1RGxpWXZpRWdSMmhHa2RKOEIzbmM0X2F1OGxrN2p6LW9UVldDOFVpREoxZ1UzcTBZX19OQ0xJb0syWlhNSlQ4MzQwdzRtVG94Y01GS3FMLV95UVlxOTFORk8zdjE5VGxVaXdhbGlzeHdoYWNzazZWai1GUGtUM2gzR0ZWTTY4SThWeVFnZldIaklOTTJqTTg1VkhEYW5wNmdEVllXMmJCV2tpVmVYeUV2c0E1T00xbHJRNzgzVG9wb0Q1cV81UEhqYUFsQ2p1a0VpRDVINl9SVkpyZVRNVnVXQUxwY3NWZnJrNmRVREpiLWNHYUpXWmxkQlhNbWhuR1NmQ1BaVDlidUxCWHJMaHhZbk1FclVBaEVZeWg1ZlFoenZzRHlKbV8wa3lmMGZrd3NmTDZjQkE0UXNSUFhpTWtUUHBrX29BVzc4QzEtWEJIQW1GMGFuZVlXQWZIOXJEamloeGFCeHpYMHNjMFVfNXpQdlJfSkk2bzFROU5NU0c1SHREWW1nbkFNZFZ0UjdPRGdjaF96RGplY1hjdFFzLVR6MTVXYlRjbHIxQ2JRejRpVko5NWhBU0ZHR3ZvczU5elljRGpHRTdIc0FsSm5fUHEwT1gtTS1lN3M3X3ZZRnlkYUZoZXRQeEJsZlhLdFdTUzU1NUl4a29aOWZIdTlPM0Fnak1xYWVkYTNiMmZXUHlXS2lwUVBZLXQyaUxuRmtQNFFieE9SVmdZVW9WTHlzbnBPZlNIdGVHOE1LNVNESjN3cGtVSHVpT1NJWHE1ZzNmUTVTOC0xX3NGSmJqU19IbjZfQWtMRG1YNUQtRy13TUJIZFlyOXJkQzFQbkdZVXVzM2czbS1HWHFBT1pXdVd3N09tcG82SVhnY1ZtUWxqTEg2UzJCUmllb2pweVN2aGwwS1FVRUhjNEN2amRMc3MwVU4zN3dVMWM5Slg4SERtenFaQk1yMWx0LWtxVWtLZVVtbU4yejVEM2h6TEt0RGdfWE09", + "provider": "b85601a8-4b45-4194-8135-03fb980ef428" + } + }, + { + "model": "api.providersecret", + "pk": "ed89d1ea-366a-4d12-a602-f2ab77019742", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-10T11:11:44.515Z", + "updated_at": "2024-10-11T07:59:56.102Z", + "name": "Azure static secrets", + "secret_type": "static", + "_secret": 
"Z0FBQUFBQm5DTnI4Y1RyV19UWEJzc3kzQUExcU5tdlQzbFVLeDdZMWd1MzkwWkl2UF9oZGhiVEJHVWpSMXV4MjYyN3g2OVpvNVpkQUQ3S0VGaGdQLTFhQWE3MkpWZUt2cnVhODc4d3FpY3FVZkpwdHJzNUJPeFRwZ3N4bGpPZTlkNWRNdFlwTHU3aTNWR3JjSzJwLWRITHdfQWpXb1F0c1l3bVFxbnFrTEpPTGgxcnF1VUprSzZ5dGRQU2VGYmZhTTlwbVpsNFBNWlFhVW9RbjJyYnZ5N0oweE5kV0ZEaUdpUUpNVExOa3oyQ2dNREVSenJ0TEFZc0RrRWpXNUhyMmtybGNLWDVOR0FabEl4QVR1bkZyb2hBLWc1MFNIekVyeXI0SmVreHBjRnJ1YUlVdXpVbW9JZkk0aEgxYlM1VGhSRlhtcS14YzdTYUhXR2xodElmWjZuNUVwaHozX1RVTG1QWHdPZWd4clNHYnAyOTBsWEl5UU83RGxZb0RKWjdadjlsTmJtSHQ0Yl9uaDJoODB0QV9sWmFYbFAxcjA1bmhNVlNqc2xEeHlvcUJFbVZvY250ZENnMnZLT1psb1JDclB3WVR6NGdZb2pzb3U4Ny04QlB0UTZub0dMOXZEUTZEcVJhZldCWEZZSDdLTy02UVZqck5zVTZwS3pObGlOejNJeHUzbFRabFM2V2xaekZVRjZtX3VzZlplendnOWQzT01WMFd3ejNadHVlTFlqRGR2dk5Da29zOFYwOUdOaEc4OHhHRnJFMmJFMk12VDNPNlBBTGlsXy13cUM1QkVYb0o1Z2U4ZXJnWXpZdm1sWjA5bzQzb2NFWC1xbmIycGZRbGtCaGNaOWlkX094UUNNampwbkZoREctNWI4QnZRaE8zM3BEQ1BwNzA1a3BzOGczZXdIM2s1NHFGN1ZTbmJhZkc4RVdfM0ZIZU5udTBYajd1RGxpWXZpRWdSMmhHa2RKOEIzbmM0X2F1OGxrN2p6LW9UVldDOFVpREoxZ1UzcTBZX19OQ0xJb0syWlhNSlQ4MzQwdzRtVG94Y01GS3FMLV95UVlxOTFORk8zdjE5VGxVaXdhbGlzeHdoYWNzazZWai1GUGtUM2gzR0ZWTTY4SThWeVFnZldIaklOTTJqTTg1VkhEYW5wNmdEVllXMmJCV2tpVmVYeUV2c0E1T00xbHJRNzgzVG9wb0Q1cV81UEhqYUFsQ2p1a0VpRDVINl9SVkpyZVRNVnVXQUxwY3NWZnJrNmRVREpiLWNHYUpXWmxkQlhNbWhuR1NmQ1BaVDlidUxCWHJMaHhZbk1FclVBaEVZeWg1ZlFoenZzRHlKbV8wa3lmMGZrd3NmTDZjQkE0UXNSUFhpTWtUUHBrX29BVzc4QzEtWEJIQW1GMGFuZVlXQWZIOXJEamloeGFCeHpYMHNjMFVfNXpQdlJfSkk2bzFROU5NU0c1SHREWW1nbkFNZFZ0UjdPRGdjaF96RGplY1hjdFFzLVR6MTVXYlRjbHIxQ2JRejRpVko5NWhBU0ZHR3ZvczU5elljRGpHRTdIc0FsSm5fUHEwT1gtTS1lN3M3X3ZZRnlkYUZoZXRQeEJsZlhLdFdTUzU1NUl4a29aOWZIdTlPM0Fnak1xYWVkYTNiMmZXUHlXS2lwUVBZLXQyaUxuRmtQNFFieE9SVmdZVW9WTHlzbnBPZlNIdGVHOE1LNVNESjN3cGtVSHVpT1NJWHE1ZzNmUTVTOC0xX3NGSmJqU19IbjZfQWtMRG1YNUQtRy13TUJIZFlyOXJkQzFQbkdZVXVzM2czbS1HWHFBT1pXdVd3N09tcG82SVhnY1ZtUWxqTEg2UzJCUmllb2pweVN2aGwwS1FVRUhjNEN2amRMc3MwVU4zN3dVMWM5Slg4SERtenFaQk1yMWx0LWtxVWtLZVVtbU4yejVEM2h6TEt0RGdfWE09", + "provider": "1b59e032-3eb6-4694-93a5-df84cd9b3ce2" + } + }, + { + "model": "api.providersecret", + "pk": "ae48ecde-75cd-4814-92ab-18f48719e5d9", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:45:26.412Z", + "updated_at": "2024-10-18T10:45:26.412Z", + "name": "Valid AWS Credentials", + "secret_type": "static", + "_secret": 
"Z0FBQUFBQm5FanhHa3dXS0I3M2NmWm56SktiaGNqdDZUN0xQU1QwUi15QkhLZldFUmRENk1BXzlscG9JSUxVSTF5ekxuMkdEanlJNjhPUS1VSV9wVTBvU2l4ZnNGOVJhYW93RC1LTEhmc2pyOTJvUWwyWnpFY19WN1pRQk5IdDYwYnBDQnF1eU9nUzdwTGU3QU5qMGFyX1E4SXdpSk9paGVLcVpOVUhwb3duaXgxZ0ZxME5Pcm40QzBGWEZKY2lmRVlCMGFuVFVzemxuVjVNalZVQ2JsY2ZqNWt3Z01IYUZ0dk92YkdtSUZ5SlBvQWZoVU5DWlRFWmExNnJGVEY4Q1Bnd2VJUW9TSWdRcG9rSDNfREQwRld3Q1RYVnVYWVJLWWIxZmpsWGpwd0xQM0dtLTlYUjdHOVhhNklLWXFGTHpFQUVyVmNhYW9CU0tocGVyX3VjMkVEcVdjdFBfaVpsLTBzaUxrWTlta3dpelNtTG9xYVhBUHUzNUE4RnI1WXdJdHcxcFVfaG1XRHhDVFBKamxJb1FaQ2lsQ3FzRmxZbEJVemVkT1E2aHZfbDJqWDJPT3ViOWJGYzQ3eTNWNlFQSHBWRDFiV2tneDM4SmVqMU9Bd01TaXhPY2dmWG5RdENURkM2b2s5V3luVUZQcnFKNldnWEdYaWE2MnVNQkEwMHd6cUY5cVJkcGw4bHBtNzhPeHhkREdwSXNEc1JqQkxUR1FYRTV0UFNwbVlVSWF5LWgtbVhJZXlPZ0Q4cG9HX2E0Qld0LTF1TTFEVy1XNGdnQTRpLWpQQmFJUEdaOFJGNDVoUVJnQ25YVU5DTENMaTY4YmxtYWJFRERXTjAydVN2YnBDb3RkUE0zSDRlN1A3TXc4d2h1Wmd0LWUzZEcwMUstNUw2YnFyS2Z0NEVYMXllQW5GLVBpeU55SkNhczFIeFhrWXZpVXdwSFVrTDdiQjQtWHZJdERXVThzSnJsT2FNZzJDaUt6Y2NXYUZhUlo3VkY0R1BrSHNHNHprTmxjYmp1TXVKakRha0VtNmRFZWRmZHJWdnRCOVNjVGFVWjVQM3RwWWl4SkNmOU1pb2xqMFdOblhNY3Y3aERpOHFlWjJRc2dtRDkzZm1Qc29wdk5OQmJPbGk5ZUpGM1I2YzRJN2gxR3FEMllXR1pma1k0emVqSjZyMUliMGZsc3NfSlVDbGt4QzJTc3hHOU9FRHlZb09zVnlvcDR6WC1uclRSenI0Yy13WlFWNzJWRkwydjhmSjFZdnZ5X3NmZVF6UWRNMXo5STVyV3B0d09UUlFtOURITGhXSDVIUl9zYURJc05KWUNxekVyYkxJclNFNV9leEk4R2xsMGJod3lYeFIwaXR2dllwLTZyNWlXdDRpRkxVYkxWZFdvYUhKck5aeElBZUtKejNKS2tYVW1rTnVrRjJBQmdlZmV6ckozNjNwRmxLS1FaZzRVTTBZYzFFYi1idjBpZkQ3bWVvbEdRZXJrWFNleWZmSmFNdG1wQlp0YmxjWDV5T0tEbHRsYnNHbjRPRjl5MkttOUhRWlJtd1pmTnY4Z1lPRlZoTzFGVDdTZ0RDY1ByV0RndTd5LUNhcHNXUnNIeXdLMEw3WS1tektRTWFLQy1zakpMLWFiM3FOakE1UWU4LXlOX2VPbmd4MTZCRk9OY3Z4UGVDSWxhRlg4eHI4X1VUTDZZM0pjV0JDVi1UUjlTUl85cm1LWlZ0T1dzU0lpdWUwbXgtZ0l6eHNSNExRTV9MczJ6UkRkVElnRV9Rc0RoTDFnVHRZSEFPb2paX200TzZiRzVmRE5hOW5CTjh5Qi1WaEtueEpqRzJDY1luVWZtX1pseUpQSE5lQ0RrZ05EbWo5cU9MZ0ZkcXlqUll4UUkyejRfY2p4RXdEeC1PS1JIQVNUcmNIdkRJbzRiUktMWEQxUFM3aGNzeVFWUDdtcm5xNHlOYUU9", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555" + } + } +] diff --git a/api/src/backend/api/fixtures/dev/3_dev_scans.json b/api/src/backend/api/fixtures/dev/3_dev_scans.json new file mode 100644 index 0000000000..d03f4ac07b --- /dev/null +++ b/api/src/backend/api/fixtures/dev/3_dev_scans.json @@ -0,0 +1,218 @@ +[ + { + "model": "api.scan", + "pk": "0191e280-9d2f-71c8-9b18-487a23ba185e", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "37b065f8-26b0-4218-a665-0b23d07b27d9", + "trigger": "manual", + "name": "test scan 1", + "state": "completed", + "unique_resource_count": 1, + "duration": 5, + "scanner_args": { + "checks_to_execute": [ + "accessanalyzer_enabled" + ] + }, + "inserted_at": "2024-09-01T17:25:27.050Z", + "started_at": "2024-09-01T17:25:27.050Z", + "updated_at": "2024-09-01T17:25:27.050Z", + "completed_at": "2024-09-01T17:25:32.050Z" + } + }, + { + "model": "api.scan", + "pk": "01920573-aa9c-73c9-bcda-f2e35c9b19d2", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "b85601a8-4b45-4194-8135-03fb980ef428", + "trigger": "manual", + "name": "test aws scan 2", + "state": "completed", + "unique_resource_count": 1, + "duration": 20, + "scanner_args": { + "checks_to_execute": [ + "accessanalyzer_enabled" + ] + }, + "inserted_at": "2024-09-02T17:24:27.050Z", + "started_at": "2024-09-02T17:24:27.050Z", + "updated_at": "2024-09-02T17:24:27.050Z", + "completed_at": "2024-09-01T17:24:37.050Z" + } + }, + { + "model": "api.scan", + "pk": "01920573-ea5b-77fd-a93f-1ed2ae12f728", + "fields": { + "tenant": 
"12646005-9067-4d2a-a098-8bb378604362", + "provider": "baa7b895-8bac-4f47-b010-4226d132856e", + "trigger": "manual", + "name": "test gcp scan", + "state": "completed", + "unique_resource_count": 10, + "duration": 10, + "scanner_args": { + "checks_to_execute": [ + "cloudsql_instance_automated_backups" + ] + }, + "inserted_at": "2024-09-02T19:26:27.050Z", + "started_at": "2024-09-02T19:26:27.050Z", + "updated_at": "2024-09-02T19:26:27.050Z", + "completed_at": "2024-09-01T17:26:37.050Z" + } + }, + { + "model": "api.scan", + "pk": "01920573-ea5b-77fd-a93f-1ed2ae12f728", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "b85601a8-4b45-4194-8135-03fb980ef428", + "trigger": "manual", + "name": "test aws scan", + "state": "completed", + "unique_resource_count": 1, + "duration": 35, + "scanner_args": { + "checks_to_execute": [ + "accessanalyzer_enabled" + ] + }, + "inserted_at": "2024-09-02T19:27:27.050Z", + "started_at": "2024-09-02T19:27:27.050Z", + "updated_at": "2024-09-02T19:27:27.050Z", + "completed_at": "2024-09-01T17:27:37.050Z" + } + }, + { + "model": "api.scan", + "pk": "c281c924-23f3-4fcc-ac63-73a22154b7de", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "b85601a8-4b45-4194-8135-03fb980ef428", + "trigger": "scheduled", + "name": "test scheduled aws scan", + "state": "available", + "scanner_args": { + "checks_to_execute": [ + "cloudformation_stack_outputs_find_secrets" + ] + }, + "scheduled_at": "2030-09-02T19:20:27.050Z", + "inserted_at": "2024-09-02T19:24:27.050Z", + "updated_at": "2024-09-02T19:24:27.050Z" + } + }, + { + "model": "api.scan", + "pk": "25c8907c-b26e-4ec0-966b-a1f53a39d8e6", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "b85601a8-4b45-4194-8135-03fb980ef428", + "trigger": "scheduled", + "name": "test scheduled aws scan 2", + "state": "available", + "scanner_args": { + "checks_to_execute": [ + "accessanalyzer_enabled", + "cloudformation_stack_outputs_find_secrets" + ] + }, + "scheduled_at": "2030-08-02T19:31:27.050Z", + "inserted_at": "2024-09-02T19:38:27.050Z", + "updated_at": "2024-09-02T19:38:27.050Z" + } + }, + { + "model": "api.scan", + "pk": "25c8907c-b26e-4ec0-966b-a1f53a39d8e6", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "baa7b895-8bac-4f47-b010-4226d132856e", + "trigger": "scheduled", + "name": "test scheduled gcp scan", + "state": "available", + "scanner_args": { + "checks_to_execute": [ + "cloudsql_instance_automated_backups", + "iam_audit_logs_enabled" + ] + }, + "scheduled_at": "2030-07-02T19:30:27.050Z", + "inserted_at": "2024-09-02T19:29:27.050Z", + "updated_at": "2024-09-02T19:29:27.050Z" + } + }, + { + "model": "api.scan", + "pk": "25c8907c-b26e-4ec0-966b-a1f53a39d8e6", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "1b59e032-3eb6-4694-93a5-df84cd9b3ce2", + "trigger": "scheduled", + "name": "test scheduled azure scan", + "state": "available", + "scanner_args": { + "checks_to_execute": [ + "aks_cluster_rbac_enabled", + "defender_additional_email_configured_with_a_security_contact" + ] + }, + "scheduled_at": "2030-08-05T19:32:27.050Z", + "inserted_at": "2024-09-02T19:29:27.050Z", + "updated_at": "2024-09-02T19:29:27.050Z" + } + }, + { + "model": "api.scan", + "pk": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "name": "real scan 1", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "trigger": "manual", + "state": "completed", + 
"unique_resource_count": 19, + "progress": 100, + "scanner_args": { + "checks_to_execute": [ + "accessanalyzer_enabled" + ] + }, + "duration": 7, + "scheduled_at": null, + "inserted_at": "2024-10-18T10:45:57.678Z", + "updated_at": "2024-10-18T10:46:05.127Z", + "started_at": "2024-10-18T10:45:57.909Z", + "completed_at": "2024-10-18T10:46:05.127Z" + } + }, + { + "model": "api.scan", + "pk": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "name": "real scan 2", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "trigger": "manual", + "state": "completed", + "unique_resource_count": 20, + "progress": 100, + "scanner_args": { + "checks_to_execute": [ + "accessanalyzer_enabled", + "account_security_contact_information_is_registered" + ] + }, + "duration": 4, + "scheduled_at": null, + "inserted_at": "2024-10-18T11:16:21.358Z", + "updated_at": "2024-10-18T11:16:26.060Z", + "started_at": "2024-10-18T11:16:21.593Z", + "completed_at": "2024-10-18T11:16:26.060Z" + } + } +] diff --git a/api/src/backend/api/fixtures/dev/4_dev_resources.json b/api/src/backend/api/fixtures/dev/4_dev_resources.json new file mode 100644 index 0000000000..0c94329290 --- /dev/null +++ b/api/src/backend/api/fixtures/dev/4_dev_resources.json @@ -0,0 +1,322 @@ +[ + { + "model": "api.resource", + "pk": "0234477d-0b8e-439f-87d3-ce38dff3a434", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.772Z", + "updated_at": "2024-10-18T11:16:24.466Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root", + "name": "", + "region": "eu-south-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'eu':7C 'eu-south':6C 'iam':3A 'other':11 'root':5A 'south':8C" + } + }, + { + "model": "api.resource", + "pk": "17ce30a3-6e77-42a5-bb08-29dfcad7396a", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.882Z", + "updated_at": "2024-10-18T11:16:24.533Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root2", + "name": "", + "region": "eu-west-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'eu':7C 'eu-west':6C 'iam':3A 'other':11 'root':5A 'west':8C" + } + }, + { + "model": "api.resource", + "pk": "1f9de587-ba5b-415a-b9b0-ceed4c6c9f32", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.091Z", + "updated_at": "2024-10-18T11:16:24.637Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root3", + "name": "", + "region": "ap-northeast-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'ap':7C 'ap-northeast':6C 'arn':1A 'aws':2A 'iam':3A 'northeast':8C 'other':11 'root':5A" + } + }, + { + "model": "api.resource", + "pk": "29b35668-6dad-411d-bfec-492311889892", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.008Z", + "updated_at": "2024-10-18T11:16:24.600Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root4", + "name": "", + "region": "us-west-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 
'iam':3A 'other':11 'root':5A 'us':7C 'us-west':6C 'west':8C" + } + }, + { + "model": "api.resource", + "pk": "30505514-01d4-42bb-8b0c-471bbab27460", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:26.014Z", + "updated_at": "2024-10-18T11:16:26.023Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root5", + "name": "", + "region": "us-east-1", + "service": "account", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'account':10 'arn':1A 'aws':2A 'east':8C 'iam':3A 'other':11 'root':5A 'us':7C 'us-east':6C" + } + }, + { + "model": "api.resource", + "pk": "372932f0-e4df-4968-9721-bb4f6236fae4", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.848Z", + "updated_at": "2024-10-18T11:16:24.516Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root6", + "name": "", + "region": "eu-west-3", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'3':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'eu':7C 'eu-west':6C 'iam':3A 'other':11 'root':5A 'west':8C" + } + }, + { + "model": "api.resource", + "pk": "3a37d124-7637-43f6-9df7-e9aa7ef98c53", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.979Z", + "updated_at": "2024-10-18T11:16:24.585Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root7", + "name": "", + "region": "sa-east-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'east':8C 'iam':3A 'other':11 'root':5A 'sa':7C 'sa-east':6C" + } + }, + { + "model": "api.resource", + "pk": "3c49318e-03c6-4f12-876f-40451ce7de3d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.072Z", + "updated_at": "2024-10-18T11:16:24.630Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root8", + "name": "", + "region": "ap-southeast-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'ap':7C 'ap-southeast':6C 'arn':1A 'aws':2A 'iam':3A 'other':11 'root':5A 'southeast':8C" + } + }, + { + "model": "api.resource", + "pk": "430bf313-8733-4bc5-ac70-5402adfce880", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.994Z", + "updated_at": "2024-10-18T11:16:24.593Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root9", + "name": "", + "region": "eu-north-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'eu':7C 'eu-north':6C 'iam':3A 'north':8C 'other':11 'root':5A" + } + }, + { + "model": "api.resource", + "pk": "78bd2a52-82f9-45df-90a9-4ad78254fdc4", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.055Z", + "updated_at": "2024-10-18T11:16:24.622Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root10", + "name": "", + "region": "ap-northeast-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'ap':7C 'ap-northeast':6C 'arn':1A 'aws':2A 'iam':3A 'northeast':8C 'other':11 'root':5A" + } + }, + { + "model": 
"api.resource", + "pk": "7973e332-795e-4a74-b4d4-a53a21c98c80", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.896Z", + "updated_at": "2024-10-18T11:16:24.542Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root11", + "name": "", + "region": "us-east-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'east':8C 'iam':3A 'other':11 'root':5A 'us':7C 'us-east':6C" + } + }, + { + "model": "api.resource", + "pk": "8ca0a188-5699-436e-80fd-e566edaeb259", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.938Z", + "updated_at": "2024-10-18T11:16:24.565Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root12", + "name": "", + "region": "ca-central-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'ca':7C 'ca-central':6C 'central':8C 'iam':3A 'other':11 'root':5A" + } + }, + { + "model": "api.resource", + "pk": "8fe4514f-71d7-46ab-b0dc-70cef23b4d13", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.965Z", + "updated_at": "2024-10-18T11:16:24.578Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root13", + "name": "", + "region": "eu-west-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'eu':7C 'eu-west':6C 'iam':3A 'other':11 'root':5A 'west':8C" + } + }, + { + "model": "api.resource", + "pk": "9ab35225-dc7c-4ebd-bbc0-d81fb5d9de77", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.909Z", + "updated_at": "2024-10-18T11:16:24.549Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root14", + "name": "", + "region": "ap-south-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'ap':7C 'ap-south':6C 'arn':1A 'aws':2A 'iam':3A 'other':11 'root':5A 'south':8C" + } + }, + { + "model": "api.resource", + "pk": "9be26c1d-adf0-4ba8-9ca9-c740f4a0dc4e", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.863Z", + "updated_at": "2024-10-18T11:16:24.524Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root15", + "name": "", + "region": "eu-central-2", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'2':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'central':8C 'eu':7C 'eu-central':6C 'iam':3A 'other':11 'root':5A" + } + }, + { + "model": "api.resource", + "pk": "ba108c01-bcad-44f1-b211-c1d8985da89d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.110Z", + "updated_at": "2024-10-18T11:16:24.644Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root16", + "name": "", + "region": "ap-northeast-3", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'3':9C '112233445566':4A 'accessanalyzer':10 'ap':7C 'ap-northeast':6C 'arn':1A 'aws':2A 'iam':3A 'northeast':8C 'other':11 'root':5A" + } + }, + { + "model": "api.resource", + "pk": "dc6cfb5d-6835-4c7b-9152-c18c734a6eaa", + 
"fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.038Z", + "updated_at": "2024-10-18T11:16:24.615Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root17", + "name": "", + "region": "eu-central-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'central':8C 'eu':7C 'eu-central':6C 'iam':3A 'other':11 'root':5A" + } + }, + { + "model": "api.resource", + "pk": "e0664164-cfda-44a4-b743-acee1c69386c", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.924Z", + "updated_at": "2024-10-18T11:16:24.557Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root18", + "name": "", + "region": "us-west-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'arn':1A 'aws':2A 'iam':3A 'other':11 'root':5A 'us':7C 'us-west':6C 'west':8C" + } + }, + { + "model": "api.resource", + "pk": "e1929daa-a984-4116-8131-492a48321dba", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.023Z", + "updated_at": "2024-10-18T11:16:24.607Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:iam::112233445566:root19", + "name": "", + "region": "ap-southeast-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9C '112233445566':4A 'accessanalyzer':10 'ap':7C 'ap-southeast':6C 'arn':1A 'aws':2A 'iam':3A 'other':11 'root':5A 'southeast':8C" + } + }, + { + "model": "api.resource", + "pk": "e37bb1f1-1669-4bb3-be86-e3378ddfbcba", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.952Z", + "updated_at": "2024-10-18T11:16:24.571Z", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "uid": "arn:aws:access-analyzer:us-east-1:112233445566:analyzer/ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c", + "name": "", + "region": "us-east-1", + "service": "accessanalyzer", + "type": "Other", + "text_search": "'1':9A,15C '112233445566':10A 'access':4A 'access-analyzer':3A 'accessanalyzer':16 'analyzer':5A 'analyzer/consoleanalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c':11A 'arn':1A 'aws':2A 'east':8A,14C 'other':17 'us':7A,13C 'us-east':6A,12C" + } + } +] diff --git a/api/src/backend/api/fixtures/dev/5_dev_findings.json b/api/src/backend/api/fixtures/dev/5_dev_findings.json new file mode 100644 index 0000000000..8a02b6c2ed --- /dev/null +++ b/api/src/backend/api/fixtures/dev/5_dev_findings.json @@ -0,0 +1,2498 @@ +[ + { + "model": "api.finding", + "pk": "01929f3c-0917-75ff-ba43-08b857227015", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.823Z", + "updated_at": "2024-10-18T10:46:04.841Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. 
This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0936-7cef-b923-55639a76763a", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.855Z", + "updated_at": "2024-10-18T10:46:04.858Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + 
"risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0944-7bcf-8fe4-65df82f0de3a", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.869Z", + "updated_at": "2024-10-18T10:46:04.876Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is 
not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0958-7e15-8cac-0df0a67fd3a6", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.888Z", + "updated_at": "2024-10-18T10:46:04.892Z", + "uid": 
"prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": 
"01929f3c-0965-7290-9b5d-f4a84e26feb0", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.901Z", + "updated_at": "2024-10-18T10:46:04.905Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0973-75c7-8bb3-d7f73491dbd2", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.915Z", + "updated_at": "2024-10-18T10:46:04.919Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0981-71ff-8b66-1b23f7cdd1f8", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.929Z", + "updated_at": "2024-10-18T10:46:04.934Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, 
such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0990-7a50-a72e-e6686cd74116", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.944Z", + "updated_at": "2024-10-18T10:46:04.947Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, 
+ "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-099d-7d97-8c57-34ee4740c9e5", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.957Z", + "updated_at": "2024-10-18T10:46:04.962Z", + "uid": 
"prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c", + "delta": "new", + "status": "PASS", + "status_extended": "IAM Access Analyzer ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c is enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':107,131 '454e':8B '52d11cc1cf7c':10B '83b66ad7':6B 'a':55,62,84 'access':2B,15,47,59,80,100,112,135,150,166 'access-analyzer':165 'accessanalyzer':88,118,159 'account':126,170 'account-id':169 'accounts':26,139 'action':144 'additional':156 'all':78,138 'allowed':82 'amazon':29 'an':39 'analysis':66 'analyz':3B 'analyzer':16,60,101,113,121,123,136,141,151,167 'analyzer-name':122 'analyzer/resource-id':172 'and':25,51,73,142 'applies':71 'are':36 'arn':163 'as':28 'at':154 'automated':68 'available':153 'aws':13,91,117 'b851':9B 'buckets':31 'by':83 'called':67 'check':97,109 'consoleanalyz':5B 'consoleanalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c':4B 'cost':157 'create':120,140 'create-analyzer':119 'd024':7B 'data':52 'determine':77 'docs.aws.amazon.com':106,130 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':105,129 'enabl':12B 'enable':133 'enabled':89,103,115 'entity':41 'external':40 'for':137 'form':63 'helps':17 'iam':1B,14,33,58,95,99,111,134,149 'id':171 'identify':19,45 'if':98,110 'in':22 'inference':75 'is':54,102,114,147,152 'it':146 'lets':43 'logic':72 'low':93 'mathematical':65,74 'name':124 'no':155 'of':64 'or':32 'organization':24,127 'other':161 'over':145 'partition':164 'paths':81 'policy':86 'possible':79 'reasoning':69 'recommendations':148 'region':168 'resource':85 'resources':21,50 'risk':57 'roles':34 's3':30 'security':56 'shared':37 'such':27 'take':143 'that':35 
'the':20 'this':42 'to':48,76 'type':125 'unintended':46 'uses':61 'which':53,70 'with':38 'you':18,44 'your':23,49" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-09ab-728a-a4a9-5a9a0693b0c1", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.971Z", + "updated_at": "2024-10-18T10:46:04.975Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-09b8-757a-89c2-411e0c7309d4", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.984Z", + "updated_at": "2024-10-18T10:46:04.989Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-09c7-71c9-a483-4d207cd464b7", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:04.999Z", + "updated_at": "2024-10-18T10:46:05.003Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and 
accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-09d5-7fe3-bb36-19e0c1b90a9d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.013Z", + "updated_at": "2024-10-18T10:46:05.018Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + 
"raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-09e4-70a7-949d-aba3a0f93fb1", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.029Z", + "updated_at": "2024-10-18T10:46:05.033Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566", + 
"delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-09f5-79f9-9ee7-17c87e43d87b", + "fields": { + "tenant": 
"12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.045Z", + "updated_at": "2024-10-18T10:46:05.050Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0a05-7426-980d-bdfeeb70a008", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.061Z", + "updated_at": "2024-10-18T10:46:05.065Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0a17-7694-9920-7233a71fcdbe", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.080Z", + "updated_at": "2024-10-18T10:46:05.085Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and 
accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0a2a-7ba7-8e43-26cb937c8df7", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.099Z", + "updated_at": "2024-10-18T10:46:05.104Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566", + "delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + 
"raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f3c-0a3b-784a-930e-e363d60f3586", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T10:46:05.115Z", + "updated_at": "2024-10-18T10:46:05.121Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566", + 
"delta": "new", + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f3b-ed2e-7623-ad63-7c37cd37828f", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd29-7499-80c3-18cfa227c7b6", + "fields": { + "tenant": 
"12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.489Z", + "updated_at": "2024-10-18T11:16:24.506Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd46-7ff5-800f-483b7ee71cd6", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.518Z", + "updated_at": "2024-10-18T11:16:24.521Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd4e-75db-a9f5-1c95776cead6", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.526Z", + "updated_at": "2024-10-18T11:16:24.529Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and 
accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd57-7560-b55e-ad2e7d660509", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.535Z", + "updated_at": "2024-10-18T11:16:24.538Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + 
"raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd60-7a7d-ba1b-37affc11176a", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.544Z", + "updated_at": "2024-10-18T11:16:24.546Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566", + "delta": 
null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd67-79ba-af2f-ce6d0fd3c846", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + 
"inserted_at": "2024-10-18T11:16:24.551Z", + "updated_at": "2024-10-18T11:16:24.554Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd70-79ec-a176-9d606bdf68fb", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.560Z", + "updated_at": "2024-10-18T11:16:24.562Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd76-7817-aab2-c283b44082ec", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.567Z", + "updated_at": "2024-10-18T11:16:24.569Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and 
accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd7d-740c-921d-9400d1fde3e2", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.573Z", + "updated_at": "2024-10-18T11:16:24.575Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c", + "delta": null, + "status": "PASS", + "status_extended": "IAM Access Analyzer ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c is enabled.", + "severity": 
"low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':107,131 '454e':8B '52d11cc1cf7c':10B '83b66ad7':6B 'a':55,62,84 'access':2B,15,47,59,80,100,112,135,150,166 'access-analyzer':165 'accessanalyzer':88,118,159 'account':126,170 'account-id':169 'accounts':26,139 'action':144 'additional':156 'all':78,138 'allowed':82 'amazon':29 'an':39 'analysis':66 'analyz':3B 'analyzer':16,60,101,113,121,123,136,141,151,167 'analyzer-name':122 'analyzer/resource-id':172 'and':25,51,73,142 'applies':71 'are':36 'arn':163 'as':28 'at':154 'automated':68 'available':153 'aws':13,91,117 'b851':9B 'buckets':31 'by':83 'called':67 'check':97,109 'consoleanalyz':5B 'consoleanalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c':4B 'cost':157 'create':120,140 'create-analyzer':119 'd024':7B 'data':52 'determine':77 'docs.aws.amazon.com':106,130 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':105,129 'enabl':12B 'enable':133 'enabled':89,103,115 'entity':41 'external':40 'for':137 'form':63 'helps':17 'iam':1B,14,33,58,95,99,111,134,149 'id':171 'identify':19,45 'if':98,110 'in':22 'inference':75 'is':54,102,114,147,152 'it':146 'lets':43 'logic':72 'low':93 'mathematical':65,74 'name':124 'no':155 'of':64 'or':32 'organization':24,127 'other':161 'over':145 'partition':164 'paths':81 'policy':86 'possible':79 'reasoning':69 'recommendations':148 'region':168 'resource':85 'resources':21,50 'risk':57 'roles':34 's3':30 'security':56 'shared':37 'such':27 'take':143 'that':35 'the':20 'this':42 'to':48,76 'type':125 'unintended':46 'uses':61 'which':53,70 'with':38 'you':18,44 'your':23,49" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd84-7154-aaf3-3d57bc9fec6c", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + 
"inserted_at": "2024-10-18T11:16:24.580Z", + "updated_at": "2024-10-18T11:16:24.582Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd8b-79ee-97a2-66fb8d53bcfc", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.587Z", + "updated_at": "2024-10-18T11:16:24.589Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd93-72a1-82bb-6247e5b05f5c", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.595Z", + "updated_at": "2024-10-18T11:16:24.597Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, 
such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cd9a-7574-a8bd-2eb085c1c1d4", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.602Z", + "updated_at": "2024-10-18T11:16:24.604Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + 
"tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cda2-7bb6-b303-ccf5c68e3b7e", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.610Z", + "updated_at": "2024-10-18T11:16:24.612Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566", + "delta": null, + "status": 
"FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cda9-716d-9824-9bdaf894ba46", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": 
"2024-10-18T11:16:24.617Z", + "updated_at": "2024-10-18T11:16:24.620Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." 
+ } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cdb0-7eef-84d9-13ff9bd5405d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.625Z", + "updated_at": "2024-10-18T11:16:24.627Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cdb8-72c4-923a-13dbebc6347b", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.632Z", + "updated_at": "2024-10-18T11:16:24.634Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and 
accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cdbf-7d43-b0f0-c82b8c0f6bca", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.639Z", + "updated_at": "2024-10-18T11:16:24.642Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566", + "delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + 
"raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-cdc6-70e9-b025-30f1f8b5efbe", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:24.646Z", + "updated_at": "2024-10-18T11:16:24.648Z", + "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566", + 
"delta": null, + "status": "FAIL", + "status_extended": "IAM Access Analyzer in account 112233445566 is not enabled.", + "severity": "low", + "impact": "low", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "accessanalyzer_enabled", + "check_metadata": { + "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "notes": "", + "checkid": "accessanalyzer_enabled", + "provider": "aws", + "severity": "low", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Check if IAM Access Analyzer is enabled", + "compliance": null, + "relatedurl": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "description": "Check if IAM Access Analyzer is enabled", + "remediation": { + "code": { + "cli": "aws accessanalyzer create-analyzer --analyzer-name --type ", + "other": "", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html", + "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost)." + } + }, + "servicename": "accessanalyzer", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/iam/latest/userguide/what-is-access-analyzer.html':104,128 '112233445566':6B 'a':52,59,81 'access':2B,12,44,56,77,97,109,132,147,163 'access-analyzer':162 'accessanalyzer':85,115,156 'account':5B,123,167 'account-id':166 'accounts':23,136 'action':141 'additional':153 'all':75,135 'allowed':79 'amazon':26 'an':36 'analysis':63 'analyz':3B 'analyzer':13,57,98,110,118,120,133,138,148,164 'analyzer-name':119 'analyzer/resource-id':169 'and':22,48,70,139 'applies':68 'are':33 'arn':160 'as':25 'at':151 'automated':65 'available':150 'aws':10,88,114 'buckets':28 'by':80 'called':64 'check':94,106 'cost':154 'create':117,137 'create-analyzer':116 'data':49 'determine':74 'docs.aws.amazon.com':103,127 'docs.aws.amazon.com/iam/latest/userguide/what-is-access-analyzer.html':102,126 'enabl':9B 'enable':130 'enabled':86,100,112 'entity':38 'external':37 'for':134 'form':60 'helps':14 'iam':1B,11,30,55,92,96,108,131,146 'id':168 'identify':16,42 'if':95,107 'in':19 'inference':72 'is':51,99,111,144,149 'it':143 'lets':40 'logic':69 'low':90 'mathematical':62,71 'name':121 'no':152 'of':61 'or':29 'organization':21,124 'other':158 'over':142 'partition':161 'paths':78 'policy':83 'possible':76 'reasoning':66 'recommendations':145 'region':165 'resource':82 'resources':18,47 'risk':54 'roles':31 's3':27 'security':53 'shared':34 'such':24 'take':140 'that':32 'the':17 'this':39 'to':45,73 'type':122 'unintended':43 'uses':58 'which':50,67 'with':35 'you':15,41 'your':20,46" + } + }, + { + "model": "api.finding", + "pk": "01929f57-d331-73f2-b5c2-8e75148b99e7", + "fields": { + "tenant": 
"12646005-9067-4d2a-a098-8bb378604362", + "inserted_at": "2024-10-18T11:16:26.033Z", + "updated_at": "2024-10-18T11:16:26.045Z", + "uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566", + "delta": "new", + "status": "MANUAL", + "status_extended": "Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Alternate Contacts -> Security Section.", + "severity": "medium", + "impact": "medium", + "impact_extended": null, + "raw_result": {}, + "tags": {}, + "check_id": "account_security_contact_information_is_registered", + "check_metadata": { + "risk": "AWS provides customers with the option of specifying the contact information for accounts security team. It is recommended that this information be provided. Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.", + "notes": "", + "checkid": "account_security_contact_information_is_registered", + "provider": "aws", + "severity": "medium", + "checktype": [ + "IAM" + ], + "dependson": [], + "relatedto": [], + "categories": [], + "checktitle": "Ensure security contact information is registered.", + "compliance": null, + "relatedurl": "", + "description": "Ensure security contact information is registered.", + "remediation": { + "code": { + "cli": "No command available.", + "other": "https://docs.prowler.com/checks/aws/iam-policies/iam_19#aws-console", + "nativeiac": "", + "terraform": "" + }, + "recommendation": { + "url": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html", + "text": "Go to the My Account section and complete alternate contacts." + } + }, + "servicename": "account", + "checkaliases": [], + "resourcetype": "Other", + "subservicename": "", + "resourceidtemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id" + }, + "scan": "01929f57-c0ee-7553-be0b-cbde006fb6f7", + "text_search": "'/accounts/latest/reference/manage-acct-update-contact.html':113 '/checks/aws/iam-policies/iam_19#aws-console':109 'access':133 'access-recorder':132 'account':8B,18B,76,119,126,137 'account-id':136 'accounts':35 'advisories':57 'altern':19B 'alternate':123 'and':121 'arn':130 'available':105 'aw':4B 'aws':23,60,83 'be':44 'best':69 'by':59 'choos':6B 'command':104 'complete':122 'consol':5B 'contact':20B,32,50,78,91,98 'contacts':124 'customers':25 'docs.aws.amazon.com':112 'docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html':111 'docs.prowler.com':108 'docs.prowler.com/checks/aws/iam-policies/iam_19#aws-console':107 'ensure':54,89,96 'equipped':70 'for':34 'go':115 'help':53 'iam':87 'id':138 'in':64 'information':33,43,51,79,92,99 'is':39,68,80,93,100 'it':38 'login':1B 'medium':85 'my':118 'name':9B 'no':103 'of':29 'option':28 'organization':66 'other':128 'partition':131 'provided':45 'provides':24 'reach':61 'recommended':40 'recorder':134 'recorder/resource-id':139 'region':135 'registered':81,94,101 'respond':72 'right':13B 'section':22B,120 'secur':21B 'security':36,48,56,77,90,97 'security-specific':47 'sent':58 'specific':49 'specifying':30,46 'team':37,63 'that':41,55,67 'the':27,31,62,117 'them':74 'this':42 'to':71,73,116 'top':12B 'will':52 'window':16B 'with':26 'your':65" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "00841d38-1319-4fe4-b38e-4f00bf6246d5", + "fields": { + "tenant": 
"12646005-9067-4d2a-a098-8bb378604362", + "resource": "372932f0-e4df-4968-9721-bb4f6236fae4", + "finding": "01929f57-cd46-7ff5-800f-483b7ee71cd6" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "013595e9-0388-4a65-950b-fab01c2a4c68", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "78bd2a52-82f9-45df-90a9-4ad78254fdc4", + "finding": "01929f3c-0a05-7426-980d-bdfeeb70a008" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "0943d22d-db70-47a1-b8a2-0a1f6925b16b", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "8ca0a188-5699-436e-80fd-e566edaeb259", + "finding": "01929f57-cd76-7817-aab2-c283b44082ec" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "0f508071-6989-41da-aec9-23934eb4d9b0", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "372932f0-e4df-4968-9721-bb4f6236fae4", + "finding": "01929f3c-0936-7cef-b923-55639a76763a" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "0fbc8dd3-c505-4804-887c-16026632c8e7", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "ba108c01-bcad-44f1-b211-c1d8985da89d", + "finding": "01929f3c-0a3b-784a-930e-e363d60f3586" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "214b1027-292b-4e04-93f9-72acb2784345", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "3a37d124-7637-43f6-9df7-e9aa7ef98c53", + "finding": "01929f3c-09b8-757a-89c2-411e0c7309d4" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "24496c6f-b0ee-4dd5-b566-77283c3d625c", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "e37bb1f1-1669-4bb3-be86-e3378ddfbcba", + "finding": "01929f57-cd7d-740c-921d-9400d1fde3e2" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "24a5a1bc-f211-4074-b687-61c994d48ea3", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "8fe4514f-71d7-46ab-b0dc-70cef23b4d13", + "finding": "01929f57-cd84-7154-aaf3-3d57bc9fec6c" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "25161d3f-bf2c-43ce-821b-237456f1846b", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "17ce30a3-6e77-42a5-bb08-29dfcad7396a", + "finding": "01929f57-cd57-7560-b55e-ad2e7d660509" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "254d9f93-cae5-4aee-a063-9c30e9e1fce5", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "9ab35225-dc7c-4ebd-bbc0-d81fb5d9de77", + "finding": "01929f57-cd67-79ba-af2f-ce6d0fd3c846" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "2d492826-662d-4f0b-8545-ac374d433399", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "dc6cfb5d-6835-4c7b-9152-c18c734a6eaa", + "finding": "01929f3c-09f5-79f9-9ee7-17c87e43d87b" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "2e6d8623-5656-4118-ad85-128ed9f22edb", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "3c49318e-03c6-4f12-876f-40451ce7de3d", + "finding": "01929f3c-0a17-7694-9920-7233a71fcdbe" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "324d5b45-7975-4792-a8f2-3a66c87d2542", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "0234477d-0b8e-439f-87d3-ce38dff3a434", + "finding": "01929f3c-0917-75ff-ba43-08b857227015" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "55c94cc1-65db-4e6d-b4ae-9461d067e73c", + "fields": { + 
"tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "3c49318e-03c6-4f12-876f-40451ce7de3d", + "finding": "01929f57-cdb8-72c4-923a-13dbebc6347b" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "596ac4dd-f10f-4dfb-b1d3-47a5f9a831c8", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "8ca0a188-5699-436e-80fd-e566edaeb259", + "finding": "01929f3c-0990-7a50-a72e-e6686cd74116" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "61e8d4c2-a043-47cc-b959-3d412830b637", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "e37bb1f1-1669-4bb3-be86-e3378ddfbcba", + "finding": "01929f3c-099d-7d97-8c57-34ee4740c9e5" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "62e9ed60-c79d-44fa-b2ea-357185b1ef26", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "dc6cfb5d-6835-4c7b-9152-c18c734a6eaa", + "finding": "01929f57-cda9-716d-9824-9bdaf894ba46" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "6946200f-10b5-469b-95df-bee6369a38ef", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "78bd2a52-82f9-45df-90a9-4ad78254fdc4", + "finding": "01929f57-cdb0-7eef-84d9-13ff9bd5405d" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "6ab286d0-e155-4ca1-a6eb-696abb240014", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "e1929daa-a984-4116-8131-492a48321dba", + "finding": "01929f3c-09e4-70a7-949d-aba3a0f93fb1" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "6fda097d-0541-4dfc-b966-7a83346e1b73", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "430bf313-8733-4bc5-ac70-5402adfce880", + "finding": "01929f57-cd93-72a1-82bb-6247e5b05f5c" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "71d037ac-9fc6-44d3-8684-3e561b3161a8", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "7973e332-795e-4a74-b4d4-a53a21c98c80", + "finding": "01929f3c-0965-7290-9b5d-f4a84e26feb0" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "738db92d-87bb-4f44-b00c-111308327167", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "0234477d-0b8e-439f-87d3-ce38dff3a434", + "finding": "01929f57-cd29-7499-80c3-18cfa227c7b6" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "746abf14-2c00-41c5-9017-9f9c63596a0d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "1f9de587-ba5b-415a-b9b0-ceed4c6c9f32", + "finding": "01929f57-cdbf-7d43-b0f0-c82b8c0f6bca" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "7513f701-68b2-4737-8781-9bff9fee83aa", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "e1929daa-a984-4116-8131-492a48321dba", + "finding": "01929f57-cda2-7bb6-b303-ccf5c68e3b7e" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "7f044f9f-f665-4d58-a3f3-c21388221653", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "430bf313-8733-4bc5-ac70-5402adfce880", + "finding": "01929f3c-09c7-71c9-a483-4d207cd464b7" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "81b13739-f130-4617-b944-1fcc4e319c1a", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "9ab35225-dc7c-4ebd-bbc0-d81fb5d9de77", + "finding": "01929f3c-0973-75c7-8bb3-d7f73491dbd2" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "99d3ad31-0f49-4a97-8c1e-4d57a8cb284b", + 
"fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "ba108c01-bcad-44f1-b211-c1d8985da89d", + "finding": "01929f57-cdc6-70e9-b025-30f1f8b5efbe" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "b2285b6f-8b1b-44f3-b4b2-c1e15dd72fa9", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "1f9de587-ba5b-415a-b9b0-ceed4c6c9f32", + "finding": "01929f3c-0a2a-7ba7-8e43-26cb937c8df7" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "b97a46b3-a1a5-4c00-a9fc-d7788b1b977d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "8fe4514f-71d7-46ab-b0dc-70cef23b4d13", + "finding": "01929f3c-09ab-728a-a4a9-5a9a0693b0c1" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "d07f4f57-4a98-4ee0-82ed-c6f22499ea2d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "29b35668-6dad-411d-bfec-492311889892", + "finding": "01929f57-cd9a-7574-a8bd-2eb085c1c1d4" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "d6ae9224-4197-48ec-bdef-c8c8b1d5d0b0", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "9be26c1d-adf0-4ba8-9ca9-c740f4a0dc4e", + "finding": "01929f3c-0944-7bcf-8fe4-65df82f0de3a" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "d7375f9d-2741-42c1-841e-daf8d777fb57", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "e0664164-cfda-44a4-b743-acee1c69386c", + "finding": "01929f3c-0981-71ff-8b66-1b23f7cdd1f8" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "dca80342-2431-4eda-b083-58aa7627b2c7", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "29b35668-6dad-411d-bfec-492311889892", + "finding": "01929f3c-09d5-7fe3-bb36-19e0c1b90a9d" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "dd37c0f2-174c-4b41-8655-67f70ad1f77b", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "9be26c1d-adf0-4ba8-9ca9-c740f4a0dc4e", + "finding": "01929f57-cd4e-75db-a9f5-1c95776cead6" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "e185e5bc-31cf-4a1e-b587-1ed909721590", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "17ce30a3-6e77-42a5-bb08-29dfcad7396a", + "finding": "01929f3c-0958-7e15-8cac-0df0a67fd3a6" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "e7c09b00-a6c4-4f9a-a685-d5f5bc586d94", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "30505514-01d4-42bb-8b0c-471bbab27460", + "finding": "01929f57-d331-73f2-b5c2-8e75148b99e7" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "eab724f9-bca4-4ed2-81e9-470bcf735c7e", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "3a37d124-7637-43f6-9df7-e9aa7ef98c53", + "finding": "01929f57-cd8b-79ee-97a2-66fb8d53bcfc" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "f159cb58-10df-4b9d-88d4-09de62888e14", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "e0664164-cfda-44a4-b743-acee1c69386c", + "finding": "01929f57-cd70-79ec-a176-9d606bdf68fb" + } + }, + { + "model": "api.resourcefindingmapping", + "pk": "f3d0d704-78e1-4329-af60-ade4dac73a8d", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "resource": "7973e332-795e-4a74-b4d4-a53a21c98c80", + "finding": "01929f57-cd60-7a7d-ba1b-37affc11176a" + } + } +] diff --git a/api/src/backend/api/fixtures/dev/6_dev_rbac.json 
b/api/src/backend/api/fixtures/dev/6_dev_rbac.json new file mode 100644 index 0000000000..38917e7546 --- /dev/null +++ b/api/src/backend/api/fixtures/dev/6_dev_rbac.json @@ -0,0 +1,62 @@ +[ + { + "model": "api.providergroup", + "pk": "3fe28fb8-e545-424c-9b8f-69aff638f430", + "fields": { + "name": "first_group", + "inserted_at": "2024-11-13T11:36:19.503Z", + "updated_at": "2024-11-13T11:36:19.503Z", + "tenant": "12646005-9067-4d2a-a098-8bb378604362" + } + }, + { + "model": "api.providergroup", + "pk": "525e91e7-f3f3-4254-bbc3-27ce1ade86b1", + "fields": { + "name": "second_group", + "inserted_at": "2024-11-13T11:36:25.421Z", + "updated_at": "2024-11-13T11:36:25.421Z", + "tenant": "12646005-9067-4d2a-a098-8bb378604362" + } + }, + { + "model": "api.providergroup", + "pk": "481769f5-db2b-447b-8b00-1dee18db90ec", + "fields": { + "name": "third_group", + "inserted_at": "2024-11-13T11:36:37.603Z", + "updated_at": "2024-11-13T11:36:37.603Z", + "tenant": "12646005-9067-4d2a-a098-8bb378604362" + } + }, + { + "model": "api.providergroupmembership", + "pk": "13625bd3-f428-4021-ac1b-b0bd41b6e02f", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "1b59e032-3eb6-4694-93a5-df84cd9b3ce2", + "provider_group": "3fe28fb8-e545-424c-9b8f-69aff638f430", + "inserted_at": "2024-11-13T11:55:17.138Z" + } + }, + { + "model": "api.providergroupmembership", + "pk": "54784ebe-42d2-4937-aa6a-e21c62879567", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "provider_group": "3fe28fb8-e545-424c-9b8f-69aff638f430", + "inserted_at": "2024-11-13T11:55:17.138Z" + } + }, + { + "model": "api.providergroupmembership", + "pk": "c8bd52d5-42a5-48fe-8e0a-3eef154b8ebe", + "fields": { + "tenant": "12646005-9067-4d2a-a098-8bb378604362", + "provider": "15fce1fa-ecaa-433f-a9dc-62553f3a2555", + "provider_group": "525e91e7-f3f3-4254-bbc3-27ce1ade86b1", + "inserted_at": "2024-11-13T11:55:41.237Z" + } + } +] diff --git a/api/src/backend/api/fixtures/dev/7_dev_compliance.json b/api/src/backend/api/fixtures/dev/7_dev_compliance.json new file mode 100644 index 0000000000..90a4bcd6be --- /dev/null +++ b/api/src/backend/api/fixtures/dev/7_dev_compliance.json @@ -0,0 +1 @@ +[{"model": "api.complianceoverview","pk": "07d0c342-abcb-4d91-b865-88f9c96adbfc","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "cisa_aws","framework": "CISA","version": "","description": "Cybersecurity & Infrastructure Security Agency's (CISA) Cyber Essentials is a guide for leaders of small businesses as well as leaders of small and local government agencies to develop an actionable understanding of where to start implementing organizational cybersecurity practices.","region": "eu-west-1","requirements": {"your-data-1": {"name": "Your Data-1","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"sagemaker_training_jobs_volume_and_output_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-data-1","Section": "your 
data","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Learn how your data is protected.","checks_status": {"fail": 4,"pass": 3,"total": 13,"manual": 0}},"your-data-2": {"name": "Your Data-2","checks": {"elb_ssl_listeners": "FAIL","elb_logging_enabled": "FAIL","elbv2_ssl_listeners": "FAIL","vpc_flow_logs_enabled": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","efs_have_backup_enabled": "FAIL","s3_bucket_public_access": null,"s3_bucket_acl_prohibited": "FAIL","ec2_ebs_volume_encryption": "PASS","rds_snapshots_public_access": "PASS","s3_bucket_default_encryption": "PASS","rds_instance_no_public_access": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"redshift_cluster_public_access": null,"cloudtrail_multi_region_enabled": "PASS","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_logging_enabled": "PASS","s3_bucket_policy_public_write_access": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","awslambda_function_not_publicly_accessible": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_training_jobs_volume_and_output_encryption_enabled": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-data-2","Section": "your data","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Learn what is happening on your network, manage network and perimeter components, host and device components, data-at-rest and in-transit, and user behavior activities.","checks_status": {"fail": 18,"pass": 11,"total": 49,"manual": 0}},"your-data-3": {"name": "Your Data-3","checks": {"elbv2_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-data-3","Section": "your data","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Domain name system protection.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"your-data-4": {"name": "Your Data-4","checks": {"efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-data-4","Section": "your data","Service": "aws","SubGroup": null,"SubSection": null}],"description": 
"Establish regular automated backups and redundancies of key systems.","checks_status": {"fail": 4,"pass": 1,"total": 8,"manual": 0}},"your-data-5": {"name": "Your Data-5","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "your-data-5","Section": "your data","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Leverage protections for backups, including physical security, encryption and offline copies.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"your-systems-1": {"name": "Your Systems-1","checks": {"ec2_elastic_ip_unassigned": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-systems-1","Section": "your systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Learn what is on your network. Maintain inventories of hardware and software assets to know what is in play and at-risk from attack.","checks_status": {"fail": 4,"pass": 0,"total": 4,"manual": 0}},"your-systems-2": {"name": "Your Systems-2","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-systems-2","Section": "your systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Leverage automatic updates for all operating systems and third-party software.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"your-systems-3": {"name": "Your Systems-3","checks": {"elb_ssl_listeners": "FAIL","elb_logging_enabled": "FAIL","elbv2_ssl_listeners": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_root_mfa_enabled": null,"rds_instance_multi_az": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","efs_have_backup_enabled": "FAIL","s3_bucket_public_access": null,"kms_cmk_rotation_enabled": null,"ec2_ebs_volume_encryption": "PASS","elbv2_deletion_protection": "FAIL","iam_user_accesskey_unused": null,"ec2_ebs_default_encryption": "PASS","iam_password_policy_number": null,"iam_password_policy_symbol": null,"rds_instance_backup_enabled": "PASS","rds_snapshots_public_access": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"s3_bucket_default_encryption": "PASS","iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"rds_instance_no_public_access": "PASS","efs_encryption_at_rest_enabled": "FAIL","iam_user_console_access_unused": null,"rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"redshift_cluster_public_access": null,"cloudtrail_multi_region_enabled": "PASS","rds_instance_deletion_protection": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_logging_enabled": "PASS","apigateway_restapi_waf_acl_attached": "FAIL","iam_user_mfa_enabled_console_access": null,"redshift_cluster_automated_snapshot": null,"s3_bucket_policy_public_write_access": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"emr_cluster_master_nodes_no_public_ip": null,"iam_password_policy_minimum_length_14": null,"s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": 
"FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","secretsmanager_automatic_rotation_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","vpc_endpoint_connections_trust_boundaries": "FAIL","awslambda_function_not_publicly_accessible": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL","codebuild_project_user_controlled_buildspec": "PASS","apigateway_restapi_client_certificate_enabled": "FAIL","iam_inline_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"iam_customer_attached_policy_no_administrative_privileges": null,"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_training_jobs_volume_and_output_encryption_enabled": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-systems-3","Section": "your systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement security configurations for all hardware and software assets.","checks_status": {"fail": 25,"pass": 16,"total": 84,"manual": 0}},"your-surroundings-1": {"name": "Your Surroundings-1","checks": {"vpc_flow_logs_enabled": "FAIL","ec2_elastic_ip_unassigned": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-surroundings-1","Section": "your surroundings","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Learn who is on your network. 
Maintain inventories of network connections (user accounts, vendors, business partners, etc.).","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"your-surroundings-2": {"name": "Your Surroundings-2","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "your-surroundings-2","Section": "your surroundings","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Leverage multi-factor authentication for all users, starting with privileged, administrative and remote access users.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"your-surroundings-3": {"name": "Your Surroundings-3","checks": {"elbv2_ssl_listeners": "FAIL","iam_no_root_access_key": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "your-surroundings-3","Section": "your surroundings","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Grant access and admin permissions based on need-to-know and least privilege.","checks_status": {"fail": 1,"pass": 0,"total": 6,"manual": 0}},"your-surroundings-4": {"name": "Your Surroundings-4","checks": {"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "your-surroundings-4","Section": "your surroundings","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Leverage unique passwords for all user accounts.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"your-crisis-response-2": {"name": "Your Crisis Response-2","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "your-crisis-response-2","Section": "your crisis response","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Lead development of an internal reporting structure to detect, communicate and contain attacks.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"booting-up-thing-to-do-first-1": {"name": "Booting Up: Things to Do First-1","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "booting-up-thing-to-do-first-1","Section": "booting up thing to do first","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Lead development of an internal reporting structure to detect, communicate and contain attacks.","checks_status": {"fail": 2,"pass": 1,"total": 7,"manual": 0}},"booting-up-thing-to-do-first-2": {"name": "Booting Up: Things to Do First-2","checks": {"iam_root_mfa_enabled": null,"iam_user_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "booting-up-thing-to-do-first-2","Section": "booting up thing to do first","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Require multi-factor authentication (MFA) for accessing your systems whenever possible. 
MFA should be required of all users, but start with privileged, administrative, and remote access users.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"booting-up-thing-to-do-first-3": {"name": "Booting Up: Things to Do First-3","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "booting-up-thing-to-do-first-3","Section": "booting up thing to do first","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Enable automatic updates whenever possible. Replace unsupported operating systems, applications and hardware. Test and deploy patches quickly.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}}},"requirements_passed": 4,"requirements_failed": 11,"requirements_manual": 1,"total_requirements": 16,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "089cf697-547a-4a34-a811-e7a19b78b9fd","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "aws_foundational_technical_review_aws","framework": "AWS-Foundational-Technical-Review","version": "","description": "The AWS Foundational Technical Review (FTR) assesses an AWS Partner's solution against a specific set of Amazon Web Services (AWS) best practices around security, performance, and operational processes that are most critical for customer success. Passing the FTR is required to qualify AWS Software Partners for AWS Partner Network (APN) programs such as AWS Competency and AWS Service Ready, but any AWS Partner who offers a technology solution may request an FTR review through AWS Partner Central.","region": "eu-west-1","requirements": {"S3-001": {"name": "Review all Amazon S3 buckets to determine appropriate access levels","checks": {"s3_bucket_object_lock": "FAIL","s3_bucket_public_access": null,"s3_bucket_acl_prohibited": "FAIL","s3_bucket_kms_encryption": "FAIL","s3_bucket_public_list_acl": null,"s3_bucket_public_write_acl": null,"s3_bucket_default_encryption": "PASS","s3_bucket_secure_transport_policy": "FAIL","s3_bucket_level_public_access_block": "PASS","s3_bucket_policy_public_write_access": "PASS","s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "You must ensure that buckets that require public access have been reviewed to determine if public read or write access is needed and if appropriate controls are in place to control public access. When assigning access permissions, follow the principle of least privilege, an AWS best practice. For more information, refer to overview of managing access.","checks_status": {"fail": 5,"pass": 3,"total": 11,"manual": 0}},"ARC-001": {"name": "Use root user only by exception","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "The root user has unlimited access to your account and its resources, and using it only by exception helps protect your AWS resources. The AWS root user must not be used for everyday tasks, even administrative ones. 
Instead, adhere to the best practice of using the root user only to create your first AWS Identity and Access Management (IAM) user. Then securely lock away the root user credentials and use them to perform only a few account and service management tasks. To view the tasks that require you to sign in as the root user, see AWS Tasks That Require Root User. FTR does not require you to actively monitor root usage.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ARC-003": {"name": "Enable multi-factor authentication (MFA) on the root user for all AWS accounts","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Enabling MFA provides an additional layer of protection against unauthorized access to your account. To configure MFA for the root user, follow the instructions for enabling either a virtual MFA or hardware MFA device. If you are using AWS Organizations to create new accounts, the initial password for the root user is set to a random value that is never exposed to you. If you do not recover the password for the root user of these accounts, you do not need to enable MFA on them. For any accounts where you do have access to the root user's password, you must enable MFA.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"ARC-004": {"name": "Remove access keys for the root user","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Programmatic access to AWS APIs should never use the root user. It is best not to generate a static access key for the root user. If one already exists, you should transition any processes using that key to use temporary access keys from an AWS Identity and Access Management (IAM) role, or, if necessary, static access keys from an IAM user.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ARC-005": {"name": "Develop incident management plans","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "An incident management plan is critical to respond, mitigate, and recover from the potential impact of security incidents. An incident management plan is a structured process for identifying, remediating, and responding in a timely manner to security incidents. An effective incident management plan must be continually iterated upon, remaining current with your cloud operations goal. For more information on developing an incident management plan, please see Develop incident management plans.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"BAR-001": {"name": "Configure automatic data backups","checks": {"backup_plans_exist": "PASS","backup_vaults_exist": null,"backup_vaults_encrypted": "PASS","efs_have_backup_enabled": "FAIL","backup_reportplans_exist": null,"rds_instance_backup_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "You must perform regular backups to a durable storage service. 
Backups ensure that you have the ability to recover from administrative, logical, or physical error scenarios. Configure backups to be taken automatically based on a periodic schedule, or by changes in the dataset. RDS instances, EBS volumes, DynamoDB tables, and S3 objects can all be configured for automatic backup. AWS Backup, AWS Marketplace solutions or third-party solutions can also be used. If objects in an S3 bucket are write-once-read-many (WORM), compensating controls such as object lock can be used to meet this requirement. If it is the customers' responsibility to back up their data, it must be clearly stated in the documentation and the Partner must provide clear instructions on how to back up the data.","checks_status": {"fail": 1,"pass": 3,"total": 6,"manual": 0}},"BAR-002": {"name": "Periodically recover data to verify the integrity of your backup process","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "To confirm that your backup process meets your recovery time objectives (RTO) and recovery point objectives (RPO), run a recovery test on a regular schedule and after making significant changes to your cloud environment. For more information, refer to Getting Started - Backup and Restore with AWS.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-001": {"name": "Use cross-account roles to access customer AWS accounts","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Cross-account roles reduce the amount of sensitive information AWS Partners need to store for their customers.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-002": {"name": "Use an external ID with cross-account roles to access customer accounts","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "An external ID allows the user that is assuming the role to assert the circumstances in which they are operating. It also provides a way for the account owner to permit the role to be assumed only under specific circumstances. The primary function of the external ID is to address and prevent the confused deputy problem.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-003": {"name": "Deprecate any historical use of customer-provided IAM credentials","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "If your application provides legacy support for the use of static IAM credentials for cross-account access, the application's user interface and customer documentation must make it clear that this method is deprecated. 
Existing customers should be encouraged to switch to cross-account role-based access, and collection of credentials should be disabled for new customers.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-004": {"name": "Use a value you generate (not something provided by the customer) for the external ID","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "When configuring cross-account access using IAM roles, you must use a value you generate for the external ID, instead of one provided by the customer, to ensure the integrity of the cross-account role configuration. A partner-generated external ID ensures that malicious parties cannot impersonate a customer's configuration and enforces uniqueness and format consistency across all customers. If you are not generating an external ID today, we recommend implementing a process that generates a random unique value (such as a Universally Unique Identifier) for the external ID that a customer uses to set up a cross-account role.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-005": {"name": "Ensure that all external IDs are unique.","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "The external IDs used must be unique across all customers. Re-using external IDs for different customers does not solve the confused deputy problem and runs the risk of customer A being able to view data of customer B by using the role ARN and the external ID of customer B. To resolve this, we recommend implementing a process that ensures a random unique value, such as a Universally Unique Identifier, is generated for the external ID that a customer would use to set up a cross-account role.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-006": {"name": "Provide read-only access to external ID to customers","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Customers must not be able to set or influence external IDs. When the external ID is editable, it is possible for one customer to impersonate the configuration of another. For example, when the external ID is editable, customer A can create a cross account role setup using customer B's role ARN and external ID, granting customer A access to customer B's data. Remediation of this item involves making the external ID a view-only field, ensuring that the external ID cannot be changed to impersonate the setup of another customer.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"CAA-007": {"name": "Provide guidance or an automated setup mechanism (for example, an AWS CloudFormation template) for creating cross-account roles with the minimum required privileges","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "The policy created for cross-account access in customer accounts must follow the principle of least privilege. 
The AWS Partner must provide a role-policy document or an automated setup mechanism (for example, an AWS CloudFormation template) for the customers to use to ensure that the roles are created with minimum required privileges. For more information, refer to the AWS Partner Network (APN) blog posts.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-001": {"name": "Enable multi-factor authentication (MFA) for all Human Identities with AWS access","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_hardware_mfa_enabled": null,"iam_administrator_access_with_mfa": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "You must require any human identities to authenticate using MFA before accessing your AWS accounts. Typically, this means enabling MFA within your corporate identity provider. If you have existing legacy IAM users you must enable MFA for console access for those principals as well. Enabling MFA for IAM users provides an additional layer of security. With MFA, users have a device that generates a unique authentication code (a one-time password, or OTP). Users must provide both their normal credentials (user name and password) and the OTP. The MFA device can either be a special piece of hardware, or it can be a virtual device (for example, it can run in an app on a smartphone). Please note that machine identities do not require MFA.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"IAM-002": {"name": "Monitor and secure static AWS Identity and Access Management (IAM) credentials","checks": {"guardduty_is_enabled": "PASS","iam_user_accesskey_unused": null,"iam_rotate_access_key_90_days": null,"guardduty_no_high_severity_findings": "FAIL","iam_user_with_temporary_credentials": null},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Use temporary IAM credentials retrieved by assuming a role whenever possible. In cases where it is infeasible to use IAM roles, implement the following controls to reduce the risk these credentials are misused: Rotate IAM access keys regularly (recommended at least every 90 days). Maintain an inventory of all static keys and where they are used and remove unused access keys. Implement monitoring of AWS CloudTrail logs to detect anomalous activity or other potential misuse (e.g. using AWS GuardDuty.) Define a runbook or SOP for revoking credentials in the event you detect misuse.","checks_status": {"fail": 1,"pass": 1,"total": 5,"manual": 0}},"IAM-003": {"name": "Use strong password policy","checks": {"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_password_policy_minimum_length_14": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Enforce a strong password policy, and educate users to avoid common or re-used passwords. 
For IAM users, you can create a password policy for your account on the Account Settings page of the IAM console. You can use the password policy to define password requirements, such as minimum length and whether it requires non-alphabetic characters, and so on. For more information, see Setting an Account Password Policy for IAM users.","checks_status": {"fail": 0,"pass": 0,"total": 7,"manual": 0}},"IAM-004": {"name": "Create individual identities (no shared credentials) for anyone who needs AWS access","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Create individual entities and give unique security credentials and permissions to each user accessing your account. With individual entities and no shared credentials, you can audit the activity of each user.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-005": {"name": "Use IAM roles and their temporary security credentials to provide access to third parties.","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Do not provision IAM users and share those credentials with people outside of your organization. Any external services that need to make AWS API calls against your account (for example, a monitoring solution that accesses your account's AWS CloudWatch metrics) must use a cross-account role. For more information, refer to Providing access to AWS accounts owned by third parties.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-006": {"name": "Grant least privilege access","checks": {"iam_policy_attached_only_to_group_or_roles": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "You must follow the standard security advice of granting least privilege. Grant only the access that identities require by allowing access to specific actions on specific AWS resources under specific conditions. Rely on groups and identity attributes to dynamically set permissions at scale, rather than defining permissions for individual users. For example, you can allow a group of developers access to manage only resources for their project. This way, when a developer is removed from the group, access for the developer is revoked everywhere that group was used for access control, without requiring any changes to the access policies.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"IAM-007": {"name": "Manage access based on life cycle","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Integrate access controls with operator and application lifecycle and your centralized federation provider and IAM. 
For example, remove a user's access when they leave the organization or change roles.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-008": {"name": "Audit identities quarterly","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Auditing the identities that are configured in your identity provider and IAM helps ensure that only authorized identities have access to your workload. For example, remove people that leave the organization, and remove cross-account roles that are no longer required. Have a process in place to periodically audit permissions to the services accessed by an IAM entity. This helps you identify the policies you need to modify to remove any unused permissions. For more information, see Refining permissions in AWS using last accessed information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-009": {"name": "Do not embed credentials in application code","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Ensure that all credentials used by your applications (for example, IAM access keys and database passwords) are never included in your application's source code or committed to source control in any way.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"RES-001": {"name": "Define a Recovery Point Objective (RPO)","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "To confirm that your backup process meets your recovery time objectives (RTO) and recovery point objectives (RPO), run a recovery test on a regular schedule and after making significant changes to your cloud environment. For more information, refer to Getting Started - Backup and Restore with AWS.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"RES-002": {"name": "Establish a Recovery Time Objective (RTO)","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Define an RTO that meets your organization's needs and expectations. RTO is the maximum acceptable delay your organization will accept between the interruption and restoration of service.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"RES-004": {"name": "Resiliency Testing","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Test resiliency to ensure that RTO and RPO are met, both periodically (minimum every 12 months) and after major updates. The resiliency test must include accidental data loss, instance failures, and Availability Zone (AZ) failures. At least one resilience test that meets RTO and RPO requirements must be completed prior to FTR approval. You can use AWS Resilience Hub to test and verify your workloads to see if they meet their resilience targets. 
AWS Resilience Hub works with AWS Fault Injection Service (AWS FIS), a chaos engineering service, to provide fault-injection simulations of real-world failures to validate that the application recovers within the resilience targets you defined. AWS Resilience Hub also provides API operations for you to integrate its resilience assessment and testing into your CI/CD pipelines for ongoing resilience validation. Including resilience validation in CI/CD pipelines helps make sure that changes to the workload's underlying infrastructure don't compromise resilience.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"RES-005": {"name": "Communicate customer responsibilities for resilience","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Clearly define your customers' responsibility for backup, recovery, and availability. At a minimum, your product documentation or customer agreements should cover the following: Responsibility the customer has for backing up the data stored in your solution. Instructions for backing up data or configuring optional features in your product for data protection, if applicable. Options customers have for configuring the availability of your product.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"RES-006": {"name": "Architect your product to meet availability targets and uptime service level agreements (SLAs)","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "If you publish or privately agree to availability targets or uptime SLAs, ensure that your architecture and operational processes are designed to support them. Additionally, provide clear guidance to customers on any configuration required to achieve the targets or SLAs.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"RES-007": {"name": "Define a customer communication plan for outages","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Establish a plan for communicating information about system outages to your customers both during and after incidents. Your communication should not include any data that was provided by AWS under a non-disclosure agreement (NDA).","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SUP-001": {"name": "Subscribe to the AWS Business Support tier (or higher) for all production AWS accounts or have an action plan to handle issues which require help from AWS Support","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Partner-hosted FTR requirements","Service": null,"SubGroup": null,"SubSection": null}],"description": "It is recommended that you subscribe to the AWS Business Support tier or higher (including AWS Partner-Led Support) for all of your AWS production accounts. For more information, refer to Compare AWS Support Plans. If you don't have premium support, you must have an action plan to handle issues which require help from AWS Support. AWS Support provides a mix of tools and technology, people, and programs designed to proactively help you optimize performance, lower costs, and innovate faster. 
AWS Business Support provides additional benefits including access to AWS Trusted Advisor and AWS Personal Health Dashboard, and faster response times.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ACOM-001": {"name": "Configure AWS account contacts","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "If an account is not managed by AWS Organizations, alternate account contacts help AWS get in contact with the appropriate personnel if needed. Configure the account's alternate contacts to point to a group rather than an individual. For example, create separate email distribution lists for billing, operations, and security and configure these as Billing, Security, and Operations contacts in each active AWS account. This ensures that multiple people will receive AWS notifications and be able to respond, even if someone is on vacation, changes roles, or leaves the company.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ACOM-002": {"name": "Set account contact information including the root user email address to email addresses and phone numbers owned by your company","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Using company-owned email addresses and phone numbers for contact information enables you to access them even if the individuals to whom they belong are no longer with your organization.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"HOST-001": {"name": "Confirm your hosting model","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Partner-hosted FTR requirements","Service": null,"SubGroup": null,"SubSection": null}],"description": "To use this FTR checklist you must host all critical application components on AWS. You may use external providers for edge services such as content delivery networks (CDNs) or domain name system (DNS), or corporate identity providers. If you are using any edge services outside AWS, please specify them in the self-assessment.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-0010": {"name": "Store secrets securely.","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Encrypt all secrets in transit and at rest, define fine-grained access controls that only allow access to specific identities, and log access to secrets in an audit log. We recommend you use a purpose-built secret management service such as AWS Secrets Manager, AWS Systems Manager Parameter Store, or an AWS Partner solution, but internally developed solutions that meet these requirements are also acceptable.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-0011": {"name": "Encrypt all end user/customer credentials and hash passwords at rest.","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "If you are storing end user/customer credentials in a database that you manage, encrypt credentials at rest and hash passwords. 
As an alternative, AWS recommends using a user-identity synchronization service, such as Amazon Cognito or an equivalent AWS Partner solution.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"IAM-0012": {"name": "Use temporary credentials","checks": {"iam_root_mfa_enabled": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_administrator_access_with_mfa": null,"iam_role_administratoraccess_policy": null,"iam_user_mfa_enabled_console_access": null,"iam_user_with_temporary_credentials": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_role_cross_account_readonlyaccess_policy": null,"iam_role_cross_service_confused_deputy_prevention": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Use temporary security credentials to access AWS resources. For machine identities within AWS (for example, Amazon Elastic Compute Cloud (Amazon EC2) instances or AWS Lambda functions), always use IAM roles to acquire temporary security credentials. For machine identities running outside of AWS, use IAM Roles Anywhere or securely store static AWS access keys that are only used to assume an IAM role. For human identities, use AWS IAM Identity Center or other identity federation solutions where possible. If you must use static AWS access keys for human users, require MFA for all access, including the AWS Management Console and the AWS Command Line Interface (AWS CLI).","checks_status": {"fail": 0,"pass": 0,"total": 13,"manual": 0}},"RCVP-001": {"name": "Establish a process to ensure that all required compliance standards are met","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "If you advertise that your product meets specific compliance standards, you must have an internal process for ensuring compliance. Examples of compliance standards include the Payment Card Industry Data Security Standard (PCI DSS), the Federal Risk and Authorization Management Program (FedRAMP), and the U.S. Health Insurance Portability and Accountability Act (HIPAA). Applicable compliance standards are determined by various factors, such as what types of data the solution stores or transmits and which geographic regions the solution supports.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SDAT-001": {"name": "Identify sensitive data (for example, Personally Identifiable Information (PII) and Protected Health Information (PHI))","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Data classification enables you to determine which data needs to be protected and how. 
Based on the workload and the data it processes, identify the data that is not common public knowledge.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SDAT-002": {"name": "Encrypt all sensitive data at rest","checks": {"athena_workgroup_encryption": null,"efs_encryption_at_rest_enabled": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Encryption maintains the confidentiality of sensitive data even when it gets stolen or the network through which it is transmitted becomes compromised.","checks_status": {"fail": 3,"pass": 0,"total": 7,"manual": 0}},"SDAT-003": {"name": "Only use protocols with encryption when transmitting sensitive data outside of your VPC","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Encryption maintains data confidentiality even when the network through which it is transmitted becomes compromised.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"WAFR-001": {"name": "Conduct periodic architecture reviews (minimum once every year)","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Partner-hosted FTR requirements","Service": null,"SubGroup": null,"SubSection": null}],"description": "Conduct periodic architecture reviews of your production workload (at least once per year) using a documented architectural standard that includes AWS-specific best practices. If you have an internally defined standard for your AWS workloads, we recommend you use it for these reviews. If you do not have an internal standard, we recommend you use the AWS Well-Architected Framework.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"WAFR-002": {"name": "Review the AWS Shared Responsibility Models for Security and Resiliency","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Partner-hosted FTR requirements","Service": null,"SubGroup": null,"SubSection": null}],"description": "Review the AWS Shared Responsibility Model for Security and the AWS Shared Responsibility Model for Resiliency. Ensure that your product's architecture and operational processes address the customer responsibilities defined in these models. 
We recommend you use AWS Resilience Hub to ensure your workload resiliency posture meets your targets and to provide you with operational procedures you may use to address the customer responsibilities.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"NETSEC-001": {"name": "Implement the least permissive rules for all Amazon EC2 security groups","checks": {"ec2_ami_public": null,"ec2_instance_public_ip": "FAIL","ec2_securitygroup_not_used": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "All Amazon EC2 security groups should restrict access to the greatest degree possible. At a minimum, do the following: Ensure that no security groups allow ingress from 0.0.0.0/0 to port 22 or 3389 (CIS 5.2). Ensure that the default security group of every VPC restricts all traffic (CIS 5.3/Security Control EC2.2).","checks_status": {"fail": 3,"pass": 16,"total": 20,"manual": 0}},"NETSEC-002": {"name": "Restrict resources in public subnets","checks": {"vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","vpc_endpoint_connections_trust_boundaries": "FAIL","workspaces_vpc_2private_1public_subnets_nat": null,"vpc_endpoint_services_allowed_principals_trust_boundaries": null},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Do not place resources in public subnets of your VPC unless they must receive network traffic from public sources. 
Public subnets are subnets associated with a route table that has a route to an internet gateway.","checks_status": {"fail": 3,"pass": 0,"total": 5,"manual": 0}},"SECOPS-001": {"name": "Perform vulnerability management","checks": {"inspector2_is_enabled": "FAIL","inspector2_active_findings_exist": "FAIL","guardduty_no_high_severity_findings": "FAIL","accessanalyzer_enabled_without_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Architectural and Operational Controls","Service": null,"SubGroup": null,"SubSection": null}],"description": "Define a mechanism and frequency to scan and patch for vulnerabilities in your dependencies and in your operating systems to help protect against new threats. Scan and patch your dependencies and your operating systems on a defined schedule. Software vulnerability management is essential to keeping your system secure from threat actors. Embedding vulnerability assessments early into your continuous integration/continuous delivery (CI/CD) pipeline allows you to prioritize remediation of any security vulnerabilities detected. The solution you need to achieve this varies according to the AWS services that you are consuming. To check for vulnerabilities in software running in Amazon EC2 instances, you can add Amazon Inspector to your pipeline to cause your build to fail if Inspector detects vulnerabilities. You can also use open source products such as OWASP Dependency-Check, Snyk, OpenVAS, package managers, and AWS Partner tools for vulnerability management.","checks_status": {"fail": 4,"pass": 0,"total": 4,"manual": 0}}},"requirements_passed": 6,"requirements_failed": 7,"requirements_manual": 32,"total_requirements": 45,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "1491ce35-3d2b-4cf6-a56d-b18b391d5623","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "nist_800_171_revision_2_aws","framework": "NIST-800-171-Revision-2","version": "","description": "The cybersecurity controls within NIST 800-171 safeguard CUI in the IT networks of government contractors and subcontractors. It defines the practices and procedures that government contractors must adhere to when their networks process or store CUI. 
NIST 800-171 only applies to those parts of a contractor's network where CUI is present.","region": "eu-west-1","requirements": {"3_1_1": {"name": "3.1.1 Limit system access to authorized users, processes acting on behalf of authorized users, and devices (including other systems)","checks": {"iam_root_mfa_enabled": null,"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"ec2_instance_profile_attached": "PASS","iam_root_hardware_mfa_enabled": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"eks_cluster_not_publicly_accessible": null,"iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_1","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Access control policies (e.g., identity or role-based policies, control matrices, and cryptography) control access between active entities or subjects (i.e., users or processes acting on behalf of users) and passive entities or objects (e.g., devices, files, records, and domains) in systems. Access enforcement mechanisms can be employed at the application and service level to provide increased information security. Other systems include systems internal and external to the organization. This requirement focuses on account management for systems and applications. 
The definition of and enforcement of access authorizations, other than those determined by account type (e.g., privileged versus non-privileged) are addressed in requirement 3.1.2.","checks_status": {"fail": 3,"pass": 7,"total": 28,"manual": 0}},"3_1_2": {"name": "3.1.2 Limit system access to the types of transactions and functions that authorized users are permitted to execute","checks": {"iam_root_mfa_enabled": null,"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"ec2_instance_profile_attached": "PASS","iam_root_hardware_mfa_enabled": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"eks_cluster_not_publicly_accessible": null,"iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_2","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Organizations may choose to define access privileges or other attributes by account, by type of account, or a combination of both. System account types include individual, shared, group, system, anonymous, guest, emergency, developer, manufacturer, vendor, and temporary. Other attributes required for authorizing access include restrictions on time-of-day, day-of-week, and point-of-origin. 
In defining other account attributes, organizations consider system-related requirements (e.g., system upgrades, scheduled maintenance) and mission or business requirements (e.g., time zone differences, customer requirements, remote access to support travel requirements).","checks_status": {"fail": 3,"pass": 7,"total": 28,"manual": 0}},"3_1_3": {"name": "3.1.3 Control the flow of CUI in accordance with approved authorizations","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"eks_cluster_not_publicly_accessible": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_3","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Information flow control regulates where information can travel within a system and between systems (versus who can access the information) and without explicit regard to subsequent accesses to that information. Flow control restrictions include the following: keeping export-controlled information from being transmitted in the clear to the Internet; blocking outside traffic that claims to be from within the organization; restricting requests to the Internet that are not from the internal web proxy server; and limiting information transfers between organizations based on data structures and content. Organizations commonly use information flow control policies and enforcement mechanisms to control the flow of information between designated sources and destinations (e.g., networks, individuals, and devices) within systems and between interconnected systems. Flow control is based on characteristics of the information or the information path. Enforcement occurs in boundary protection devices (e.g., gateways, routers, guards, encrypted tunnels, firewalls) that employ rule sets or establish configuration settings that restrict system services, provide a packet-filtering capability based on header information, or message-filtering capability based on message content (e.g., implementing key word searches or using document characteristics). Organizations also consider the trustworthiness of filtering and inspection mechanisms (i.e., hardware, firmware, and software components) that are critical to information flow enforcement. Transferring information between systems representing different security domains with different security policies introduces risk that such transfers violate one or more domain security policies. In such situations, information owners or stewards provide guidance at designated policy enforcement points between interconnected systems. Organizations consider mandating specific architectural solutions when required to enforce specific security policies. 
Enforcement includes: prohibiting information transfers between interconnected systems (i.e., allowing access only); employing hardware mechanisms to enforce one-way information flows; and implementing trustworthy regrading mechanisms to reassign security attributes and security labels.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"3_1_4": {"name": "3.1.4 Separate the duties of individuals to reduce the risk of malevolent activity without collusion","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_1_4","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Separation of duties addresses the potential for abuse of authorized privileges and helps to reduce the risk of malevolent activity without collusion. Separation of duties includes dividing mission functions and system support functions among different individuals or roles; conducting system support functions with different individuals (e.g., configuration management, quality assurance and testing, system management, programming, and network security); and ensuring that security personnel administering access control functions do not also administer audit functions. Because separation of duty violations can span systems and application domains, organizations consider the entirety of organizational systems and system components when developing policy on separation of duties.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"3_1_5": {"name": "3.1.5 Employ the principle of least privilege, including for specific security functions and privileged accounts","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_1_5","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Organizations employ the principle of least privilege for specific duties and authorized accesses for users and processes. The principle of least privilege is applied with the goal of authorized privileges no higher than necessary to accomplish required organizational missions or business functions. Organizations consider the creation of additional processes, roles, and system accounts as necessary, to achieve least privilege. Organizations also apply least privilege to the development, implementation, and operation of organizational systems. Security functions include establishing system accounts, setting events to be logged, setting intrusion detection parameters, and configuring access authorizations (i.e., permissions, privileges). Privileged accounts, including super user accounts, are typically described as system administrator for various types of commercial off-the-shelf operating systems. Restricting privileged accounts to specific personnel or roles prevents day-to-day users from having access to privileged information or functions. 
Organizations may differentiate in the application of this requirement between allowed privileges for local accounts and for domain accounts provided organizations retain the ability to control system configurations for key security parameters and as otherwise necessary to sufficiently mitigate risk.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"3_1_6": {"name": "3.1.6 Use non-privileged accounts or roles when accessing nonsecurity functions","checks": {"iam_no_root_access_key": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_1_6","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "This requirement limits exposure when operating from within privileged accounts or roles. The inclusion of roles addresses situations where organizations implement access control policies such as role-based access control and where a change of role provides the same degree of assurance in the change of access authorizations for the user and all processes acting on behalf of the user as would be provided by a change between a privileged and non-privileged account.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"3_1_7": {"name": "3.1.7 Prevent non-privileged users from executing privileged functions and capture the execution of such functions in audit logs","checks": {"iam_no_root_access_key": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_1_7","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Privileged functions include establishing system accounts, performing system integrity checks, conducting patching operations, or administering cryptographic key management activities. Nonprivileged users are individuals that do not possess appropriate authorizations. Circumventing intrusion detection and prevention mechanisms or malicious code protection mechanisms are examples of privileged functions that require protection from non-privileged users. Note that this requirement represents a condition to be achieved by the definition of authorized privileges in 3.1.2. Misuse of privileged functions, either intentionally or unintentionally by authorized users, or by unauthorized external entities that have compromised system accounts, is a serious and ongoing concern and can have significant adverse impacts on organizations. 
Logging the use of privileged functions is one way to detect such misuse, and in doing so, helps mitigate the risk from insider threats and the advanced persistent threat.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"3_3_1": {"name": "3.3.1 Create and retain system audit logs and records to the extent needed to enable the monitoring, analysis, investigation, and reporting of unlawful or unauthorized system activity","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_3_1","Section": "3.3 Audit and Accountability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "An event is any observable occurrence in a system, which includes unlawful or unauthorized system activity. Organizations identify event types for which a logging functionality is needed as those events which are significant and relevant to the security of systems and the environments in which those systems operate to meet specific and ongoing auditing needs. Event types can include password changes, failed logons or failed accesses related to systems, administrative privilege usage, or third-party credential usage. In determining event types that require logging, organizations consider the monitoring and auditing appropriate for each of the CUI security requirements. Monitoring and auditing requirements can be balanced with other system needs. For example, organizations may determine that systems must have the capability to log every file access both successful and unsuccessful, but not activate that capability except for specific circumstances due to the potential burden on system performance. Audit records can be generated at various levels of abstraction, including at the packet level as information traverses the network. Selecting the appropriate level of abstraction is a critical aspect of an audit logging capability and can facilitate the identification of root causes to problems. Organizations consider in the definition of event types, the logging necessary to cover related events such as the steps in distributed, transaction-based processes (e.g., processes that are distributed across multiple organizations) and actions that occur in service-oriented or cloud-based architectures. Audit record content that may be necessary to satisfy this requirement includes time stamps, source and destination addresses, user or process identifiers, event descriptions, success or fail indications, filenames involved, and access control or flow control rules invoked. Event outcomes can include indicators of event success or failure and event-specific results (e.g., the security state of the system after the event occurred). Detailed information that organizations may consider in audit records includes full text recording of privileged commands or the individual identities of group account users. Organizations consider limiting the additional audit log information to only that information explicitly needed for specific audit requirements. 
This facilitates the use of audit trails and audit logs by not including information that could potentially be misleading or could make it more difficult to locate information of interest. Audit logs are reviewed and analyzed as often as needed to provide important information to organizations to facilitate risk-based decision making.","checks_status": {"fail": 7,"pass": 4,"total": 14,"manual": 0}},"3_3_2": {"name": "3.3.2 Ensure that the actions of individual system users can be uniquely traced to those users, so they can be held accountable for their actions","checks": {"guardduty_is_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_3_2","Section": "3.3 Audit and Accountability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "This requirement ensures that the contents of the audit record include the information needed to link the audit event to the actions of an individual to the extent feasible. Organizations consider logging for traceability including results from monitoring of account usage, remote access, wireless connectivity, mobile device connection, communications at system boundaries, configuration settings, physical access, nonlocal maintenance, use of maintenance tools, temperature and humidity, equipment delivery and removal, system component inventory, use of mobile code, and use of Voice over Internet Protocol (VoIP).","checks_status": {"fail": 3,"pass": 3,"total": 9,"manual": 0}},"3_3_3": {"name": "3.3.3 Review and update logged events","checks": {"vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_3_3","Section": "3.3 Audit and Accountability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The intent of this requirement is to periodically re-evaluate which logged events will continue to be included in the list of events to be logged. The event types that are logged by organizations may change over time. Reviewing and updating the set of logged event types periodically is necessary to ensure that the current set remains necessary and sufficient.","checks_status": {"fail": 4,"pass": 2,"total": 9,"manual": 0}},"3_3_4": {"name": "3.3.4 Alert in the event of an audit logging process failure","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_3_4","Section": "3.3 Audit and Accountability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Audit logging process failures include software and hardware errors, failures in the audit record capturing mechanisms, and audit record storage capacity being reached or exceeded. 
This requirement applies to each audit record data storage repository (i.e., distinct system component where audit records are stored), the total audit record storage capacity of organizations (i.e., all audit record data storage repositories combined), or both.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"3_3_5": {"name": "3.3.5 Correlate audit record review, analysis, and reporting processes for investigation and response to indications of unlawful, unauthorized, suspicious, or unusual activity","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_3_5","Section": "3.3 Audit and Accountability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Correlating audit record review, analysis, and reporting processes helps to ensure that they do not operate independently, but rather collectively. Regarding the assessment of a given organizational system, the requirement is agnostic as to whether this correlation is applied at the system level or at the organization level across all systems.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"3_3_8": {"name": "3.3.8 Protect audit information and audit logging tools from unauthorized access, modification, and deletion","checks": {"s3_bucket_public_access": null,"s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_policy_public_write_access": "PASS","s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_3_8","Section": "3.3 Audit and Accountability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Audit information includes all information (e.g., audit records, audit log settings, and audit reports) needed to successfully audit system activity. Audit logging tools are those programs and devices used to conduct audit and logging activities. This requirement focuses on the technical protection of audit information and limits the ability to access and execute audit logging tools to authorized individuals. Physical protection of audit information is addressed by media protection and physical and environmental protection requirements.","checks_status": {"fail": 4,"pass": 2,"total": 8,"manual": 0}},"3_4_1": {"name": "3.4.1 Establish and maintain baseline configurations and inventories of organizational systems (including hardware, software, firmware, and documentation) throughout the respective system development life cycles","checks": {"ec2_elastic_ip_unassigned": "FAIL","elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","cloudtrail_multi_region_enabled": "PASS","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_4_1","Section": "3.4 Configuration Management","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Baseline configurations are documented, formally reviewed, and agreed-upon specifications for systems or configuration items within those systems. Baseline configurations serve as a basis for future builds, releases, and changes to systems. 
Baseline configurations include information about system components (e.g., standard software packages installed on workstations, notebook computers, servers, network components, or mobile devices; current version numbers and update and patch information on operating systems and applications; and configuration settings and parameters), network topology, and the logical placement of those components within the system architecture. Baseline configurations of systems also reflect the current enterprise architecture. Maintaining effective baseline configurations requires creating new baselines as organizational systems change over time. Baseline configuration maintenance includes reviewing and updating the baseline configuration when changes are made based on security risks and deviations from the established baseline configuration. Organizations can implement centralized system component inventories that include components from multiple organizational systems. In such situations, organizations ensure that the resulting inventories include system-specific information required for proper component accountability (e.g., system association, system owner). Information deemed necessary for effective accountability of system components includes hardware inventory specifications, software license information, software version numbers, component owners, and for networked components or devices, machine names and network addresses. Inventory specifications include manufacturer, device type, model, serial number, and physical location.","checks_status": {"fail": 6,"pass": 1,"total": 7,"manual": 0}},"3_4_2": {"name": "3.4.2 Establish and enforce security configuration settings for information technology products employed in organizational systems","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_4_2","Section": "3.4 Configuration Management","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Configuration settings are the set of parameters that can be changed in hardware, software, or firmware components of the system that affect the security posture or functionality of the system. Information technology products for which security-related configuration settings can be defined include mainframe computers, servers, workstations, input and output devices (e.g., scanners, copiers, and printers), network components (e.g., firewalls, routers, gateways, voice and data switches, wireless access points, network appliances, sensors), operating systems, middleware, and applications. Security parameters are those parameters impacting the security state of systems including the parameters required to satisfy other security requirements. Security parameters include: registry settings; account, file, directory permission settings; and settings for functions, ports, protocols, and remote connections. Organizations establish organization-wide configuration settings and subsequently derive specific configuration settings for systems. The established settings become part of the systems' configuration baseline. 
Common secure configurations (also referred to as security configuration checklists, lockdown and hardening guides, security reference guides, security technical implementation guides) provide recognized, standardized, and established benchmarks that stipulate secure configuration settings for specific information technology platforms/products and instructions for configuring those system components to meet operational requirements. Common secure configurations can be developed by a variety of organizations including information technology product developers, manufacturers, vendors, consortia, academia, industry, federal agencies, and other organizations in the public and private sectors.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"3_4_6": {"name": "3.4.6 Employ the principle of least functionality by configuring organizational systems to provide only essential capabilities","checks": {"iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"ec2_instance_managed_by_ssm": "FAIL","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"redshift_cluster_public_access": null,"ssm_managed_compliant_patching": "FAIL","s3_bucket_policy_public_write_access": "PASS","s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_4_6","Section": "3.4 Configuration Management","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Systems can provide a wide variety of functions and services. Some of the functions and services routinely provided by default may not be necessary to support essential organizational missions, functions, or operations. It is sometimes convenient to provide multiple services from single system components. However, doing so increases risk over limiting the services provided by any one component. Where feasible, organizations limit component functionality to a single function per component. Organizations review functions and services provided by systems or components of systems to determine which functions and services are candidates for elimination. Organizations disable unused or unnecessary physical and logical ports and protocols to prevent unauthorized connection of devices, transfer of information, and tunneling. 
Organizations can utilize network scanning tools, intrusion detection and prevention systems, and end-point protections such as firewalls and host-based intrusion detection systems to identify and prevent the use of prohibited functions, ports, protocols, and services.","checks_status": {"fail": 3,"pass": 3,"total": 15,"manual": 0}},"3_4_7": {"name": "3.4.7 Restrict, disable, or prevent the use of nonessential programs, functions, ports, protocols, and services","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_4_7","Section": "3.4 Configuration Management","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Restricting the use of nonessential software (programs) includes restricting the roles allowed to approve program execution; prohibiting auto-execute; program blacklisting and whitelisting; or restricting the number of program instances executed at the same time. The organization makes a security-based determination which functions, ports, protocols, and/or services are restricted. Bluetooth, File Transfer Protocol (FTP), and peer-to-peer networking are examples of protocols organizations consider preventing the use of, restricting, or disabling.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"3_4_9": {"name": "3.4.9 Control and monitor user-installed software","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_4_9","Section": "3.4 Configuration Management","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Users can install software in organizational systems if provided the necessary privileges. To maintain control over the software installed, organizations identify permitted and prohibited actions regarding software installation through policies. Permitted software installations include updates and security patches to existing software and applications from organization-approved 'app stores.' Prohibited software installations may include software with unknown or suspect pedigrees or software that organizations consider potentially malicious. The policies organizations select governing user-installed software may be organization-developed or provided by some external entity. Policy enforcement methods include procedural methods, automated methods, or both.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"3_5_2": {"name": "3.5.2 Authenticate (or verify) the identities of users, processes, or devices, as a prerequisite to allowing access to organizational systems","checks": {"iam_root_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_5_2","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Individual authenticators include the following: passwords, key cards, cryptographic devices, and one-time password devices. Initial authenticator content is the actual content of the authenticator, for example, the initial password. In contrast, the requirements about authenticator content include the minimum password length. Developers ship system components with factory default authentication credentials to allow for initial installation and configuration. 
Default authentication credentials are often well known, easily discoverable, and present a significant security risk. Systems support authenticator management by organization-defined settings and restrictions for various authenticator characteristics including minimum password length, validation time window for time synchronous one-time tokens, and number of allowed rejections during the verification stage of biometric authentication. Authenticator management includes issuing and revoking, when no longer needed, authenticators for temporary access such as that required for remote maintenance. Device authenticators include certificates and passwords.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"3_5_3": {"name": "3.5.3 Use multifactor authentication for local and network access to privileged accounts and for network access to non-privileged accounts","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_5_3","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Multifactor authentication requires the use of two or more different factors to authenticate. The factors are defined as something you know (e.g., password, personal identification number [PIN]); something you have (e.g., cryptographic identification device, token); or something you are (e.g., biometric). Multifactor authentication solutions that feature physical authenticators include hardware authenticators providing time-based or challenge-response authenticators and smart cards. In addition to authenticating users at the system level (i.e., at logon), organizations may also employ authentication mechanisms at the application level, when necessary, to provide increased information security. Access to organizational systems is defined as local access or network access. Local access is any access to organizational systems by users (or processes acting on behalf of users) where such access is obtained by direct connections without the use of networks. Network access is access to systems by users (or processes acting on behalf of users) where such access is obtained through network connections (i.e., nonlocal accesses). Remote access is a type of network access that involves communication through external networks. The use of encrypted virtual private networks for connections between organization-controlled and non-organization controlled endpoints may be treated as internal networks with regard to protecting the confidentiality of information.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"3_5_5": {"name": "3.5.5 Prevent reuse of identifiers for a defined period","checks": {"iam_password_policy_reuse_24": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_5_5","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Identifiers are provided for users, processes acting on behalf of users, or devices (3.5.1). 
Preventing reuse of identifiers implies preventing the assignment of previously used individual, group, role, or device identifiers to different individuals, groups, roles, or devices.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"3_5_6": {"name": "3.5.6 Disable identifiers after a defined period of inactivity","checks": {"iam_user_accesskey_unused": null,"iam_password_policy_reuse_24": null,"iam_user_console_access_unused": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_5_6","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Inactive identifiers pose a risk to organizational information because attackers may exploit an inactive identifier to gain undetected access to organizational devices. The owners of the inactive accounts may not notice if unauthorized access to the account has been obtained.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"3_5_7": {"name": "3.5.7 Enforce a minimum password complexity and change of characters when new passwords are created","checks": {"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_5_7","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "This requirement applies to single-factor authentication of individuals using passwords as individual or group authenticators, and in a similar manner, when passwords are used as part of multifactor authenticators. The number of changed characters refers to the number of changes required with respect to the total number of positions in the current password. 
To mitigate certain brute force attacks against passwords, organizations may also consider salting passwords.","checks_status": {"fail": 0,"pass": 0,"total": 9,"manual": 0}},"3_5_8": {"name": "3.5.8 Prohibit password reuse for a specified number of generations","checks": {"iam_user_accesskey_unused": null,"iam_password_policy_reuse_24": null,"iam_user_console_access_unused": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_5_8","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Password lifetime restrictions do not apply to temporary passwords.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"3_6_1": {"name": "3.6.1 Establish an operational incident-handling capability for organizational systems that includes preparation, detection, analysis, containment, recovery, and user response activities","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_6_1","Section": "3.6 Incident Response","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Organizations recognize that incident handling capability is dependent on the capabilities of organizational systems and the mission/business processes being supported by those systems. Organizations consider incident handling as part of the definition, design, and development of mission/business processes and systems. Incident-related information can be obtained from a variety of sources including audit monitoring, network monitoring, physical access monitoring, user and administrator reports, and reported supply chain events. Effective incident handling capability includes coordination among many organizational entities including mission/business owners, system owners, authorizing officials, human resources offices, physical and personnel security offices, legal departments, operations personnel, procurement offices, and the risk executive. As part of user response activities, incident response training is provided by organizations and is linked directly to the assigned roles and responsibilities of organizational personnel to ensure that the appropriate content and level of detail is included in such training. For example, regular users may only need to know who to call or how to recognize an incident on the system; system administrators may require additional training on how to handle or remediate incidents; and incident responders may receive more specific training on forensics, reporting, system recovery, and restoration. Incident response training includes user training in the identification/reporting of suspicious activities from external and internal sources. 
User response activities also include incident response assistance which may consist of help desk support, assistance groups, and access to forensics services or consumer redress services, when required.","checks_status": {"fail": 6,"pass": 4,"total": 14,"manual": 0}},"3_6_2": {"name": "3.6.2 Track, document, and report incidents to designated officials and/or authorities both internal and external to the organization","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_6_2","Section": "3.6 Incident Response","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Tracking and documenting system security incidents includes maintaining records about each incident, the status of the incident, and other pertinent information necessary for forensics, evaluating incident details, trends, and handling. Incident information can be obtained from a variety of sources including incident reports, incident response teams, audit monitoring, network monitoring, physical access monitoring, and user/administrator reports. Reporting incidents addresses specific incident reporting requirements within an organization and the formal incident reporting requirements for the organization. Suspected security incidents may also be reported and include the receipt of suspicious email communications that can potentially contain malicious code. The types of security incidents reported, the content and timeliness of the reports, and the designated reporting authorities reflect applicable laws, Executive Orders, directives, regulations, and policies.","checks_status": {"fail": 6,"pass": 4,"total": 14,"manual": 0}},"3_11_2": {"name": "3.11.2 Scan for vulnerabilities in organizational systems and applications periodically and when new vulnerabilities affecting those systems and applications are identified","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_11_2","Section": "3.11 Risk Assessment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Organizations determine the required vulnerability scanning for all system components, ensuring that potential sources of vulnerabilities such as networked printers, scanners, and copiers are not overlooked. The vulnerabilities to be scanned are readily updated as new vulnerabilities are discovered, announced, and scanning methods developed. This process ensures that potential vulnerabilities in the system are identified and addressed as quickly as possible. Vulnerability analyses for custom software applications may require additional approaches such as static analysis, dynamic analysis, binary analysis, or a hybrid of the three approaches. 
Organizations can employ these analysis approaches in a variety of tools (e.g., static analysis tools, web-based application scanners, binary analyzers) and in source code reviews. Vulnerability scanning includes: scanning for patch levels; scanning for functions, ports, protocols, and services that should not be accessible to users or devices; and scanning for improperly configured or incorrectly operating information flow control mechanisms. To facilitate interoperability, organizations consider using products that are Security Content Automation Protocol (SCAP)-validated, scanning tools that express vulnerabilities in the Common Vulnerabilities and Exposures (CVE) naming convention, and that employ the Open Vulnerability Assessment Language (OVAL) to determine the presence of system vulnerabilities. Sources for vulnerability information include the Common Weakness Enumeration (CWE) listing and the National Vulnerability Database (NVD). Security assessments, such as red team exercises, provide additional sources of potential vulnerabilities for which to scan. Organizations also consider using scanning tools that express vulnerability impact by the Common Vulnerability Scoring System (CVSS). In certain situations, the nature of the vulnerability scanning may be more intrusive or the system component that is the subject of the scanning may contain highly sensitive information. Privileged access authorization to selected system components facilitates thorough vulnerability scanning and protects the sensitive nature of such scanning.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"3_11_3": {"name": "3.11.3 Remediate vulnerabilities in accordance with risk assessments","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_11_3","Section": "3.11 Risk Assessment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Vulnerabilities discovered, for example, via the scanning conducted in response to 3.11.2, are remediated with consideration of the related assessment of risk. The consideration of risk influences the prioritization of remediation efforts and the level of effort to be expended in the remediation for specific vulnerabilities.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"3_12_4": {"name": "3.12.4 Develop, document, and periodically update system security plans that describe system boundaries, system environments of operation, how security requirements are implemented, and the relationships with or connections to other systems","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_12_4","Section": "3.12 Assessment, Authorization, and Monitoring","Service": "aws","SubGroup": null,"SubSection": null}],"description": "System security plans relate security requirements to a set of security controls. 
System security plans also describe, at a high level, how the security controls meet those security requirements, but do not provide detailed, technical descriptions of the design or implementation of the controls. System security plans contain sufficient information to enable a design and implementation that is unambiguously compliant with the intent of the plans and subsequent determinations of risk if the plan is implemented as intended. Security plans need not be single documents; the plans can be a collection of various documents including documents that already exist. Effective security plans make extensive use of references to policies, procedures, and additional documents (e.g., design and implementation specifications) where more detailed information can be obtained. This reduces the documentation requirements associated with security programs and maintains security-related information in other established management/operational areas related to enterprise architecture, system development life cycle, systems engineering, and acquisition. Federal agencies may consider the submitted system security plans and plans of action as critical inputs to an overall risk management decision to process, store, or transmit CUI on a system hosted by a nonfederal organization and whether it is advisable to pursue an agreement or contract with the nonfederal organization.","checks_status": {"fail": 2,"pass": 3,"total": 9,"manual": 0}},"3_13_1": {"name": "3.13.1 Monitor, control, and protect communications (i.e., information transmitted or received by organizational systems) at the external boundaries and key internal boundaries of organizational systems","checks": {"elb_ssl_listeners": "FAIL","elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","elbv2_waf_acl_attached": "FAIL","rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"cloudtrail_multi_region_enabled": "PASS","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_1","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Communications can be monitored, controlled, and protected at boundary components and by restricting or prohibiting interfaces in organizational systems. Boundary components include gateways, routers, firewalls, guards, network-based malicious code analysis and virtualization systems, or encrypted tunnels implemented within a system security architecture (e.g., routers protecting firewalls or application gateways residing on protected subnetworks). Restricting or prohibiting interfaces in organizational systems includes restricting external web communications traffic to designated web servers within managed interfaces and prohibiting external traffic that appears to be spoofing internal addresses. 
Organizations consider the shared nature of commercial telecommunications services in the implementation of security requirements associated with the use of such services. Commercial telecommunications services are commonly based on network components and consolidated management systems shared by all attached commercial customers and may also include third party-provided access lines and other service elements. Such transmission services may represent sources of increased risk despite contract security provisions.","checks_status": {"fail": 10,"pass": 8,"total": 23,"manual": 0}},"3_13_2": {"name": "3.13.2 Employ architectural designs, software development techniques, and systems engineering principles that promote effective information security within organizational systems","checks": {"rds_instance_multi_az": "FAIL","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","efs_have_backup_enabled": "FAIL","s3_bucket_public_access": null,"elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","dynamodb_tables_pitr_enabled": null,"awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"cloudtrail_multi_region_enabled": "PASS","acm_certificates_expiration_check": "PASS","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"rds_instance_integration_cloudwatch_logs": "FAIL","awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_2","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Organizations apply systems security engineering principles to new development systems or systems undergoing major upgrades. For legacy systems, organizations apply systems security engineering principles to system upgrades and modifications to the extent feasible, given the current state of hardware, software, and firmware components within those systems. The application of systems security engineering concepts and principles helps to develop trustworthy, secure, and resilient systems and system components and reduce the susceptibility of organizations to disruptions, hazards, and threats. Examples of these concepts and principles include developing layered protections; establishing security policies, architecture, and controls as the foundation for design; incorporating security requirements into the system development life cycle; delineating physical and logical security boundaries; ensuring that developers are trained on how to build secure software; and performing threat modeling to identify use cases, threat agents, attack vectors and patterns, design patterns, and compensating controls needed to mitigate risk. 
Organizations that apply security engineering concepts and principles can facilitate the development of trustworthy, secure systems, system components, and system services; reduce risk to acceptable levels; and make informed risk-management decisions.","checks_status": {"fail": 7,"pass": 8,"total": 23,"manual": 0}},"3_13_3": {"name": "3.13.3 Separate user functionality from system management functionality","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_13_3","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "System management functionality includes functions necessary to administer databases, network components, workstations, or servers, and typically requires privileged user access. The separation of user functionality from system management functionality is physical or logical. Organizations can implement separation of system management functionality from user functionality by using different computers, different central processing units, different instances of operating systems, or different network addresses; virtualization techniques; or combinations of these or other methods, as appropriate. This type of separation includes web administrative interfaces that use separate authentication methods for users of any other system resources. Separation of system and user functionality may include isolating administrative interfaces on different domains and with additional access controls.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"3_13_4": {"name": "3.13.4 Prevent unauthorized and unintended information transfer via shared system resources","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_13_4","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The control of information in shared system resources (e.g., registers, cache memory, main memory, hard disks) is also commonly referred to as object reuse and residual information protection. This requirement prevents information produced by the actions of prior users or roles (or the actions of processes acting on behalf of prior users or roles) from being available to any current users or roles (or current processes acting on behalf of current users or roles) that obtain access to shared system resources after those resources have been released back to the system. This requirement also applies to encrypted representations of information. 
This requirement does not address information remanence, which refers to residual representation of data that has been nominally deleted; covert channels (including storage or timing channels) where shared resources are manipulated to violate information flow restrictions; or components within systems for which there are only single users or roles.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3_13_5": {"name": "3.13.5 Implement subnetworks for publicly accessible system components that are physically or logically separated from internal networks","checks": {"elb_ssl_listeners": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_5","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Subnetworks that are physically or logically separated from internal networks are referred to as demilitarized zones (DMZs). DMZs are typically implemented with boundary control devices and techniques that include routers, gateways, firewalls, virtualization, or cloud-based technologies.","checks_status": {"fail": 6,"pass": 6,"total": 20,"manual": 0}},"3_13_6": {"name": "3.13.6 Deny network communications traffic by default and allow network communications traffic by exception (i.e., deny all, permit by exception)","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_6","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "This requirement applies to inbound and outbound network communications traffic at the system boundary and at identified points within the system. 
A deny-all, permit-by-exception network communications traffic policy ensures that only those connections which are essential and approved are allowed.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"3_13_8": {"name": "3.13.8 Implement cryptographic mechanisms to prevent unauthorized disclosure of CUI during transmission unless otherwise protected by alternative physical safeguards","checks": {"elb_ssl_listeners": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_8","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "This requirement applies to internal and external networks and any system components that can transmit information including servers, notebook computers, desktop computers, mobile devices, printers, copiers, scanners, and facsimile machines. Communication paths outside the physical protection of controlled boundaries are susceptible to both interception and modification. Organizations relying on commercial providers offering transmission services as commodity services rather than as fully dedicated services (i.e., services which can be highly specialized to individual customer needs), may find it difficult to obtain the necessary assurances regarding the implementation of the controls for transmission confidentiality. In such situations, organizations determine what types of confidentiality services are available in commercial telecommunication service packages. If it is infeasible or impractical to obtain the necessary safeguards and assurances of the effectiveness of the safeguards through appropriate contracting vehicles, organizations implement compensating safeguards or explicitly accept the additional risk. An example of an alternative physical safeguard is a protected distribution system (PDS) where the distribution medium is protected against electronic or physical intercept, thereby ensuring the confidentiality of the information being transmitted.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"3_14_1": {"name": "3.14.1 Identify, report, and correct system flaws in a timely manner","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_14_1","Section": "3.14 System and Information integrity","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Organizations identify systems that are affected by announced software and firmware flaws including potential vulnerabilities resulting from those flaws and report this information to designated personnel with information security responsibilities. Security-relevant updates include patches, service packs, hot fixes, and anti-virus signatures. Organizations address flaws discovered during security assessments, continuous monitoring, incident response activities, and system error handling. Organizations can take advantage of available resources such as the Common Weakness Enumeration (CWE) database or Common Vulnerabilities and Exposures (CVE) database in remediating flaws discovered in organizational systems. Organization-defined time periods for updating security-relevant software and firmware may vary based on a variety of factors including the criticality of the update (i.e., severity of the vulnerability related to the discovered flaw). 
Some types of flaw remediation may require more testing than other types of remediation.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"3_14_2": {"name": "3.14.2 Provide protection from malicious code at designated locations within organizational systems","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_14_2","Section": "3.14 System and Information integrity","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Designated locations include system entry and exit points which may include firewalls, remote access servers, workstations, electronic mail servers, web servers, proxy servers, notebook computers, and mobile devices. Malicious code includes viruses, worms, Trojan horses, and spyware. Malicious code can be encoded in various formats (e.g., UUENCODE, Unicode), contained within compressed or hidden files, or hidden in files using techniques such as steganography. Malicious code can be inserted into systems in a variety of ways including web accesses, electronic mail, electronic mail attachments, and portable storage devices. Malicious code insertions occur through the exploitation of system vulnerabilities. Malicious code protection mechanisms include anti-virus signature definitions and reputation-based technologies. A variety of technologies and methods exist to limit or eliminate the effects of malicious code. Pervasive configuration management and comprehensive software integrity controls may be effective in preventing execution of unauthorized code. In addition to commercial off-the-shelf software, malicious code may also be present in custom-built software. This could include logic bombs, back doors, and other types of cyber-attacks that could affect organizational missions/business functions. Traditional malicious code protection mechanisms cannot always detect such code. In these situations, organizations rely instead on other safeguards including secure coding practices, configuration management and control, trusted procurement processes, and monitoring practices to help ensure that software does not perform functions other than the functions intended.","checks_status": {"fail": 2,"pass": 2,"total": 5,"manual": 0}},"3_14_3": {"name": "3.14.3 Monitor system security alerts and advisories and take action in response","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_14_3","Section": "3.14 System and Information integrity","Service": "aws","SubGroup": null,"SubSection": null}],"description": "There are many publicly available sources of system security alerts and advisories. For example, the Department of Homeland Security’s Cybersecurity and Infrastructure Security Agency (CISA) generates security alerts and advisories to maintain situational awareness across the federal government and in nonfederal organizations. Software vendors, subscription services, and industry information sharing and analysis centers (ISACs) may also provide security alerts and advisories. 
Examples of response actions include notifying relevant external organizations, for example, external mission/business partners, supply chain partners, external service providers, and peer or supporting organizations.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"3_14_4": {"name": "3.14.4 Update malicious code protection mechanisms when new releases are available","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "3_14_4","Section": "3.14 System and Information integrity","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Malicious code protection mechanisms include anti-virus signature definitions and reputation-based technologies. A variety of technologies and methods exist to limit or eliminate the effects of malicious code. Pervasive configuration management and comprehensive software integrity controls may be effective in preventing execution of unauthorized code. In addition to commercial off-the-shelf software, malicious code may also be present in custom-built software. This could include logic bombs, back doors, and other types of cyber-attacks that could affect organizational missions/business functions. Traditional malicious code protection mechanisms cannot always detect such code. In these situations, organizations rely instead on other safeguards including secure coding practices, configuration management and control, trusted procurement processes, and monitoring practices to help ensure that software does not perform functions other than the functions intended.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3_14_6": {"name": "3.14.6 Monitor organizational systems, including inbound and outbound communications traffic, to detect attacks and indicators of potential attacks","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_14_6","Section": "3.14 System and Information integrity","Service": "aws","SubGroup": null,"SubSection": null}],"description": "System monitoring includes external and internal monitoring. External monitoring includes the observation of events occurring at the system boundary (i.e., part of perimeter defense and boundary protection). Internal monitoring includes the observation of events occurring within the system. Organizations can monitor systems, for example, by observing audit record activities in real time or by observing other system aspects such as access patterns, characteristics of access, and other actions. The monitoring objectives may guide determination of the events. System monitoring capability is achieved through a variety of tools and techniques (e.g., intrusion detection systems, intrusion prevention systems, malicious code protection software, scanning tools, audit record monitoring software, network monitoring software). Strategic locations for monitoring devices include selected perimeter locations and near server farms supporting critical applications, with such devices being employed at managed system interfaces. 
The granularity of monitoring information collected is based on organizational monitoring objectives and the capability of systems to support such objectives. System monitoring is an integral part of continuous monitoring and incident response programs. Output from system monitoring serves as input to continuous monitoring and incident response programs. A network connection is any connection with a device that communicates through a network (e.g., local area network, Internet). A remote connection is any connection with a device communicating through an external network (e.g., the Internet). Local, network, and remote connections can be either wired or wireless. Unusual or unauthorized activities or conditions related to inbound/outbound communications traffic include internal traffic that indicates the presence of malicious code in systems or propagating among system components, the unauthorized exporting of information, or signaling to external systems. Evidence of malicious code is used to identify potentially compromised systems or system components. System monitoring requirements, including the need for specific types of system monitoring, may be referenced in other requirements.","checks_status": {"fail": 5,"pass": 4,"total": 12,"manual": 0}},"3_14_7": {"name": "3.14.7 Identify unauthorized use of organizational systems","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_14_7","Section": "3.14 System and Information integrity","Service": "aws","SubGroup": null,"SubSection": null}],"description": "System monitoring includes external and internal monitoring. System monitoring can detect unauthorized use of organizational systems. System monitoring is an integral part of continuous monitoring and incident response programs. Monitoring is achieved through a variety of tools and techniques (e.g., intrusion detection systems, intrusion prevention systems, malicious code protection software, scanning tools, audit record monitoring software, network monitoring software). Output from system monitoring serves as input to continuous monitoring and incident response programs. Unusual/unauthorized activities or conditions related to inbound and outbound communications traffic include internal traffic that indicates the presence of malicious code in systems or propagating among system components, the unauthorized exporting of information, or signaling to external systems. Evidence of malicious code is used to identify potentially compromised systems or system components. 
System monitoring requirements, including the need for specific types of system monitoring, may be referenced in other requirements.","checks_status": {"fail": 5,"pass": 4,"total": 12,"manual": 0}},"3_1_12": {"name": "3.1.12 Monitor and control remote access sessions","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_12","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Remote access is access to organizational systems by users (or processes acting on behalf of users) communicating through external networks (e.g., the Internet). Remote access methods include dial-up, broadband, and wireless. Organizations often employ encrypted virtual private networks (VPNs) to enhance confidentiality over remote connections. The use of encrypted VPNs does not make the access non-remote; however, the use of VPNs, when adequately provisioned with appropriate control (e.g., employing encryption techniques for confidentiality protection), may provide sufficient assurance to the organization that it can effectively treat such connections as internal networks. VPNs with encrypted tunnels can affect the capability to adequately monitor network communications traffic for malicious code. Automated monitoring and control of remote access sessions allows organizations to detect cyberattacks and help to ensure ongoing compliance with remote access policies by auditing connection activities of remote users on a variety of system components (e.g., servers, workstations, notebook computers, smart phones, and tablets).","checks_status": {"fail": 4,"pass": 4,"total": 11,"manual": 0}},"3_1_13": {"name": "3.1.13 Employ cryptographic mechanisms to protect the confidentiality of remote access sessions","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_13","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Cryptographic standards include FIPS-validated cryptography and NSA-approved cryptography.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"3_1_14": {"name": "3.1.14 Route remote access via managed access control points","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"iam_user_mfa_enabled_console_access": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_14","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Routing remote access through managed access control points enhances explicit, organizational 
control over such connections, reducing the susceptibility to unauthorized access to organizational systems resulting in the unauthorized disclosure of CUI.","checks_status": {"fail": 3,"pass": 5,"total": 14,"manual": 0}},"3_1_20": {"name": "3.1.20 Verify and control/limit connections to and use of external systems","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_1_20","Section": "3.1 Access Control","Service": "aws","SubGroup": null,"SubSection": null}],"description": "External systems are systems or components of systems for which organizations typically have no direct supervision and authority over the application of security requirements and controls or the determination of the effectiveness of implemented controls on those systems. External systems include personally owned systems, components, or devices and privately-owned computing and communications devices resident in commercial or public facilities. This requirement also addresses the use of external systems for the processing, storage, or transmission of CUI, including accessing cloud services (e.g., infrastructure as a service, platform as a service, or software as a service) from organizational systems. Organizations establish terms and conditions for the use of external systems in accordance with organizational security policies and procedures. Terms and conditions address, as a minimum, the types of applications that can be accessed on organizational systems from external systems. If terms and conditions with the owners of external systems cannot be established, organizations may impose restrictions on organizational personnel using those external systems. This requirement recognizes that there are circumstances where individuals using external systems (e.g., contractors, coalition partners) need to access organizational systems. In those situations, organizations need confidence that the external systems contain the necessary controls so as not to compromise, damage, or otherwise harm organizational systems. Verification that the required controls have been effectively implemented can be achieved by third-party, independent assessments, attestations, or other means, depending on the assurance or confidence level required by organizations. Note that while “external” typically refers to outside of the organization's direct supervision and authority, that is not always the case. Regarding the protection of CUI across an organization, the organization may have systems that process CUI and others that do not. And among the systems that process CUI there are likely access restrictions for CUI that apply between systems. 
Therefore, from the perspective of a given system, other systems within the organization may be considered 'external' to that system.","checks_status": {"fail": 2,"pass": 1,"total": 5,"manual": 0}},"3_5_10": {"name": "3.5.10 Store and transmit only cryptographically-protected passwords","checks": {"ec2_ebs_volume_encryption": "PASS","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_default_encryption": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_5_10","Section": "3.5 Identification and Authentication","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Cryptographically-protected passwords use salted one-way cryptographic hashes of passwords.","checks_status": {"fail": 2,"pass": 3,"total": 6,"manual": 0}},"3_13_11": {"name": "3.13.11 Employ FIPS-validated cryptography when used to protect the confidentiality of CUI","checks": {"ec2_ebs_volume_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","acm_certificates_expiration_check": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_11","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Cryptography can be employed to support many security solutions including the protection of controlled unclassified information, the provision of digital signatures, and the enforcement of information separation when authorized individuals have the necessary clearances for such information but lack the necessary formal access approvals. Cryptography can also be used to support random number generation and hash generation. Cryptographic standards include FIPS-validated cryptography and/or NSA-approved cryptography.","checks_status": {"fail": 6,"pass": 3,"total": 12,"manual": 0}},"3_13_15": {"name": "3.13.15 Protect the authenticity of communications sessions","checks": {"elb_ssl_listeners": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_15","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Authenticity protection includes protecting against man-in-the-middle attacks, session hijacking, and the insertion of false information into communications sessions. 
This requirement addresses communications protection at the session versus packet level (e.g., sessions in service-oriented architectures providing web-based services) and establishes grounds for confidence at both ends of communications sessions in ongoing identities of other parties and in the validity of information transmitted.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3_13_16": {"name": "3.13.16 Protect the confidentiality of CUI at rest","checks": {"ec2_ebs_volume_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "3_13_16","Section": "3.13 System and Communications Protection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Information at rest refers to the state of information when it is not in process or in transit and is located on storage devices as specific components of systems. The focus of protection at rest is not on the type of storage device or the frequency of access but rather the state of the information. Organizations can use different mechanisms to achieve confidentiality protections, including the use of cryptographic mechanisms and file share scanning. Organizations may also use other controls including secure off-line storage in lieu of online storage when adequate protection of information at rest cannot otherwise be achieved or continuous monitoring to identify malicious code at rest.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}}},"requirements_passed": 14,"requirements_failed": 35,"requirements_manual": 1,"total_requirements": 50,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "168b9e98-d0d8-47a8-b53a-32097ec095ac","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "nist_800_53_revision_4_aws","framework": "NIST-800-53-Revision-4","version": "","description": "NIST 800-53 is a regulatory standard that defines the minimum baseline of security controls for all U.S. federal information systems except those related to national security. 
The controls defined in this standard are customizable and address a diverse set of security and privacy requirements.","region": "eu-west-1","requirements": {"ac_2": {"name": "Account Management (AC-2)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_password_policy_reuse_24": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_2","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Manage system accounts, group memberships, privileges, workflow, notifications, deactivations, and authorizations.","checks_status": {"fail": 3,"pass": 3,"total": 18,"manual": 0}},"ac_3": {"name": "Access Enforcement (AC-3)","checks": {"iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system enforces approved authorizations for logical access to information and system resources in accordance with applicable access control policies.","checks_status": {"fail": 0,"pass": 3,"total": 14,"manual": 0}},"ac_4": {"name": "Information Flow Enforcement (AC-4)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_4","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system enforces approved authorizations for controlling the flow of information within the system and between interconnected systems based on organization-defined information flow control policies.","checks_status": {"fail": 
3,"pass": 7,"total": 17,"manual": 0}},"ac_5": {"name": "Separation Of Duties (AC-5)","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_5","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Separate duties of individuals to prevent malevolent activity. automate separation of duties and access authorizations.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_6": {"name": "Least Privilege (AC-6)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_6","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization employs the principle of least privilege, allowing only authorized accesses for users (or processes acting on behalf of users) which are necessary to accomplish assigned tasks in accordance with organizational missions and business functions.","checks_status": {"fail": 1,"pass": 5,"total": 18,"manual": 0}},"au_2": {"name": "Event Logging (AU-2)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_2","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Automate security audit function with other organizational entities. 
Enable mutual support of audit of auditable events.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3": {"name": "Content of Audit Records (AU-3)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system generates audit records containing information that establishes what type of event occurred, when the event occurred, where the event occurred, the source of the event, the outcome of the event, and the identity of any individuals or subjects associated with the event.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_9": {"name": "Protection of Audit Information (AU-9)","checks": {"cloudtrail_kms_encryption_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_9","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects audit information and audit tools from unauthorized access, modification, and deletion.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"ca_7": {"name": "Continuous Monitoring (CA-7)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca_7","Section": "Security Assessment And Authorization (CA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Continuously monitor configuration management processes. 
Determine security impact, environment and operational risks.","checks_status": {"fail": 2,"pass": 3,"total": 9,"manual": 0}},"cm_2": {"name": "Baseline Configuration (CM-2)","checks": {"elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","cloudtrail_multi_region_enabled": "PASS","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization develops, documents, and maintains under configuration control, a current baseline configuration of the information system.","checks_status": {"fail": 5,"pass": 1,"total": 6,"manual": 0}},"cm_7": {"name": "Least Functionality (CM-7)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_7","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization configures the information system to provide only essential capabilities and prohibits or restricts the use of the functions, ports, protocols, and/or services.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cp_9": {"name": "Information System Backup (CP-9)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","dynamodb_tables_pitr_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_9","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization conducts backups of user-level information, system-level information and information system documentation including security-related documentation contained in the information system and protects the confidentiality, integrity, and availability of backup information at storage locations.","checks_status": {"fail": 1,"pass": 1,"total": 4,"manual": 0}},"ia_2": {"name": "Identification and Authentication (Organizational users) (IA-2)","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2","Section": "Identification and Authentication (IA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system uniquely identifies and authenticates organizational users (or processes acting on behalf of organizational users).","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ra_5": {"name": "Vulnerability Scanning (RA-5)","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ra_5","Section": "Risk Assessment (RA)","Service": "guardduty","SubGroup": null,"SubSection": null}],"description": "Scan for system vulnerabilities. 
Share vulnerability information and security controls that eliminate vulnerabilities.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"sa_3": {"name": "System Development Life Cycle (SA-3)","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa_3","Section": "System and Services Acquisition (SA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization manages the information system using organization-defined system development life cycle, defines and documents information security roles and responsibilities throughout the system development life cycle, identifies individuals having information security roles and responsibilities and integrates the organizational information security risk management process into system development life cycle activities.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc_2": {"name": "Application Partitioning (SC-2)","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_2","Section": "System and Communications Protection (SC)","Service": "iam","SubGroup": null,"SubSection": null}],"description": "The information system separates user functionality (including user interface services) from information system management functionality.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"sc_4": {"name": "Information In Shared Resources (SC-4)","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_4","Section": "System and Communications Protection (SC)","Service": "ebs","SubGroup": null,"SubSection": null}],"description": "The information system prevents unauthorized and unintended information transfer via shared system resources.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"sc_5": {"name": "Denial Of Service Protection (SC-5)","checks": {"rds_instance_multi_az": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_5","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects against or limits the effects of the following types of denial of service attacks: [Assignment: organization-defined types of denial of service attacks or references to sources for such information] by employing [Assignment: organization-defined security safeguards].","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"sc_7": {"name": "Boundary Protection (SC-7)","checks": {"elb_ssl_listeners": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": 
"PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system: a. Monitors and controls communications at the external boundary of the system and at key internal boundaries within the system; b. Implements subnetworks for publicly accessible system components that are [Selection: physically; logically] separated from internal organizational networks; and c. Connects to external networks or information systems only through managed interfaces consisting of boundary protection devices arranged in accordance with an organizational security architecture.","checks_status": {"fail": 6,"pass": 6,"total": 20,"manual": 0}},"sc_8": {"name": "Transmission Confidentiality And Integrity (SC-8)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects the [Selection (one or more): confidentiality; integrity] of transmitted information.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"si_4": {"name": "Information System Monitoring (SI-4)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_waf_acl_attached": "FAIL","ec2_instance_imdsv2_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization: a.Monitors the information system to detect: 1. Attacks and indicators of potential attacks in accordance with [Assignment: organization-defined monitoring objectives]; and 2.Unauthorized local, network, and remote connections; b. Identifies unauthorized use of the information system through [Assignment: organization-defined techniques and methods]; c. Deploys monitoring devices: 1. Strategically within the information system to collect organization-determined essential information; and 2. At ad hoc locations within the system to track specific types of transactions of interest to the organization; d. Protects information obtained from intrusion-monitoring tools from unauthorized access, modification, and deletion; e. Heightens the level of information system monitoring activity whenever there is an indication of increased risk to organizational operations and assets, individuals, other organizations, or the Nation based on law enforcement information, intelligence information, or other credible sources of information; f. Obtains legal opinion with regard to information system monitoring activities in accordance with applicable federal laws, Executive Orders, directives, policies, or regulations; and g. 
Provides [Assignment: organization-defined information system monitoring information] to [Assignment: organization-defined personnel or roles] [Selection (one or more): as needed; [Assignment: organization-defined frequency]].","checks_status": {"fail": 3,"pass": 3,"total": 10,"manual": 0}},"si_7": {"name": "Software, Firmware, and Information Integrity (SI-7)","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization employs integrity verification tools to detect unauthorized changes to [Assignment: organization-defined software, firmware, and information].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ac_21": {"name": "Information Sharing (AC-21)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_21","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Facilitate information sharing. Enable authorized users to grant access to partners.","checks_status": {"fail": 1,"pass": 4,"total": 11,"manual": 0}},"au_11": {"name": "Audit Record Retention (AU-11)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_11","Section": "Audit and Accountability (AU)","Service": "cloudwatch","SubGroup": null,"SubSection": null}],"description": "The organization retains audit records for [Assignment: organization-defined time period consistent with records retention policy] to provide support for after-the-fact investigations of security incidents and to meet regulatory and organizational information retention requirements.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_12": {"name": "Audit Generation (AU-12)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Audit events defined in AU-2. Allow trusted personnel to select which events to audit. 
Generate audit records for events.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"cp_10": {"name": "Information System Recovery And Reconstitution (CP-10)","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_10","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization provides for the recovery and reconstitution of the information system to a known state after a disruption, compromise, or failure.","checks_status": {"fail": 4,"pass": 1,"total": 7,"manual": 0}},"sa_10": {"name": "Developer Configuration Management (SA-10)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa_10","Section": "System and Services Acquisition (SA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization requires the developer of the information system, system component, or information system service to: a. Perform configuration management during system, component, or service [Selection (one or more): design; development; implementation; operation]; b. Document, manage, and control the integrity of changes to [Assignment: organization-defined configuration items under configuration management]; c. Implement only organization-approved changes to the system, component, or service; d. Document approved changes to the system, component, or service and the potential security impacts of such changes; and e. 
Track security flaws and flaw resolution within the system, component, or service and report findings to [Assignment: organization-defined personnel].","checks_status": {"fail": 2,"pass": 2,"total": 4,"manual": 0}},"sc_12": {"name": "Cryptographic Key Establishment And Management (SC-12)","checks": {"kms_cmk_rotation_enabled": null,"acm_certificates_expiration_check": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_12","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization establishes and manages cryptographic keys for required cryptography employed within the information system in accordance with [Assignment: organization-defined requirements for key generation, distribution, storage, access, and destruction].","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"sc_13": {"name": "Cryptographic Protection (SC-13)","checks": {"dynamodb_tables_kms_cmk_encryption_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_13","Section": "System and Communications Protection (SC)","Service": "dynamodb","SubGroup": null,"SubSection": null}],"description": "The information system implements [Assignment: organization-defined cryptographic uses and type of cryptography required for each use] in accordance with applicable federal laws, Executive Orders, directives, policies, regulations, and standards.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"sc_23": {"name": "Session Authenticity (SC-23)","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_23","Section": "System and Communications Protection (SC)","Service": "elb","SubGroup": null,"SubSection": null}],"description": "The information system protects the authenticity of communications sessions.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"sc_28": {"name": "Protection Of Information At Rest (SC-28)","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_28","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects the [Selection (one or more): confidentiality; integrity] of [Assignment: organization-defined information at rest].","checks_status": {"fail": 5,"pass": 3,"total": 12,"manual": 0}},"si_12": {"name": "Information Handling and Retention (SI-12)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_12","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization handles and retains information within the information system and information output from the system in accordance with applicable federal laws, Executive Orders, 
directives, policies, regulations, standards, and operational requirements.","checks_status": {"fail": 3,"pass": 1,"total": 6,"manual": 0}},"ac_2_1": {"name": "AC-2(1) Automated System Account Management","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_user_accesskey_unused": null,"iam_password_policy_reuse_24": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "Access control policies (e.g., identity or role-based policies, control matrices, and cryptography) control access between active entities or subjects (i.e., users or processes acting on behalf of users) and passive entities or objects (e.g., devices, files, records, and domains) in systems. Access enforcement mechanisms can be employed at the application and service level to provide increased information security. Other systems include systems internal and external to the organization. This requirement focuses on account management for systems and applications. The definition of and enforcement of access authorizations, other than those determined by account type (e.g., privileged versus non-privileged) are addressed in requirement 3.1.2.","checks_status": {"fail": 0,"pass": 2,"total": 6,"manual": 0}},"ac_2_3": {"name": "AC-2(3) Disable Inactive Accounts","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The information system automatically disables inactive accounts after 90 days for user accounts.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"ac_2_4": {"name": "AC-2(4) Automated Audit Actions","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_2_4","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The information system automatically audits account creation, modification, enabling, disabling, and removal actions, and notifies [Assignment: organization-defined personnel or roles].","checks_status": {"fail": 2,"pass": 3,"total": 11,"manual": 0}},"au_6_1": {"name": "AU-6(1) Process Integration","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Review, Analysis And Reporting (AU-6)"}],"description": 
"The organization employs automated mechanisms to integrate audit review, analysis,and reporting processes to support organizational processes for investigation and response to suspicious activities.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"au_6_3": {"name": "AU-6(3) Correlate Audit Repositories","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Review, Analysis And Reporting (AU-6)"}],"description": "The organization analyzes and correlates audit records across different repositories to gain organization-wide situational awareness.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"au_7_1": {"name": "AU-7(1) Automatic Processing","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_7_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system provides the capability to process audit records for events of interest based on [Assignment: organization-defined audit fields within audit records].","checks_status": {"fail": 1,"pass": 0,"total": 5,"manual": 0}},"au_9_2": {"name": "AU-9(2) Audit Backup On Separate Physical Systems / Components","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "au_9_2","Section": "Audit and Accountability (AU)","Service": "s3","SubGroup": null,"SubSection": "Protection of Audit Information (AU-9)"}],"description": "The information system backs up audit records [Assignment: organization-defined frequency] onto a physically different system or system component than the system or component being audited.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cm_8_1": {"name": "CM-8(1) Updates During Installation / Removals","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_1","Section": "Configuration Management (CM)","Service": "ec2","SubGroup": null,"SubSection": "Information System Component Inventory (CM-8)"}],"description": "The organization develops and documents an inventory of information system components that accurately reflects the current information system, includes all components within the authorization boundary of the information system, is at the level of granularity deemed necessary for tracking and reporting and reviews and updates the information system component inventory.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cm_8_3": {"name": "CM-8(3) Automated Unauthorized Component Detection","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_3","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Information System 
Component Inventory (CM-8)"}],"description": "The organization employs automated mechanisms to detect the presence of unauthorized hardware, software, and firmware components within the information system and takes actions (disables network access by such components, isolates the components etc) when unauthorized components are detected.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"ia_2_1": {"name": "IA-2(1) Network Access To Privileged Accounts","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_1","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "The information system implements multi-factor authentication for network access to privileged accounts.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia_2_2": {"name": "IA-2(2) Network Access To Non-Privileged Accounts","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_2","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "The information system implements multifactor authentication for network access to non-privileged accounts.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"ia_5_1": {"name": "IA-5(1) Password-Based Authentication","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_1","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "The information system, for password-based authentication that enforces minimum password complexity, stores and transmits only cryptographically-protected passwords, enforces password minimum and maximum lifetime restrictions, prohibits password reuse, allows the use of a temporary password for system logons with an immediate change to a permanent password etc.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_4": {"name": "IA-5(4) Automated Support For Password Strength Determination","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_4","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "The organization employs automated tools to determine if password authenticators are sufficiently strong to satisfy [Assignment: organization-defined requirements].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_7": {"name": "IA-5(7) No Embedded Unencrypted Static Authenticators","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_7","Section": "Identification and Authentication (IA)","Service": "codebuild","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "The organization ensures that unencrypted static authenticators are not embedded in applications or access scripts or stored on function keys.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ir_4_1": {"name": "IR-4(1) Automated Incident Handling Processes","checks": {"guardduty_no_high_severity_findings": 
"FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir_4_1","Section": "Incident Response (IR)","Service": "aws","SubGroup": null,"SubSection": "Incident Handling (IR-4)"}],"description": "The organization employs automated mechanisms to support the incident handling process.","checks_status": {"fail": 1,"pass": 0,"total": 5,"manual": 0}},"ir_6_1": {"name": "IR-6(1) Automated Reporting","checks": {"guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir_6_1","Section": "Incident Response (IR)","Service": "guardduty","SubGroup": null,"SubSection": "Incident Reporting (IR-6)"}],"description": "The organization employs automated mechanisms to assist in the reporting of security incidents.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ir_7_1": {"name": "IR-7(1) Automation Support For Availability Of Information / Support","checks": {"guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir_7_1","Section": "Incident Response (IR)","Service": "guardduty","SubGroup": null,"SubSection": "Incident Response Assistance (IR-7)"}],"description": "The organization employs automated mechanisms to increase the availability of incident response-related information and support.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc_7_3": {"name": "SC-7(3) Access Points","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_3","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "The organization limits the number of external network connections to the information system.","checks_status": {"fail": 3,"pass": 6,"total": 16,"manual": 0}},"sc_8_1": {"name": "SC-8(1) Cryptographic Or Alternate Physical Protection","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8_1","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Confidentiality And Integrity (SC-8)"}],"description": "The information system implements cryptographic mechanisms to [Selection (one or more): prevent unauthorized disclosure of information; detect changes to information] during transmission unless otherwise protected by [Assignment: organization-defined alternative physical safeguards].","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 
0}},"si_2_2": {"name": "SI-2(2) Automates Flaw Remediation Status","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_2_2","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "The organization employs automated mechanisms to determine the state of information system components with regard to flaw remediation.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"si_4_1": {"name": "SI-4(1) System-Wide Intrusion Detection System","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_1","Section": "System and Information Integrity (SI)","Service": "guardduty","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization connects and configures individual intrusion detection tools into an information system-wide intrusion detection system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_2": {"name": "SI-4(2) Automated Tools For Real-Time Analysis","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_2","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization employs automated tools to support near real-time analysis of events.","checks_status": {"fail": 1,"pass": 3,"total": 8,"manual": 0}},"si_4_4": {"name": "SI-4(4) Inbound and Outbound Communications Traffic","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_4","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The information system monitors inbound and outbound communications traffic continuously for unusual or unauthorized activities or conditions.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"si_4_5": {"name": "SI-4(5) System-Generated Alerts","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_5","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The information system alerts organization-defined personnel or roles when the following indications 
of compromise or potential compromise occur: [Assignment: organization-defined compromise indicators].","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"si_7_1": {"name": "SI-7(1) Integrity Checks","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7_1","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Software, Firmware, and Information Integrity (SI-7)"}],"description": "The information system performs an integrity check of security relevant events at least monthly.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ac_17_1": {"name": "AC-17(1) Automated Monitoring/Control","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_17_1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system monitors and controls remote access methods.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ac_17_2": {"name": "AC-17(2) Protection Of Confidentiality/Integrity Using Encryption","checks": {"elb_ssl_listeners": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_2","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system implements cryptographic mechanisms to protect the confidentiality and integrity of remote access sessions.","checks_status": {"fail": 2,"pass": 1,"total": 3,"manual": 0}},"ac_17_3": {"name": "AC-17(3) Managed Access Control Points","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_17_3","Section": "Access Control (AC)","Service": "vpc","SubGroup": null,"SubSection": null}],"description": "The information system routes all remote accesses through organization-defined managed network access control points.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ac_2_12": {"name": "AC-2(12) Account Monitoring","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_12","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "Monitors and reports atypical usage of information system accounts to organization-defined personnel or roles.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ac_6_10": {"name": "AC-6(10) Prohibit Non-Privileged Users From Executing Privileged Functions","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_6_10","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Least Privilege (AC-6)"}],"description": "The information system prevents non-privileged users from executing privileged functions to include disabling, circumventing, or altering implemented security safeguards/countermeasures.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_2_11": {"name": "IA-2(11) Remote Access - Separate Device","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_11","Section": "Identification and 
Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "The information system implements multifactor authentication for remote access to privileged and non-privileged accounts such that one of the factors is provided by a device separate from the system gaining access and the device meets [Assignment: organization-defined strength of mechanism requirements].","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"si_4_16": {"name": "SI-4(16) Correlate Monitoring Information","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_16","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization correlates information from monitoring tools employed throughout the information system.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}}},"requirements_passed": 18,"requirements_failed": 41,"requirements_manual": 5,"total_requirements": 64,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "1c8c75df-34ec-48f2-b6e2-5dba27d9b734","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "kisa_isms_p_2023_aws","framework": "KISA-ISMS-P","version": "2023","description": "The ISMS-P certification, established by KISA (Korea Internet & Security Agency), is a system where an independent certification body evaluates whether a company or organization's information security and privacy protection measures comply with certification standards, and grants certification. This helps organizations improve public trust in their services and respond effectively to increasingly complex cyber threats. The ISMS-P framework also provides comprehensive guidelines for systematically establishing, implementing, and managing information security and privacy protection.","region": "eu-west-1","requirements": {"1.1.1": {"name": "Executive Participation","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.1.1 Executive Participation","Subdomain": "1.1. 
Management System","AuditEvidence": ["Information protection and personal information protection reporting system (e.g., communication plan)","Minutes of the Information Protection and Personal Information Protection Committee","Information protection and personal information protection policies/guidelines (including executive approval records)","Information protection plans and internal management plans (including executive approval records)","Information protection and personal information protection organization chart"],"AuditChecklist": ["Is there documentation outlining the responsibilities and roles of executives to ensure their participation in the establishment and operation of the information protection and personal information protection management system?","Is there a reporting, review, and approval process in place to ensure that executives actively participate in decision-making regarding information protection and personal information protection activities?"],"NonComplianceCases": ["Case 1: Although it is stated in the information protection and personal information protection policy to report the status of information protection and personal information protection to the executives on a quarterly basis, no such reports have been made for an extended period.","Case 2: In performing major information protection activities (e.g., risk assessment, determining risk acceptance levels, reviewing information protection measures and implementation plans, reviewing the results of information protection measures, security audits, etc.), executives or those authorized by the executives did not participate in decision-making or there was no evidence of their involvement."],"RelatedRegulations": []}],"description": "The CEO must establish and operate a reporting and decision-making system to ensure executive participation in the establishment and operation of the information protection and personal information protection management system.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.2": {"name": "Designation of Chief Officers","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.1.2 Designation of Chief Officers","Subdomain": "1.1. 
Management System","AuditEvidence": ["Documents related to the appointment of the CISO and CPO (e.g., personnel orders, personnel records)","Information protection and personal information protection organization chart","Information protection and personal information protection policies/guidelines","Job descriptions (roles and responsibilities of the CISO and CPO)","Records of CISO reports","Internal management plans (regarding the appointment of the CPO)"],"AuditChecklist": ["Has the CEO officially designated a chief officer responsible for overseeing information protection and personal information protection?","Are the CISO and CPO appointed at an executive level with authority to allocate resources such as budget and personnel, and do they meet the qualifications required by relevant laws?"],"NonComplianceCases": ["Case 1: Failure to appoint and report a CISO as required under the Information and Communications Network Act, even though the organization is obligated to do so.","Case 2: Appointing a person without substantial authority and status as the CPO, making it difficult to believe that they are responsible for overseeing personal information processing.","Case 3: Although the organization chart specifies the CISO and CPO, the formal appointment process, such as issuing personnel orders, was not followed.","Case 4: Although the entity is subject to ISMS certification and had over 5 trillion won in assets at the end of the previous year, the CISO also holds the position of CIO, in violation of the ISMS requirements."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures), Article 31 (Designation of a Personal Information Protection Officer)","Information and Communications Network Act, Article 45-3 (Designation of a Chief Information Security Officer, etc.)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "The CEO must appoint a Chief Information Security Officer (CISO) responsible for information protection and a Chief Privacy Officer (CPO) responsible for personal information protection, both at an executive level with authority to allocate resources such as budget and personnel.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.3": {"name": "Organization Structure","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.1.3 Organization Structure","Subdomain": "1.1. 
Management System","AuditEvidence": ["Information protection and personal information protection committee regulations/minutes","Information protection and personal information protection working group regulations/minutes","Information protection and personal information protection organization chart","Internal management plan","Job descriptions"],"AuditChecklist": ["Has the organization established and operated a working group with expertise to support the work of the CISO and CPO and systematically implement the organization's information protection and personal information protection activities?","Has the organization established and operated a committee that can review, approve, and make decisions on important information protection and personal information protection matters across the organization?","Has the organization established and operated a working group composed of information protection and personal information protection officers and department-level personnel for enterprise-wide information protection and personal information protection activities?"],"NonComplianceCases": ["Case 1: The Information Protection and Personal Information Protection Committee was established, but it consists only of department heads without the inclusion of executives, making it difficult to make decisions on the organization's key information and personal information protection matters.","Case 2: Although a working group for information protection and personal information protection was established, including heads of departments that handle important information and personal data, it has not been active for an extended period.","Case 3: Although the Information Protection and Personal Information Protection Committee was convened, major matters such as the annual information protection and personal information protection plan, training plan, budget, and personnel were not reviewed or decided upon.","Case 4: Although an Information Protection Committee was established for deliberation and decision-making on information protection and personal information protection matters, only the operations and IT security departments participated, without the involvement of departments responsible for personal information protection, leaving personal information protection matters undecided."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "The CEO must establish and operate a working group to effectively implement information protection and personal information protection, a committee that can review and approve key matters related to information protection and personal information protection across the organization, and a consultative body consisting of department-level information protection and personal information protection officers for enterprise-wide protection activities.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.4": {"name": "Scope Setting","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.1.4 Scope Setting","Subdomain": "1.1. 
Management System","AuditEvidence": ["Scope definition document for information protection and personal information protection management system","List of information assets and personal information","Document list","Service flowchart","Personal information flowchart","Organization-wide organizational chart","System and network configuration diagram"],"AuditChecklist": ["Has the organization set the scope of the management system to include key assets that may affect core services and personal information processing?","If there are exceptions within the defined scope, are clear reasons documented, and are consultations with relevant stakeholders and approvals from responsible parties recorded and managed?","Is the organization maintaining documentation that includes the major services, operational status, and systems, allowing for clear verification of the scope of the information protection and personal information protection management system?"],"NonComplianceCases": ["Case 1: The development and test systems, external staff, PCs, and test devices related to the development work for information systems and personal information processing systems were omitted from the management system's scope.","Case 2: Key organizations (personnel) in departments and business units that play critical roles in decision-making for services or businesses within the scope of the information protection and personal information protection management system were not included in the certification scope.","Case 3: The development and test systems, developer PCs, test devices, and development organizations related to the development work for information systems and personal information processing systems were omitted from the management system's scope."],"RelatedRegulations": []}],"description": "The organization must set the scope of the management system by considering its core services and the current state of personal information processing, and document the related services, personal information processing tasks, organizations, assets, and physical locations.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.5": {"name": "Policy Establishment","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.1.5 Policy Establishment","Subdomain": "1.1. 
Management System","AuditEvidence": ["Information protection and personal information protection policies/guidelines/procedures (including records of new/revised versions)","Meeting minutes of stakeholder reviews of newly established/revised information protection and personal information protection policies/guidelines/procedures","Internal management plans for personal information","Notifications of new/revised information protection and personal information protection policies/guidelines (via groupware, intranet, etc.)","Minutes of the Information Protection and Personal Information Protection Committee"],"AuditChecklist": ["Has the organization established a top-level information protection and personal information protection policy that serves as the foundation for all information protection and personal information protection activities?","Has the organization established detailed guidelines, procedures, and manuals specifying the methods, processes, and frequencies required to implement the information protection and personal information protection policies?","Are the information protection and personal information protection policies and implementation documents approved by the CEO or by someone delegated by the CEO when newly established or revised?","Are the latest versions of the information protection and personal information protection policies and implementation documents provided to relevant employees in an easily understandable format?"],"NonComplianceCases": ["Case 1: Although internal regulations stipulate that revisions to the information protection and personal information protection policies must be approved by the Information Protection and Personal Information Protection Committee, recent revisions were made solely based on the approval of the CISO and CPO without presenting the revisions to the committee.","Case 2: The information protection and personal information protection policies and guidelines were recently revised, but these changes were not communicated to relevant departments and employees, leading some departments to continue operating based on outdated guidelines.","Case 3: The information protection and personal information protection policies and guidelines are managed solely by the security department and are not made available for employees to access through bulletin boards or documents."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "The organization must establish and document information protection and personal information protection policies and implementation documents, clearly stating the organization's information protection and personal information protection guidelines and direction. These policies and implementation documents must be approved by the executive management and communicated in an understandable form to employees and relevant parties.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.6": {"name": "Resource Allocation","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.1.6 Resource Allocation","Subdomain": "1.1. 
Management System","AuditEvidence": ["Annual action plan for information protection and personal information protection activities (including budget and personnel plans)","Reports on the results of information protection and personal information protection activities","Records of investments in information protection and personal information protection","Information protection and personal information protection organization chart"],"AuditChecklist": ["Has the organization secured personnel with expertise in the fields of information protection and personal information protection?","Has the organization evaluated and allocated the necessary resources, including budget and personnel, for the effective implementation and continuous operation of the information protection and personal information protection management system?","Has the organization established and implemented an annual detailed action plan for information protection and personal information protection, and conducted audits, analyses, and evaluations of the results?"],"NonComplianceCases": ["Case 1: The organization assembled an information protection and personal information protection team, but the team consisted only of personnel without expertise in information protection or IT, resulting in inadequate security staffing.","Case 2: The CEO failed to allocate sufficient resources, such as budget and security solutions, for implementing the technical and managerial safeguards required for personal information processing systems.","Case 3: After obtaining certification, the organization significantly reduced personnel and budget support, reassigned existing staff to other departments, and repurposed part of the budget for other uses."],"RelatedRegulations": []}],"description": "The CEO must allocate the necessary resources, including budget and personnel with expertise in the fields of information protection and personal information protection, for the effective implementation and continuous operation of the management system.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.2.1": {"name": "Identification of Information Assets","checks": {"macie_is_enabled": "PASS","resourceexplorer2_indexes_found": "PASS","config_recorder_all_regions_enabled": null,"organizations_account_part_of_organizations": null,"organizations_tags_policies_enabled_and_attached": null},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.2.1 Identification of Information Assets","Subdomain": "1.2. 
Risk Management","AuditEvidence": ["Information asset and personal information asset classification criteria","Information asset and personal information asset list (from asset management system screen)","Information asset and personal information security levels","Asset audit details","Risk analysis report (including asset identification)"],"AuditChecklist": ["Has the organization established classification criteria for information assets and identified all assets within the scope of the information protection and personal information protection management system, maintaining them in a list?","For the identified information assets, does the organization determine their importance by considering legal requirements and their impact on operations, and assign security levels?","Does the organization regularly review the status of information assets to keep the list up-to-date?"],"NonComplianceCases": ["Case 1: The list of assets within the scope of the information protection and personal information protection management system omits internal information leakage control systems, such as print security, document encryption, and USB media control, which are used to manage PCs handling important information and personal information.","Case 2: Personal information provided by third parties within the scope of the information protection and personal information protection management system has not been identified as an asset.","Case 3: The asset classification criteria in the internal guidelines and the classification criteria in the asset management register are inconsistent.","Case 4: Although on-premises assets have been identified, assets related to externally entrusted IT services (web hosting, server hosting, cloud, etc.) have been omitted (only for assets within the certification scope).","Case 5: The backup server storing unique identification information and other personal data has been classified with a low confidentiality rating, raising concerns about the reasonableness and reliability of the importance assessment."],"RelatedRegulations": []}],"description": "Organizations must establish classification criteria for information assets according to the characteristics of their operations, identify and classify all information assets within the scope of the management system, assess their importance, and maintain an up-to-date list.","checks_status": {"fail": 0,"pass": 2,"total": 5,"manual": 0}},"1.2.2": {"name": "Status and Flow Analysis","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.2.2 Status and Flow Analysis","Subdomain": "1.2. 
Risk Management","AuditEvidence": ["Information service status table","Information service workflow charts and process maps","Personal information processing status table (for ISMS-P certification)","Personal information flowcharts (for ISMS-P certification)"],"AuditChecklist": ["Has the organization identified and documented the status and workflows of information services across all areas of the management system?","Has the organization identified and documented the status of personal information processing within the scope of the management system, and mapped out personal information flows in flowcharts?","Does the organization regularly review procedures and workflows in response to changes in services, operations, and information assets, and keep the flowcharts and related documents up-to-date?"],"NonComplianceCases": ["Case 1: There are no documents outlining the workflows and procedures for major services within the scope of the management system.","Case 2: The personal information flowchart contains significant discrepancies from the actual personal information flow, or important personal information flows are missing.","Case 3: After the initial creation of the personal information flowchart, it has not been updated to reflect changes in the personal information flow."],"RelatedRegulations": []}],"description": "Organizations must analyze the status of information services and personal information processing across all areas of the management system, document the procedures and workflows, and review them regularly to maintain their accuracy.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.2.3": {"name": "Risk Assessment","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.2.3 Risk Assessment","Subdomain": "1.2. 
Risk Management","AuditEvidence": ["Risk management guidelines","Risk management manuals/guides","Risk management plan","Risk assessment report","Minutes of the Information Protection and Personal Information Protection Committee","Minutes of the Information Protection and Personal Information Protection Working Group","Information asset and personal information asset list","Information service and personal information flowcharts"],"AuditChecklist": ["Has the organization defined methods for identifying and assessing risks that could arise from various aspects, depending on the characteristics of the organization or service?","Does the organization annually develop a risk management plan that specifies the personnel, timeline, targets, methods, and budget for risk management activities?","Does the organization conduct regular or ad-hoc risk assessments at least once a year according to the risk management plan?","Has the organization established an acceptable target risk level and identified risks that exceed that level?","Are the results of risk identification and assessment reported to the executives?"],"NonComplianceCases": ["Case 1: The risk management plan specifies the risk assessment period and the targets and methods for risk management, but lacks details on the personnel and budget required for execution.","Case 2: While a risk assessment was conducted in the previous year, no risk assessment was conducted this year due to a lack of changes in assets.","Case 3: The organization conducted risk identification and assessment according to the risk management plan, but failed to assess the risks of important information assets within the scope, or failed to assess compliance with legal requirements related to information protection.","Case 4: The organization identified and assessed risks and set an acceptable target risk level according to the risk management plan, but did not report and seek approval from the executives (e.g., the Chief Information Security Officer).","Case 5: The method defined in the internal guidelines for risk assessment differs from the method actually used.","Case 6: The organization failed to identify and assess risks in the administrative and physical areas related to the information protection management system, and used only the results of technical vulnerability assessments as the risk assessment outcome.","Case 7: The organization set the acceptable target risk level (DoA) unreasonably high, designating risks that required action as acceptable risks, even though these risks were significant and required immediate or short-term action."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "Organizations must collect threat information by analyzing internal and external environments, select a risk assessment method suitable for the organization, conduct a risk assessment at least once a year across all areas of the management system, and manage acceptable risks with the approval of the executives.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.2.4": {"name": "Selection of Protective Measures","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.2.4 Selection of Protective Measures","Subdomain": "1.2. 
Risk Management","AuditEvidence": ["Information protection and personal information protection implementation plans/risk management plans","Information protection and personal information protection measures","Information protection and personal information protection master plan","Records of management reports and approvals for the information protection and personal information protection implementation plan"],"AuditChecklist": ["Has the organization developed risk treatment strategies (e.g., risk reduction, avoidance, transfer, acceptance) and selected protective measures to address the identified risks?","Has the organization established and reported to management an implementation plan that includes priority, schedule, responsible department/personnel, and budget for the protective measures?"],"NonComplianceCases": ["Case 1: Although an implementation plan for the information protection and personal information protection measures was established, it was not reported to the CISO and CPO.","Case 2: Some risk mitigation actions that were required were missing from the implementation plan.","Case 3: Mandatory legal requirements and risks with high security vulnerabilities were accepted without additional protective measures, instead of being addressed by a risk treatment plan.","Case 4: The rationale and validity for risk acceptance were insufficient, and some risks that could have been addressed immediately or in the short term due to urgency or ease of implementation were classified under long-term plans without specific justification."],"RelatedRegulations": []}],"description": "Based on the results of the risk assessment, appropriate protective measures must be selected to address the identified risks, and an implementation plan including the priority, schedule, responsible department/personnel, and budget for the protective measures must be established and approved by management.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.3.1": {"name": "Implementation of Protective Measures","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.3.1 Implementation of Protective Measures","Subdomain": "1.3. 
Operation of the Management System","AuditEvidence": ["Information protection and personal information protection implementation plans/risk management plans","Information protection and personal information protection measures","Information protection and personal information protection implementation progress reports (including reports to management)","Information protection and personal information protection implementation completion reports (including reports to management)","Information protection and personal information protection operating statements"],"AuditChecklist": ["Are the protective measures effectively implemented according to the implementation plan, and are the implementation results reported to management to verify their accuracy and effectiveness?","Has the organization created and documented detailed operating statements recording the implementation and operation status of protective measures according to the certification standards of the management system?"],"NonComplianceCases": ["Case 1: The results of the completion of the information protection and personal information protection measures were not reported to the CISO and CPO.","Case 2: The risk action implementation result report indicated 'completed,' but related risks still existed, or the accuracy and effectiveness of the implementation results were not verified.","Case 3: Risks classified as medium- to long-term in the previous year's information protection measures implementation plan were not implemented in the current year, or the results were not reviewed and verified by management.","Case 4: The actual operating status described in the operating statements did not match reality, and related documents, approvals, and meeting minutes mentioned in the operating statements did not exist.","Case 5: Although the implementation results were reported to the CISO and CPO, some incomplete items were not followed up with reasons and corrective actions."],"RelatedRegulations": []}],"description": "The selected protective measures must be effectively implemented according to the implementation plan, and management must verify the accuracy and effectiveness of the implementation results.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.3.2": {"name": "Sharing of Protective Measures","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.3.2 Sharing of Protective Measures","Subdomain": "1.3. 
Operation of the Management System","AuditEvidence": ["List of operating or implementing departments for each protective measure","Evidence of internal sharing of information protection and personal information protection plans (e.g., notices, training materials, shared documents)"],"AuditChecklist": ["Has the organization clearly identified the departments and personnel responsible for the operation or implementation of the protective measures?","Has the organization shared or provided training to the departments and personnel responsible for the operation or implementation of the protective measures?"],"NonComplianceCases": ["Case 1: Although protective measures were developed and implemented, the relevant information was not sufficiently shared or provided through training, so the departments or personnel responsible for the actual operation or implementation were unaware of the details."],"RelatedRegulations": []}],"description": "The departments and personnel responsible for the actual operation or implementation of the protective measures must be identified, and the related information must be shared and provided through training to ensure continuous operation.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.3.3": {"name": "Operation Status Management","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.3.3 Operation Status Management","Subdomain": "1.3. Operation of the Management System","AuditEvidence": ["Annual plan for information protection and personal information protection","Operation status report for information protection and personal information protection","Results of inspections on the implementation of information protection and personal information protection activities"],"AuditChecklist": ["Are information protection and personal information protection activities that need to be performed periodically or continuously for the operation of the management system documented and managed?","Does management periodically review the effectiveness of the operation of the management system and manage it accordingly?"],"NonComplianceCases": ["Case 1: Failure to document activities that are required to be performed periodically or continuously as part of the operation of the information protection and personal information protection management system.","Case 2: Although documentation of the operational status of the information protection and personal information protection management system has been completed, periodic reviews have not been conducted, resulting in the omission of some required monthly and quarterly activities, and some activities have not been verified for implementation."],"RelatedRegulations": ["Personal Information Protection Act, Article 31 (Designation of a Personal Information Protection Officer)","Information and Communications Network Act, Article 45-3 (Designation of a Chief Information Security Officer, etc.)"]}],"description": "According to the management system established by the organization, operational activities that must be performed continuously or periodically must be recorded and managed in a way that allows identification and tracking, and management must regularly review the effectiveness of operational activities and manage them accordingly.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.4.1": {"name": "Review of Legal Requirements Compliance","checks": {},"status": "PASS","attributes": [{"Domain": "1. 
Establishment and Operation of the Management System","Section": "1.4.1 Review of Legal Requirements Compliance","Subdomain": "1.4. Inspection and Improvement of the Management System","AuditEvidence": ["Records of legal compliance reviews","Records of reviews and revisions of information protection and personal information protection policies and guidelines","Comparison tables of revised policies and guidelines","Internal sharing documents of legal revisions","Proof of personal information liability insurance or equivalent guarantees (e.g., cyber insurance contracts)","Information protection disclosure records"],"AuditChecklist": ["Is the organization regularly identifying and maintaining up-to-date legal requirements related to information protection and personal information protection?","Is the organization conducting regular reviews of compliance with legal requirements at least once a year?"],"NonComplianceCases": ["Case 1: Although the Information and Communications Network Act and Personal Information Protection Act were recently revised, the organization did not review the impact of the changes on the organization, and as a result, the policy documents, implementation documents, and legal compliance checklists were not updated, leading to inconsistencies between the documents and the law.","Case 2: Although legal requirements that the organization must comply with were amended, the organization failed to conduct legal compliance reviews for an extended period.","Case 3: Inadequate legal compliance reviews resulted in numerous violations of the Personal Information Protection Act and other regulations.","Case 4: The organization was subject to the Personal Information Liability Compensation Guarantee system under the Personal Information Protection Act but failed to recognize this, resulting in non-compliance with insurance or reserve requirements. In cases where insurance was obtained, the organization failed to meet the minimum coverage requirements based on the number of users and revenue.","Case 5: Although the organization was required by law to disclose information protection status, it failed to do so within the legally mandated timeframe.","Case 6: The organization used a mobile app to receive personal location information from a location-based service provider, but failed to report its location-based service business.","Case 7: A foreign personal information controller without a domestic address or business office, whose personal information of domestic subjects stored and managed in the previous three months averaged over one million persons per day, failed to appoint a domestic representative in writing as required."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "The organization must regularly identify and reflect legal requirements related to information protection and personal information protection and continuously review whether compliance is being maintained.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.4.2": {"name": "Management System Audit","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.4.2 Management System Audit","Subdomain": "1.4. 
Inspection and Improvement of the Management System","AuditEvidence": ["Management system audit plan (internal audit plan, internal inspection plan)","Management system audit report","Minutes of the Information Protection and Personal Information Protection Committee"],"AuditChecklist": ["Has the organization established a management system audit plan that includes the criteria, scope, frequency, and qualifications for audit personnel to audit the management system's effectiveness in accordance with legal requirements and established policies?","Has the organization conducted audits at least once a year with personnel who have independence, objectivity, and expertise, and reported any identified issues to management?"],"NonComplianceCases": ["Case 1: The audit team included personnel from the IT department, which was also the subject of the audit, compromising the independence of the audit.","Case 2: Although a management system audit was conducted this year, the audit scope was limited to certain areas, failing to cover the full scope of the information protection and personal information protection management system.","Case 3: The management system audit team was composed solely of internal staff and external consultants who participated in the development of the management system, compromising the independence of the audit."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "The organization must audit its management system at least once a year with a team of personnel who possess independence and expertise, to ensure the system is operating effectively in accordance with internal policies and legal requirements, and report any identified issues to management.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.4.3": {"name": "Management System Improvement","checks": {},"status": "PASS","attributes": [{"Domain": "1. Establishment and Operation of the Management System","Section": "1.4.3 Management System Improvement","Subdomain": "1.4. 
Management System Inspection and Improvement","AuditEvidence": ["Management system inspection result reports","Management system inspection action plans and implementation result reports","Preventive measures","Effectiveness measurement indicators and results (including reports to management)"],"AuditChecklist": ["Are the root causes of the issues identified during legal compliance reviews and management system inspections analyzed, and are preventive and improvement measures established and implemented?","Are there criteria and procedures in place to verify the accuracy and effectiveness of preventive and improvement results?"],"NonComplianceCases": ["Case 1: The same issues in the operation of the information protection and personal information protection management system, identified during internal inspections, are repeated each time.","Case 2: Although internal regulations require the analysis of root causes and the establishment of preventive measures for issues identified during internal inspections, recent internal inspections failed to include root cause analysis and preventive measures.","Case 3: Preventive measures for the issues in the management system were established, and key performance indicators (KPIs) were developed for periodic measurement, but the results were not reported to management for a long period.","Case 4: Action plans were not established or the completion of actions was not confirmed for issues identified during management system inspections."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "The root causes of the issues identified during legal compliance reviews and management system inspections must be analyzed, and preventive measures must be established and implemented. The management must confirm the accuracy and effectiveness of the improvement results.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.1.1": {"name": "Policy Maintenance","checks": {},"status": "PASS","attributes": [{"Domain": "2. Control Measures Requirements","Section": "2.1.1 Policy Maintenance","Subdomain": "2.1. 
Policies, Organization, and Asset Management","AuditEvidence": ["Information protection and personal information protection policies and implementation documents (e.g., guidelines, procedures, manuals)","Results of regular and ad hoc validity reviews of policies and guidelines","Meeting minutes and circulation records with relevant departments regarding policies and guidelines","Revision history of policies and guidelines"],"AuditChecklist": ["Has the organization established and implemented a procedure for regularly reviewing the validity of information protection and personal information protection policies and implementation documents?","When there are significant changes in the internal and external environment, are the impacts on information protection and personal information protection policies and implementation documents reviewed and revised as necessary?","Are stakeholders consulted when revising information protection and personal information protection policies and implementation documents?","Is there a system in place to track the revision history of information protection and personal information protection policies and implementation documents?"],"NonComplianceCases": ["Case 1: There is inconsistency between password setting rules in guidelines and procedures.","Case 2: Information protection activities (e.g., training, encryption, backup) have different targets, frequencies, levels, and methods described in internal regulations, guidelines, and procedures, leading to inconsistency.","Case 3: A new database access control solution was introduced to effectively record and manage access and operation logs for the database, but internal security guidelines such as those for security systems and database security management have not been updated to reflect these new controls.","Case 4: Although the personal information protection policy was revised, the policy implementation date was not specified, and information such as the author, creation date, and approval date were missing from the relevant policy.","Case 5: Although significant changes occurred in laws and regulations related to personal information protection, these changes were not reviewed or reflected in the personal information protection policy and implementation documents."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "Information protection and personal information protection policies and implementation documents must be periodically reviewed and, if necessary, revised in response to changes in laws and regulations, policies of higher organizations and related agencies, and changes in the internal and external environment. These changes must be documented and tracked.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.1.2": {"name": "Organization Maintenance","checks": {},"status": "PASS","attributes": [{"Domain": "2. Control Measures Requirements","Section": "2.1.2 Organization Maintenance","Subdomain": "2.1. 
Policies, Organization, and Asset Management","AuditEvidence": ["Information protection and personal information protection organization chart","Job descriptions for the information protection and personal information protection organization","Assignment tables for information protection and personal information protection roles","Information protection and personal information protection policies/guidelines and internal management plans","Information protection and personal information protection communication management plans","Records of communication activities (e.g., monthly/weekly reports, internal notices)","Communication channels (e.g., information protection portal, bulletin boards)"],"AuditChecklist": ["Are the roles and responsibilities of those responsible for and involved in information protection and personal information protection clearly defined?","Has the organization established a system for evaluating the activities of those responsible for and involved in information protection and personal information protection?","Has the organization established and implemented systems and procedures for communication between the information protection and personal information protection organization and its members?"],"NonComplianceCases": ["Case 1: Although the roles and responsibilities of the CISO, CPO, and related personnel are defined in internal guidelines and job descriptions, they do not align with the actual operating status.","Case 2: There are no goals, criteria, or performance indicators in place for the periodic evaluation of the activities of the CISO and related personnel.","Case 3: Although internal guidelines require departments to set KPIs related to information protection for the information protection officers in each department to be reflected in performance evaluations, no information protection-related KPIs were set for any of the departmental information protection officers.","Case 4: Although the CISO and CPO are designated, the roles and responsibilities required by law are not specifically defined in internal guidelines or job descriptions."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures), Article 31 (Designation of a Personal Information Protection Officer)","Information and Communications Network Act, Article 45-3 (Designation of a Chief Information Security Officer, etc.)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "Roles and responsibilities related to information protection and personal information protection must be assigned to all members of the organization, and systems must be established for evaluating these activities and for communication between members and departments.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.1.3": {"name": "Management of Information Assets","checks": {"macie_is_enabled": "PASS","resourceexplorer2_indexes_found": "PASS","config_recorder_all_regions_enabled": null,"account_maintain_current_contact_details": null,"organizations_account_part_of_organizations": null,"organizations_tags_policies_enabled_and_attached": null,"account_security_contact_information_is_registered": null,"account_security_questions_are_registered_in_the_aws_account": null,"account_maintain_different_contact_details_to_security_billing_and_operations": null},"status": "PASS","attributes": [{"Domain": "2. 
Protection Requirements","Section": "2.1.3 Management of Information Assets","Subdomain": "2.1. Policy, Organization, Asset Management","AuditEvidence": ["List of information assets (designation of responsible persons and managers)","Handling procedures for information assets (documents, information systems, etc.)","Information asset management system screen","Security classification indicators for information assets"],"AuditChecklist": ["Are handling procedures (creation, introduction, storage, use, disposal) and protection measures defined and implemented according to the security classification of information assets?","Have responsible persons and managers been designated for identified information assets?"],"NonComplianceCases": ["Case 1: Although internal guidelines require security classification to be indicated on documents, this has not been followed.","Case 2: Responsible persons and managers for information assets have not been identified, or the asset list has not been updated, leading to changes in responsible personnel due to resignations, transfers, etc., not being reflected.","Case 3: Although security classifications were assigned to identified information assets after evaluating their importance, handling procedures based on the security classification were not defined."],"RelatedRegulations": []}],"description": "The procedures and protection measures for handling information assets according to their purpose and importance must be established and implemented, and the responsibilities for each asset must be clearly defined and managed.","checks_status": {"fail": 0,"pass": 2,"total": 9,"manual": 0}},"2.2.1": {"name": "Designation and Management of Key Personnel","checks": {"iam_support_role_created": null,"organizations_delegated_administrators": null,"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.2.1 Designation and Management of Key Personnel","Subdomain": "2.2. 
Personnel Security","AuditEvidence": ["Criteria for key duties","List of key personnel","List of personal information handlers","Account and authority management ledger for key information systems and personal information processing systems","Management status of key personnel (e.g., training results, security pledges)"],"AuditChecklist": ["Are the criteria for key duties, such as handling personal information and important information or accessing key systems, clearly defined?","Are employees and external personnel performing key duties designated as key personnel, and is the list kept up-to-date?","Are personnel handling personal information designated as personal information handlers, and is the list kept up-to-date?","Is the designation of key personnel and personal information handlers minimized based on business needs, and are management plans established and implemented?"],"NonComplianceCases": ["Case 1: Although a list of key personnel (e.g., personal information handlers, secret information managers) has been created, some employees who handle large volumes of personal information (e.g., DBAs, DLP managers) were omitted.","Case 2: Although the list of key personnel and personal information handlers is being managed, it has not been updated, including resigned employees and newly hired personnel.","Case 3: Personal information handler privileges were granted collectively to entire departments, leading to personnel without the need to handle personal information being excessively designated as personal information handlers.","Case 4: Although internal guidelines require approval from the security team and the signing of security pledges when granting key personnel privileges, many key personnel were registered without following this process."],"RelatedRegulations": ["Personal Information Protection Act, Article 28 (Supervision of Personal Information Handlers), Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "Criteria and management plans for key duties, such as handling personal information and important information or accessing key systems, must be established, and the number of key personnel must be minimized and their list kept up-to-date.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"2.2.2": {"name": "Separation of Duties","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.2.2 Separation of Duties","Subdomain": "2.2. 
Personnel Security","AuditEvidence": ["Guidelines on the separation of duties (e.g., personnel security guidelines)","Job descriptions (e.g., system operation/management, development/operation)","Status of supplementary controls when duties are not separated"],"AuditChecklist": ["Are criteria for the separation of duties established and applied to prevent potential harm from the misuse or abuse of authority?","If separation of duties is difficult, have supplementary controls such as mutual review between personnel, regular monitoring and approval of changes by senior management, and ensuring accountability been established?"],"NonComplianceCases": ["Case 1: Although the organization has sufficient size and personnel to enable separation of duties, the established internal separation of duties criteria were not followed due to operational convenience.","Case 2: Although the organization received approval from senior management to combine development and operation duties due to the organization's characteristics, supplementary control measures such as mutual review between personnel, regular monitoring and review of changes by senior management, and ensuring accountability were not established."],"RelatedRegulations": []}],"description": "Criteria for the separation of duties must be established and applied to prevent potential harm from the misuse or abuse of authority. If separation of duties is unavoidable, supplementary measures must be established and implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.3": {"name": "Security Pledge","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.2.3 Security Pledge","Subdomain": "2.2. Human Security","AuditEvidence": ["Security and personal information protection pledge (for employees and external personnel)","Confidentiality agreement (for resigned employees)"],"AuditChecklist": ["When hiring new personnel, is there a signed security and personal information protection agreement that specifies their responsibilities?","If temporary or external personnel are granted access to information assets, is there a signed agreement outlining their responsibilities for information protection and confidentiality?","Upon the resignation of an employee, is a separate confidentiality agreement obtained?","Are security, personal information protection, and confidentiality agreements stored safely and managed in a way that they can be easily retrieved when necessary?"],"NonComplianceCases": ["Case 1: While it is stipulated that new hires must sign a security pledge, some recently hired employees have not completed the pledge.","Case 2: Although employees sign a security pledge, external personnel with direct access to information systems have not signed such an agreement.","Case 3: Submitted security and personal information protection pledges are poorly managed, with documents left accessible on desks where unauthorized personnel can access them.","Case 4: Although personal information handlers have signed security pledges, the content only covers confidentiality and does not include specific responsibilities related to personal information protection."],"RelatedRegulations": []}],"description": "Employees, temporary staff, or external personnel handling information assets or granted access must sign a security and confidentiality agreement in accordance with internal policies.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.4": {"name": "Awareness and 
Training","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.2.4 Awareness and Training","Subdomain": "2.2. Human Security","AuditEvidence": ["Information protection and personal information protection training plan","Training result report","General and job-specific training materials","List of training attendees"],"AuditChecklist": ["Is an annual training plan approved by management, detailing the timing, duration, target audience, content, and method of information protection and personal information protection training?","Are all employees and external personnel within the scope of the management system provided with regular training at least once per year, and are additional training sessions provided when there are significant changes in relevant laws and regulations?","Is information protection and personal information protection training provided to new hires and external personnel before they begin their duties?","Are IT, information protection, and personal information protection staff receiving specialized training to enhance their job-specific expertise?","Are training records maintained, and is the effectiveness of the training evaluated and reflected in future training plans?"],"NonComplianceCases": ["Case 1: Although an annual information protection and personal information protection training plan was established and implemented last year, no such plan was established for the current year without a valid reason.","Case 2: The annual information protection and personal information protection training plan includes the frequency and target audience but lacks details such as schedule, content, and method.","Case 3: Although the annual training plan includes general personal information awareness training for all employees, it does not include job-specific training for those responsible for personal information protection, such as the personal information protection officer.","Case 4: Upon reviewing the training plan and result reports, it was found that certain external contractors (e.g., cleaning staff and security guards who have access to critical facilities within the certification scope) were not included in the training.","Case 5: Although information protection and personal information protection training was conducted, some records (e.g., training materials, attendance lists, evaluation surveys, result reports) were not retained.","Case 6: There is no system in place to identify employees who did not complete the required training or to provide make-up sessions for them (e.g., additional training, online courses)."],"RelatedRegulations": ["Personal Information Protection Act, Article 26 (Limitation on the Processing of Personal Information by Outsourcing), Article 28 (Supervision of Personal Information Handlers), Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans)"]}],"description": "Organizations must establish and operate an annual awareness and training plan to ensure that employees and related external personnel understand the organization's management system and policies and acquire the necessary job-specific expertise. 
The effectiveness of this plan must be evaluated and reflected in future plans.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.5": {"name": "Management of Resignation and Job Changes","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.2.5 Management of Resignation and Job Changes","Subdomain": "2.2. Human Security","AuditEvidence": ["Procedures for resignation and job changes","Asset (account) return management ledger upon resignation","Security checklists and inspection records for resigned employees"],"AuditChecklist": ["Are personnel changes (e.g., resignation, job changes, department transfers, leave of absence) shared among HR, information protection, personal information protection, and IT system operations departments?","Are procedures in place and implemented to promptly return information assets, revoke or adjust access rights, and confirm results when an employee (including temporary staff and external contractors) resigns or changes roles?"],"NonComplianceCases": ["Case 1: Accounts and access rights for personnel no longer handling personal information due to job changes remain active in the personal information processing system.","Case 2: No records of asset returns or access rights revocation procedures were found for recently resigned key personnel and personal information handlers.","Case 3: While asset returns are properly managed for resigned employees, the security check and resignation confirmation forms required by HR regulations are not being completed.","Case 4: Although access rights to personal information processing systems were revoked promptly upon the resignation of personal information handlers, access rights to systems like physical access control and VPN were not revoked in a timely manner."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights)"]}],"description": "Procedures must be established and managed for the return of assets, the revocation or adjustment of accounts and access rights, and confirmation of results when there is a resignation, job change, or leave of absence, involving departments such as HR, information protection, personal information protection, and IT.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.6": {"name": "Actions in Case of Security Violations","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.2.6 Actions in Case of Security Violations","Subdomain": "2.2. 
Human Security","AuditEvidence": ["HR regulations (disciplinary measures for violations of information protection and personal information protection regulations)","Records of disciplinary actions for violations of information protection and personal information protection guidelines","Incident case studies (company-wide notices, training materials)"],"AuditChecklist": ["Has the organization established disciplinary measures for employees and relevant external parties in case of violations of information protection and personal information protection responsibilities and obligations under laws, regulations, and internal policies?","When violations of information protection and personal information protection are detected, are actions taken in accordance with internal procedures?"],"NonComplianceCases": ["Case 1: No disciplinary measures or procedures are included in internal regulations for handling violations of information protection and personal information protection regulations.","Case 2: Although warning messages are sent to those who violate policies detected by security systems (e.g., DLP, database access control system, internal information leakage control system), follow-up actions such as explanations, additional investigations, or disciplinary actions are not carried out in accordance with internal regulations."],"RelatedRegulations": []}],"description": "In the event that employees or relevant external parties violate laws, regulations, or internal policies, the organization must establish and implement procedures to take appropriate actions.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.1": {"name": "Management of External Parties","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.3.1 Management of External Parties","Subdomain": "2.3. 
External Security","AuditEvidence": ["List of outsourced services and external facilities/services","Outsourcing contracts","Risk analysis reports and protective measures","Outsourcing security management guidelines, checklists, etc."],"AuditChecklist": ["Has the organization identified the status of outsourcing and the use of external facilities and services within the scope of the management system?","Has the organization identified the legal requirements and risks associated with outsourcing and the use of external facilities and services, and established appropriate protective measures?"],"NonComplianceCases": ["Case 1: Although the organization manages a list of outsourced services and external facilities/services as required by internal regulations, the list is outdated and does not reflect changes made to vendors several months ago.","Case 2: The organization has migrated some personal information processing systems to external cloud services within the scope of the management system, but no identification or risk assessment has been performed."],"RelatedRegulations": ["Personal Information Protection Act, Article 26 (Restrictions on the Processing of Personal Information by Outsourcing)","Information and Communications Network Act, Article 50-3 (Entrustment of the Transmission of Commercial Information for Profit)"]}],"description": "When outsourcing part of the work (e.g., handling personal information, information protection, operating or developing information systems) or using external facilities or services (e.g., data centers, cloud services, application services), the organization must identify the current status, understand the legal requirements and risks arising from external organizations or services, and establish appropriate protective measures.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.2": {"name": "Security in Contracts with External Parties","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.3.2 Security in Contracts with External Parties","Subdomain": "2.3. 
External Security","AuditEvidence": ["Outsourcing contracts","Information protection and personal information protection agreements (agreements, annexes)","Internal guidelines on outsourcing","RFPs (Requests for Proposals), evaluation forms related to the selection of outsourcing vendors"],"AuditChecklist": ["When selecting external services or outsourcing vendors related to the handling of important information and personal information, does the organization follow procedures to consider the vendors' capabilities in information protection and personal information protection?","Has the organization identified the information protection and personal information protection requirements associated with the use of external services and outsourcing, and specified them in contracts or agreements?","When outsourcing the development of information systems and personal information processing systems, has the organization specified the information protection and personal information protection requirements that must be followed during development in the contract?"],"NonComplianceCases": ["Case 1: No outsourcing contract exists for external vendors performing IT operations, development, or personal information processing tasks.","Case 2: The outsourcing contract with an external vendor handling personal information does not include some items required by the Personal Information Protection Act (e.g., management and supervision provisions).","Case 3: Although infrastructure operation and part of personal information processing tasks are outsourced to external vendors, the contract does not specify security requirements related to the nature of the outsourced work, but only includes general provisions on confidentiality and liability for damages."],"RelatedRegulations": ["Personal Information Protection Act, Article 26 (Restrictions on the Processing of Personal Information by Outsourcing)"]}],"description": "When using external services or outsourcing work to external parties, the organization must identify the information protection and personal information protection requirements and specify them in contracts or agreements.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.3": {"name": "External Party Security Implementation Management","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.3.3 External Party Security Implementation Management","Subdomain": "2.3. 
External Party Security","AuditEvidence": ["Security inspection results for external parties and contractors","Training details for external parties and contractors (training outcomes, attendee list, training materials, etc.)","Personal information outsourcing contract","Evidence of consent for re-outsourcing of personal information processing tasks"],"AuditChecklist": ["Are periodic inspections or audits conducted to ensure external parties comply with information protection and personal information protection requirements specified in contracts, agreements, and internal policies?","When issues are identified during inspections or audits of external parties, are improvement plans established and implemented?","If a contractor entrusted with personal information processing re-outsources related tasks to a third party, does the contractor obtain the principal's consent?"],"NonComplianceCases": ["Case 1: Failure to regularly conduct security inspections of external contractors who perform IT development and operations tasks on-site.","Case 2: Sending a notification to contractors entrusted with personal information processing to conduct security training, but failing to verify whether the training has been conducted.","Case 3: Allowing contractors to perform their own security inspections and report the results, without a verification process to ensure the inspections were properly conducted, thus undermining the reliability of the inspection results.","Case 4: Allowing contractors to re-outsource personal information processing tasks to a third party without the principal's consent.","Case 5: Failure to supervise contractors entrusted with transmitting commercial information for profit."],"RelatedRegulations": ["Personal Information Protection Act, Article 26 (Restrictions on the Outsourcing of Personal Information Processing)","Information and Communications Network Act, Article 50-3 (Outsourcing of the Transmission of Commercial Information for Profit)"]}],"description": "Security measures specified in contracts, agreements, and internal policies must be regularly inspected or audited to ensure external parties comply with information protection and personal information protection requirements.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.4": {"name": "Security for External Party Contract Changes and Expiry","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.3.4 Security for External Party Contract Changes and Expiry","Subdomain": "2.3. 
External Party Security","AuditEvidence": ["Information protection and personal information protection agreements","Confidentiality agreements","Information and personal information destruction agreements","Internal policies and guidelines related to the termination of external party contracts"],"AuditChecklist": ["Has the organization established and implemented security measures to ensure the return of information assets, deletion of information system access accounts, and the acquisition of confidentiality agreements in accordance with official procedures when an external party contract expires, a task is completed, or there is a personnel change?","When an external party contract expires, has the organization established and implemented procedures to confirm whether the external party holds any sensitive or personal information related to the outsourced task, and to retrieve or destroy such information?"],"NonComplianceCases": ["Case 1: Failure to delete accounts and permissions for external parties after their contract has expired, allowing access to certain information systems.","Case 2: During an outsourcing project, failure to take appropriate measures for some contractors who were replaced or whose contracts expired, including failing to obtain security agreements as required by internal regulations.","Case 3: After terminating a contract with a contractor entrusted with personal information processing, failure to verify whether the contractor destroyed any personal information they held."],"RelatedRegulations": ["Personal Information Protection Act, Article 26 (Restrictions on the Outsourcing of Personal Information Processing)","Information and Communications Network Act, Article 50-3 (Outsourcing of the Transmission of Commercial Information for Profit)"]}],"description": "When an external party contract expires, the task is completed, or there is a personnel change, security measures such as returning provided information assets, deleting information system access accounts, destroying sensitive information, and obtaining confidentiality agreements for acquired information must be implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.1": {"name": "Designation of Protected Zones","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.4.1 Designation of Protected Zones","Subdomain": "2.4. 
Physical Security","AuditEvidence": ["Physical security guidelines (criteria for designating protected zones)","List of designated protected zones","Protected zone signage","List of protection measures for each zone"],"AuditChecklist": ["Has the organization established criteria for designating physical protection zones such as controlled areas, restricted areas, and reception areas to protect personal and sensitive information, documents, storage media, key facilities, and systems from physical and environmental threats?","Has the organization designated physical protection zones in accordance with the criteria and established and implemented protection measures for each zone?"],"NonComplianceCases": ["Case 1: Although internal physical security guidelines state that areas where personal information is stored and processed must be designated as controlled zones, certain document storage rooms containing membership application forms were omitted from the list of controlled zones.","Case 2: Internal physical security guidelines require that controlled zones be marked with specific signs, but some controlled zones do not have the required signage."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 10 (Physical Safety Measures)"]}],"description": "To protect personal and sensitive information, documents, storage media, key facilities, and systems from physical and environmental threats, physical protection zones such as controlled areas, restricted areas, and reception areas must be designated, and protection measures for each zone must be established and implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.2": {"name": "Access Control","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protective Measures Requirements","Section": "2.4.2 Access Control","Subdomain": "2.4. Physical Security","AuditEvidence": ["Access logbook and entry logs","Access registration application form and approval records","Entry record review report","Access control system management screen (status of registered personnel, etc.)"],"AuditChecklist": ["Is access to protected areas controlled so that only authorized personnel are allowed to enter according to access procedures?","Are entry records for internal and external personnel for each protected area retained for a certain period, and are entry records and access permissions reviewed periodically?"],"NonComplianceCases": ["Case 1: Although control areas are defined, protective measures are established, and employees with access are managed, the entry records are not reviewed periodically, resulting in many inactive personnel (due to retirement, transfer, etc.) 
having long periods of no entry.","Case 2: Although access control devices are installed in controlled areas such as data centers and document storage rooms, they are left open for extended periods without valid reasons or approval.","Case 3: Some external partner employees are excessively granted all-area access cards for unrestricted entry."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 10 (Physical Safety Measures)"]}],"description": "Access to protected areas must be restricted to authorized personnel only, and entry and access logs should be reviewed periodically to ensure accountability.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.3": {"name": "Information System Protection","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protective Measures Requirements","Section": "2.4.3 Information System Protection","Subdomain": "2.4. Physical Security","AuditEvidence": ["Data processing facility diagram","Information system layout","Asset list"],"AuditChecklist": ["Are information systems placed in separated locations based on their importance, usage, and characteristics?","Are there measures in place to easily verify the actual physical location of the information systems?","Are power and communication cables protected from physical damage and electrical interference from external sources?"],"NonComplianceCases": ["Case 1: The system layout is not updated to reflect the latest changes, making it difficult to quickly identify the information system that has experienced a failure.","Case 2: Many cables are tangled and not properly organized on the server room floor or in racks, increasing the risk of failure due to electrical interference, damage, leakage, or negligence."],"RelatedRegulations": []}],"description": "Information systems should be arranged considering their importance and characteristics to reduce environmental threats, harmful factors, and unauthorized access, and communication and power cables should be protected from damage.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.4": {"name": "Operation of Protective Facilities","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protective Measures Requirements","Section": "2.4.4 Operation of Protective Facilities","Subdomain": "2.4. 
Physical Security","AuditEvidence": ["Physical security guidelines (related to protective facilities)","Data center facility status and inspection checklist","IDC outsourcing contract, SLA, etc."],"AuditChecklist": ["Are necessary facilities established and operational procedures set up based on the importance and characteristics of each protected area to prevent disasters such as fire, flood, and power failure caused by human error or natural disasters?","If operating outsourced integrated data centers (IDC), are physical security requirements included in the contract, and is the operational status periodically reviewed?"],"NonComplianceCases": ["Case 1: In some protected areas, such as the main office data center, the required protective facilities specified in internal guidelines are not installed.","Case 2: Although protective facilities such as UPS and fire suppression systems are in place in the data center, operational and inspection standards for the related facilities are not established.","Case 3: Although temperature and humidity control devices were installed in the data center according to operational guidelines, insufficient capacity means that the standard temperature and humidity levels are not maintained, increasing the risk of failure."],"RelatedRegulations": ["Information and Communications Network Act, Article 46 (Protection of Integrated Data Centers)","Guidelines for the Protection of Integrated Data Centers","Fire Facility Installation and Management Act, Article 12 (Management of Fire Protection Facilities in Specific Fire Protection Objects), Article 16 (Management of Evacuation Facilities, Fire Zones, and Fire Protection Facilities)"]}],"description": "Based on the importance and characteristics of the information systems located in protected areas, protective facilities such as temperature and humidity control, fire detection, firefighting equipment, leak detection, UPS, emergency generators, and dual power lines should be established and operated according to operational procedures.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.5": {"name": "Operations in Secure Zones","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.4.5 Operations in Secure Zones","Subdomain": "2.4. 
Physical Security","AuditEvidence": ["Operation request forms, operation logs","Entry logs for controlled areas","Records of review of entry and operation logs for controlled areas"],"AuditChecklist": ["When operations within secure zones, such as the introduction and maintenance of information systems, are required, are formal procedures for application and execution of such operations established and implemented?","Are the records of operations within secure zones periodically reviewed to confirm that the operations were carried out in accordance with the control procedures?"],"NonComplianceCases": ["Case 1: The entry log of the data center shows the presence of external maintenance personnel, but there is no record of an operation request or approval for work within the secure zone (i.e., entry and work in the secure zone were carried out without an operation request as required by internal regulations).","Case 2: Although internal regulations state that the records of operations within secure zones must be reviewed at least once per quarter, the review of such records has not been conducted for a long period without a valid reason."],"RelatedRegulations": []}],"description": "Procedures to prevent unauthorized actions and abuse of privileges within secure zones must be established and implemented, and the records of operations should be periodically reviewed.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.6": {"name": "Device Control for Inbound and Outbound","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.4.6 Device Control for Inbound and Outbound","Subdomain": "2.4. Physical Security","AuditEvidence": ["Inbound and outbound application forms for secure zones","Inbound and outbound management logs","Results of the review of inbound and outbound records"],"AuditChecklist": ["Are control procedures established and implemented to prevent security incidents such as information leakage and malware infection when information systems, mobile devices, storage media, etc., are brought into or taken out of secure zones?","Are records maintained and managed in accordance with the inbound and outbound control procedures, and is the compliance with the procedures periodically checked by reviewing the history of inbound and outbound activities?"],"NonComplianceCases": ["Case 1: Although control procedures for the inbound and outbound of mobile computing devices are established, there is no control over the movement of such devices within the controlled area, allowing both internal and external personnel with access to the controlled area to use mobile computing devices without restriction.","Case 2: Although internal guidelines state that inbound and outbound details of IT equipment must be recorded in the operation plan and signed by the person responsible for management, many signatures of responsible managers are missing from the records."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 10 (Physical Safety Measures)"]}],"description": "Procedures to control the inbound and outbound movement of information systems, mobile devices, storage media, etc., within secure zones must be established, implemented, and periodically reviewed.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.7": {"name": "Work Environment Security","checks": {},"status": "PASS","attributes": [{"Domain": 
"2. Protection Requirements","Section": "2.4.7 Work Environment Security","Subdomain": "2.4. Physical Security","AuditEvidence": ["Security inspection reports for offices and shared spaces","Security inspection checklists for offices and shared spaces","Actions taken for non-compliance (e.g., training, rewards and penalties)","Current status of protection measures for printed and copied materials"],"AuditChecklist": ["Are protection measures established and implemented for shared facilities and office equipment such as document storage, shared PCs, multifunction printers, file servers, etc.?","Are protection measures established and implemented to prevent the exposure or leakage of personal and sensitive information through individual work environments such as work PCs, desks, drawers, etc.?","Are appropriate protection measures in place to ensure the safe handling of printed or copied materials containing personal information, such as paper documents?","Is compliance with information protection requirements in both individual and shared work environments periodically reviewed?"],"NonComplianceCases": ["Case 1: Although the internal management plan for personal information specifies that regular security inspections (e.g., clean desk policies) must be conducted, no such inspections have been carried out.","Case 2: Documents containing personal information, such as membership application forms, are stored in an office cabinet without a lock.","Case 3: Employee computers do not have screen savers or passwords set, and important documents have been left on vacationing employees' desks for an extended period.","Case 4: No protection measures are in place for shared PCs installed in shared office spaces such as meeting rooms, resulting in personal information files being stored unencrypted, or security updates not applied, or antivirus software not installed, leaving the systems vulnerable."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 10 (Physical Safety Measures), Article 12 (Safety Measures for Printing and Copying)"]}],"description": "Protection measures such as clean desk policies and regular inspections must be established and implemented to prevent unauthorized exposure or leakage of personal and sensitive information through shared office equipment (e.g., document storage, shared PCs, multifunction printers, file servers) and individual work environments (e.g., work PCs, desks).","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.5.1": {"name": "User Account Management","checks": {"iam_user_accesskey_unused": null,"iam_securityaudit_role_created": null,"iam_user_console_access_unused": null,"iam_policy_no_full_access_to_kms": null,"iam_role_administratoraccess_policy": null,"iam_user_administrator_access_policy": null,"organizations_scp_check_deny_regions": null,"iam_group_administrator_access_policy": null,"iam_policy_allows_privilege_escalation": null,"iam_inline_policy_no_full_access_to_kms": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_policy_attached_only_to_group_or_roles": null,"cognito_user_pool_self_registration_disabled": null,"iam_role_cross_account_readonlyaccess_policy": null,"iam_inline_policy_allows_privilege_escalation": null,"iam_inline_policy_no_administrative_privileges": null,"iam_inline_policy_no_full_access_to_cloudtrail": null,"iam_no_custom_policy_permissive_role_assumption": 
null,"iam_role_cross_service_confused_deputy_prevention": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"iam_customer_unattached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.5.1 User Account Management","Subdomain": "2.5. Authentication and Access Management","AuditEvidence": ["User account and access request forms","User account and access management log or screen","Access classification table for information systems and personal information processing systems","Lists of users, administrators, and personal information handlers for each information system and personal information processing system"],"AuditChecklist": ["Has the organization established and implemented formal procedures for registering, changing, and deleting user accounts and access rights to information systems, personal information, and critical information?","When creating and registering user accounts and access rights to information systems, personal information, and critical information, is access limited to the minimum necessary for each job based on the job-specific access classification system?","When granting users accounts and access rights, are they made fully aware that they are responsible for the security of those accounts?"],"NonComplianceCases": ["Case 1: User registration, termination, and approval procedures for accounts and permissions for users and personal information handlers were processed through verbal requests, email, etc., without proper approval and handling records.","Case 2: A personal information handler shared their account with an unauthorized individual for backup purposes during vacations, business trips, or other absences without going through official procedures.","Case 3: Users of information systems or personal information processing systems were granted excessive permissions, allowing access to unnecessary information or personal data."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights)"]}],"description": "To control unauthorized access to information systems, personal information, and critical information, organizations must establish and implement procedures for user registration, termination, and granting, changing, or revoking access rights, ensuring that access rights are granted only to the minimum necessary for work purposes. Additionally, when registering or granting user rights, it must be made clear to users that they are responsible for the security of their accounts.","checks_status": {"fail": 0,"pass": 0,"total": 22,"manual": 0}},"2.5.2": {"name": "User Identification","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.5.2 User Identification","Subdomain": "2.5. 
Authentication and Access Management","AuditEvidence": ["Login screen for information systems and personal information processing systems","Lists of administrators, users, and personal information handlers for information systems and personal information processing systems","Records of approvals for exceptions"],"AuditChecklist": ["Are unique identifiers assigned to users and personal information handlers in information systems and personal information processing systems, and is the use of easily guessable identifiers restricted?","If the same identifier is shared by multiple users for unavoidable reasons, has the justification been reviewed and have supplementary measures such as approval from the responsible party been established?"],"NonComplianceCases": ["Case 1: The account status of information systems (servers, networks, firewalls, DBMS, etc.) shows that default administrator accounts provided by the manufacturer are still in use, despite being technically modifiable.","Case 2: Developers are sharing personal information processing system accounts for common use without any justification or approval from responsible parties.","Case 3: External personnel maintaining information systems are using operational accounts like personal accounts without going through the required approval procedures."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights)"]}],"description": "User accounts must be assigned unique identifiers that distinguish each user individually, and the use of easily guessable identifiers must be restricted. If the same identifier is shared by multiple users, the reason and justification must be reviewed, supplementary measures such as approval from a responsible party must be established, and accountability must be ensured.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.5.3": {"name": "User Authentication","checks": {"iam_root_mfa_enabled": null,"iam_user_accesskey_unused": null,"iam_check_saml_providers_sts": null,"cognito_user_pool_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_user_two_active_access_key": null,"iam_administrator_access_with_mfa": null,"iam_user_mfa_enabled_console_access": null,"iam_user_with_temporary_credentials": null,"apigatewayv2_api_authorizers_enabled": "FAIL","iam_user_no_setup_initial_access_key": null,"apigateway_restapi_authorizers_enabled": "PASS","rds_cluster_iam_authentication_enabled": "FAIL","rds_instance_iam_authentication_enabled": "FAIL","kafka_cluster_unrestricted_access_disabled": null,"cognito_identity_pool_guest_access_disabled": "FAIL","cognito_user_pool_advanced_security_enabled": null,"cognito_user_pool_self_registration_disabled": null,"directoryservice_supported_mfa_radius_enabled": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cognito_user_pool_client_token_revocation_enabled": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cognito_user_pool_client_prevent_user_existence_errors": null,"opensearch_service_domains_internal_user_database_enabled": null,"cognito_user_pool_blocks_potential_malicious_sign_in_attempts": null,"opensearch_service_domains_use_cognito_authentication_for_kibana": null,"cognito_user_pool_blocks_compromised_credentials_sign_in_attempts": null},"status": "FAIL","attributes": [{"Domain": "2. 
Protection Measure Requirements","Section": "2.5.3 User Authentication","Subdomain": "2.5. Authentication and Authorization Management","AuditEvidence": ["Login screen for information systems and personal information processing systems","Login attempt limitation setting screen","Login failure message screen","Procedures for external access (e.g., external access request forms, list of external accessors)"],"AuditChecklist": ["Is access to information systems and personal information processing systems controlled through secure user authentication procedures, login attempt limitations, and warnings for illegal login attempts?","When accessing personal information processing systems from outside via a communication network, are secure authentication methods or secure access measures applied in accordance with legal requirements?"],"NonComplianceCases": ["Case 1: When a personal information handler accesses a personal information processing system through the public external internet, secure authentication methods are not applied, and authentication is done only through ID and password.","Case 2: In the login process for information systems and personal information processing systems, detailed messages are displayed about whether the ID exists or the password is incorrect, and there is no limit on login failure attempts."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights), Article 6 (Access Control)"]}],"description": "User access to information systems, personal information, and critical information must be secured through safe authentication procedures and, if necessary, enhanced authentication methods. In addition, access control measures such as limiting login attempts and issuing warnings for illegal login attempts must be established and implemented.","checks_status": {"fail": 4,"pass": 1,"total": 29,"manual": 0}},"2.5.4": {"name": "Password Management","checks": {"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_password_policy_minimum_length_14": null,"cognito_user_pool_password_policy_number": null,"cognito_user_pool_password_policy_symbol": null,"cognito_user_pool_password_policy_lowercase": null,"cognito_user_pool_password_policy_uppercase": null,"cognito_user_pool_temporary_password_expiration": null,"cognito_user_pool_password_policy_minimum_length_14": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.5.4 Password Management","Subdomain": "2.5. 
Authentication and Authorization Management","AuditEvidence": ["Password setting screens for web pages, information systems, and personal information processing systems","Password management policies and procedures"],"AuditChecklist": ["Are procedures for managing and creating secure user passwords for information systems established and implemented?","Are password creation rules established and enforced to ensure that users can use secure passwords?","Are authentication methods for personal information handlers and users securely applied and managed?"],"NonComplianceCases": ["Case 1: Although password creation rules are set in policies and guidelines related to information protection and personal information protection, some information systems and personal information processing systems use passwords that differ from internal guidelines.","Case 2: Internal regulations state that when passwords are reset, temporary passwords must be assigned and forced to be changed, but in practice, temporary passwords are being used without change.","Case 3: Although internal regulations require users and personal information handlers to change their passwords periodically, passwords are being used without change."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights)"]}],"description": "Procedures for managing passwords used by users of information systems, as well as customers and members, must be established and implemented, taking into account legal requirements and external threats.","checks_status": {"fail": 0,"pass": 0,"total": 13,"manual": 0}},"2.5.5": {"name": "Management of Special Accounts and Privileges","checks": {"iam_avoid_root_usage": null,"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_support_role_created": null,"rds_cluster_default_admin": "FAIL","rds_instance_default_admin": "FAIL","ec2_instance_profile_attached": "PASS","iam_root_hardware_mfa_enabled": null,"organizations_delegated_administrators": null,"cloudwatch_log_metric_filter_root_usage": null,"sagemaker_notebook_instance_root_access_disabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.5.5 Management of Special Accounts and Privileges","Subdomain": "2.5. 
Authentication and Privilege Management","AuditEvidence": ["Guidelines related to special privileges","Records of special privilege requests and approvals","List of special privilege holders","Records of special privilege reviews"],"AuditChecklist": ["Is there a formal privilege request and approval process established and implemented to ensure that special privileges, such as administrative privileges, are only granted to a minimal number of people?","Is there a control procedure established and implemented to identify and manage accounts and privileges granted for special purposes in a separate list?"],"NonComplianceCases": ["Case 1: The approval history for granting administrator and special privileges in the information system and personal information processing system is not documented or does not match the special privileges list.","Case 2: Internal regulations require that personal information administrators and special privilege holders be documented and managed in a list, but the list is not maintained or some special privileges, such as security system administrators, are not identified or managed.","Case 3: A maintenance special account for visiting maintenance once a quarter remains active at all times without a time limit on usage.","Case 4: Regular reviews of administrator and special privilege usage are not conducted, and some individuals retain special privileges even after their roles have changed."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights)"]}],"description": "Accounts and privileges used for special purposes, such as managing information systems, personal information, and important information, should be granted minimally, separately identified, and controlled.","checks_status": {"fail": 2,"pass": 1,"total": 11,"manual": 0}},"2.5.6": {"name": "Review of Access Rights","checks": {"accessanalyzer_enabled": "PASS","cloudtrail_insights_exist": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_cross_account_sharing_disabled": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cloudtrail_multi_region_enabled_logging_management_events": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.5.6 Review of Access Rights","Subdomain": "2.5. 
Authentication and Privilege Management","AuditEvidence": ["Access rights review standards and procedures","History of access rights reviews","Access rights review reports and follow-up action records"],"AuditChecklist": ["Are the histories of account and access right creation, registration, granting, use, modification, and deletion for information systems, personal information, and important information being recorded?","Are standards, review subjects, review methods, and periodic review schedules established to regularly review the appropriateness of user accounts and access rights to information systems, personal information, and important information?","When issues such as excessive access rights, failure to follow access right granting procedures, or misuse of access rights are identified in the review results, are appropriate response procedures established and implemented?"],"NonComplianceCases": ["Case 1: The methods, review periods, reporting structure, and misuse criteria related to access rights reviews are not clearly defined in the relevant guidelines, leading to irregular performance of access rights reviews.","Case 2: Although internal policies and guidelines require locking (deactivating) or deleting long-unused accounts, some accounts that have not been accessed for more than six months remain active (indicating that the access rights review was not thoroughly conducted, failing to identify these accounts).","Case 3: During the access rights review, cases of excessive privileges or suspected misuse were identified, but no detailed investigation, internal reporting, or follow-up actions were taken."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights)"]}],"description": "The registration, use, and deletion of user accounts accessing information systems, personal information, and important information, as well as the history of granting, changing, and deleting access rights, should be recorded and periodically reviewed to ensure their appropriateness.","checks_status": {"fail": 2,"pass": 1,"total": 14,"manual": 0}},"2.6.1": {"name": "Network Access","checks": {"ec2_ami_public": null,"elb_internet_facing": "FAIL","ec2_elastic_ip_shodan": null,"elbv2_internet_facing": "PASS","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","kafka_cluster_is_public": null,"s3_bucket_acl_prohibited": "FAIL","apigateway_restapi_public": "FAIL","lightsail_database_public": null,"lightsail_instance_public": null,"ec2_securitygroup_not_used": "FAIL","elbv2_listeners_underneath": "PASS","networkfirewall_in_all_vpc": "FAIL","s3_bucket_public_write_acl": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","ssm_documents_set_as_public": "PASS","awslambda_function_url_public": null,"dms_instance_no_public_access": null,"rds_instance_no_public_access": "PASS","emr_cluster_publicly_accesible": null,"redshift_cluster_public_access": null,"neptune_cluster_public_snapshot": null,"eks_cluster_private_nodes_enabled": null,"awslambda_function_url_cors_policy": null,"documentdb_cluster_public_snapshot": null,"eks_cluster_network_policy_enabled": null,"neptune_cluster_uses_public_subnet": null,"sns_topics_not_publicly_accessible": "PASS","sqs_queues_not_publicly_accessible": "PASS","vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","eks_cluster_not_publicly_accessible": 
null,"glacier_vaults_policy_public_access": null,"iam_user_mfa_enabled_console_access": null,"s3_access_point_public_access_block": "PASS","s3_bucket_level_public_access_block": "PASS","iam_user_administrator_access_policy": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"iam_group_administrator_access_policy": null,"s3_account_level_public_access_blocks": null,"apigateway_restapi_authorizers_enabled": "PASS","elasticache_cluster_uses_public_subnet": "PASS","rds_instance_iam_authentication_enabled": "FAIL","appstream_fleet_maximum_session_duration": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ecr_repositories_not_publicly_accessible": "PASS","emr_cluster_account_public_block_enabled": "PASS","sagemaker_models_vpc_settings_configured": null,"apigateway_restapi_public_with_authorizer": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","vpc_endpoint_connections_trust_boundaries": "FAIL","appstream_fleet_session_disconnect_timeout": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_instance_port_cifs_exposed_to_internet": "PASS","ec2_instance_port_ldap_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","kafka_cluster_unrestricted_access_disabled": null,"sagemaker_models_network_isolation_enabled": null,"cognito_identity_pool_guest_access_disabled": "FAIL","ec2_instance_port_kafka_exposed_to_internet": "PASS","ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","workspaces_vpc_2private_1public_subnets_nat": null,"ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_telnet_exposed_to_internet": "PASS","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_kerberos_exposed_to_internet": "PASS","ec2_transitgateway_auto_accept_vpc_attachments": null,"appstream_fleet_session_idle_disconnect_timeout": null,"ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_memcached_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","rds_instance_event_subscription_security_groups": "FAIL","sagemaker_training_jobs_vpc_settings_configured": null,"vpc_peering_routing_tables_with_least_privilege": "PASS","appstream_fleet_default_internet_access_disabled": null,"ec2_instance_port_postgresql_exposed_to_internet": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","cloudfront_distributions_geo_restrictions_enabled": null,"sagemaker_training_jobs_network_isolation_enabled": null,"opensearch_service_domains_not_publicly_accessible": null,"sagemaker_notebook_instance_vpc_settings_configured": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","vpc_endpoint_services_allowed_principals_trust_boundaries": null,"ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","cognito_user_pool_blocks_potential_malicious_sign_in_attempts": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": 
"PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Control Measures","Section": "2.6.1 Network Access","Subdomain": "2.6. Access Control","AuditEvidence": ["Network diagram","IP management ledger","Information asset list","Firewall rules"],"AuditChecklist": ["Has the organization identified all access paths to its network and ensured that internal networks are controlled so that only authorized users can access them according to the access control policy?","Has the organization physically or logically segmented the network based on services, user groups, information asset importance, and legal requirements, and applied access control between different network segments?","Has the organization established IP address allocation standards for each network segment, and applied measures such as assigning private IPs to systems like database servers that do not require external connections?","Has the organization implemented protective measures for communication paths when connecting networks between physically separated locations, such as IDCs, branches, and agents?"],"NonComplianceCases": ["Case 1: The network configuration and interviews revealed that data transmission and reception between external sites and the servers located in the IDC are being processed through the general internet line, rather than using VPN or dedicated lines as specified in internal regulations.","Case 2: The IP addresses of some important servers, such as database servers located in the internal network, were set to public IPs instead of private IPs as per internal regulations, and network access blocking was not applied.","Case 3: Although a server farm was established, access from the internal network to the server farm was excessively allowed due to insufficient network access control settings.","Case 4: The network provided to external parties (e.g., external developers, visitors) was not separated from the internal business network without appropriate controls.","Case 5: Contrary to internal regulations, the organization's network could be accessed and used simply by connecting a network cable without applying protective measures such as MAC address authentication and mandatory security software installation."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 6 (Access Control)"]}],"description": "In order to 
control unauthorized access to the network, management procedures such as IP management and device authentication must be established and implemented. Network segmentation (DMZ, server farm, DB zone, development zone, etc.) and access controls must be applied according to the business purpose and importance.","checks_status": {"fail": 17,"pass": 54,"total": 112,"manual": 0}},"2.6.2": {"name": "Access to Information Systems","checks": {"ec2_elastic_ip_shodan": null,"ec2_instance_public_ip": "FAIL","ec2_elastic_ip_unassigned": "FAIL","lightsail_instance_public": null,"lightsail_static_ip_unused": null,"ec2_instance_managed_by_ssm": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_instance_port_telnet_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","ec2_instance_internet_facing_with_instance_profile": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Protection Requirements","Section": "2.6.2 Access to Information Systems","Subdomain": "2.6. 
Access Control","AuditEvidence": ["List of operating system accounts of information systems","Server security settings","Server access control policy (e.g., SecureOS management screen)","Server and network configuration diagram","Information asset list"],"AuditChecklist": ["Have users, access locations, and access means allowed to access operating systems (OS) of information systems such as servers, network systems, and security systems been defined and controlled?","Is the system automatically disconnected when there is no work processed after accessing the information system for a certain period?","Are services unrelated to the purpose of using the information system removed?","Are information systems that provide key services operated on independent servers?"],"NonComplianceCases": ["Case 1: When a server administrator accesses a Windows server located in the IDC from the office using terminal services, session timeout settings are not configured, allowing the session to remain open for a long period without any activity.","Case 2: Due to improper restrictions on server-to-server access, a user authorized to access a particular server can access other unauthorized servers via that server.","Case 3: Unsafe access protocols (e.g., telnet, ftp) are being used without valid reasons or compensatory measures, and unnecessary services and ports are open.","Case 4: Although the access control policy requires all access to servers to go through a server access control system, bypass routes exist that allow access to servers without going through the system."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 6 (Access Control)"]}],"description": "The users, access restriction methods, and secure access means for accessing information systems such as servers and network systems must be defined and controlled.","checks_status": {"fail": 8,"pass": 13,"total": 24,"manual": 0}},"2.6.3": {"name": "Access to Applications","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Requirements","Section": "2.6.3 Access to Applications","Subdomain": "2.6. 
Access Control","AuditEvidence": ["Application access rights classification system","Application account and rights management screen","Application user and administrator screens (e.g., personal information viewing, etc.)","Application session time and concurrent session restriction settings","Application administrator access log monitoring details","Information asset list","Personal information processing system's personal information viewing and search screens","Personal information masking standards","Personal information masking application screen"],"AuditChecklist": ["Are access rights to applications granted differentially based on the user's tasks to control access to sensitive information?","Are sessions automatically disconnected after a certain period of inactivity, and is the number of simultaneous sessions per user restricted?","Is access to administrator-exclusive applications (e.g., admin web pages, admin consoles) restricted to unauthorized users?","Are criteria established and applied to ensure consistency in protection measures for limiting the display of personal and sensitive information?","Are applications implemented and operated to minimize unnecessary exposure (e.g., viewing, screen display, printing, downloading) of personal and sensitive information?"],"NonComplianceCases": ["Case 1: There is a flaw in the authorization control function of certain personal information processing screens in the application, allowing users without permission to view personal information.","Case 2: The administrator page of the application is open to the public internet without secure authentication methods applied.","Case 3: Session timeouts or concurrent logins for the same user account are not restricted without valid reasons.","Case 4: When personal information is downloaded through the application, the file contains excessive unnecessary information such as resident registration numbers.","Case 5: The application excessively allows 'like' searches, allowing all users to retrieve all customer information by searching only for a surname, even beyond their work scope.","Case 6: Due to the lack of criteria for limiting the display of personal information or failure to adhere to them, different masking standards are applied to the same personal information items on different screens of the personal information processing system.","Case 7: Although personal information is masked on the screen of the personal information processing system, unmasked personal information is exposed by viewing the web browser source."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights), Article 6 (Access Control), Article 12 (Safety Measures for Printing and Copying)"]}],"description": "Access rights to applications must be restricted according to the user's tasks and the importance of the accessed information, and criteria should be established to minimize exposure of unnecessary or sensitive information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.6.4": {"name": "Database Access","checks": {"accessanalyzer_enabled": "PASS","lightsail_database_public": null,"rds_snapshots_public_access": "PASS","dms_instance_no_public_access": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"neptune_cluster_public_snapshot": null,"rds_instance_transport_encrypted": 
"FAIL","documentdb_cluster_public_snapshot": null,"neptune_cluster_uses_public_subnet": null,"vpc_subnet_separate_private_public": "FAIL","dynamodb_table_cross_account_access": null,"rds_cluster_iam_authentication_enabled": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","rds_instance_iam_authentication_enabled": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","neptune_cluster_iam_authentication_enabled": null,"ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","opensearch_service_domains_not_publicly_accessible": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_internal_user_database_enabled": null,"ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","opensearch_service_domains_use_cognito_authentication_for_kibana": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.6.4 Database Access","Subdomain": "2.6. 
Access Control","AuditEvidence": ["Database status (e.g., tables, columns)","List of database user accounts and permissions","Database access control policy (e.g., database access control system management screen)","Network diagram (e.g., database zone)","Information asset list"],"AuditChecklist": ["Are you identifying the information stored and managed in the database, such as the table list?","Are you clearly identifying the applications, information systems (servers), and users that need access to information in the database and controlling access according to the access control policy?"],"NonComplianceCases": ["Case 1: A database that stores and processes a large amount of personal information is operated on the same physical server as a web application accessible via the Internet, without separating them.","Case 2: Developers and operators share accounts used by the application to access the production database.","Case 3: Although internal regulations require database access rights to be restricted by object, access rights to the database are granted uniformly to administrators, even those who do not need access to personal information tables.","Case 4: A database access control solution has been implemented, but access to the database is not properly restricted by IP address, allowing users to bypass the access control solution.","Case 5: The table status of a database storing personal information has not been identified, resulting in the unnecessary retention of personal information in temporary tables that have not been deleted."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 5 (Management of Access Rights), Article 6 (Access Control)"]}],"description": "Identify the information stored and managed in the database, such as the table list, and establish and implement access control policies according to the importance of the information and the type of applications and users.","checks_status": {"fail": 6,"pass": 19,"total": 37,"manual": 0}},"2.6.5": {"name": "Wireless Network Access","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.6.5 Wireless Network Access","Subdomain": "2.6. 
Access Control","AuditEvidence": ["Network diagram","AP security settings history","Inspection records of unauthorized wireless networks","Wireless network usage application and approval records"],"AuditChecklist": ["When using a wireless network for business purposes, are you establishing and implementing protection measures such as authentication and encryption of transmitted and received data to ensure the security of the wireless AP and network segment?","Have you established and implemented procedures for applying for and terminating access to ensure that only authorized employees can use the wireless network?","Have you established and implemented protection measures against unauthorized wireless networks, such as detecting and blocking AD Hoc connections and unauthorized wireless APs within the organization?"],"NonComplianceCases": ["Case 1: The wireless network segments for external users and internal users are the same, allowing external users to access the internal network without separate control via the wireless network.","Case 2: Although the encryption function for information transmission and reception was enabled when configuring the wireless AP, it was set in an insecure manner.","Case 3: A wireless AP connected to the internal network for business purposes has security settings that are insufficient, such as exposure of the administrator password (using the default password) and lack of access control."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 6 (Access Control)"]}],"description": "When using a wireless network, wireless network protection measures such as user authentication, encryption of transmitted and received data, and AP control must be applied. In addition, protection measures must be established and implemented to prevent unauthorized wireless network access, such as AD Hoc connections and the use of unauthorized APs.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.6.6": {"name": "Remote Access Control","checks": {"vpc_flow_logs_enabled": "FAIL","networkfirewall_in_all_vpc": "FAIL","cognito_user_pool_mfa_enabled": null,"iam_user_console_access_unused": null,"vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","iam_user_mfa_enabled_console_access": null,"workspaces_volume_encryption_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","appstream_fleet_maximum_session_duration": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","appstream_fleet_session_disconnect_timeout": null,"ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","cognito_identity_pool_guest_access_disabled": "FAIL","workspaces_vpc_2private_1public_subnets_nat": null,"cognito_user_pool_self_registration_disabled": null,"appstream_fleet_session_idle_disconnect_timeout": null,"appstream_fleet_default_internet_access_disabled": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"ec2_client_vpn_endpoint_connection_logging_enabled": null,"cloudwatch_log_metric_filter_authentication_failures": null,"ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. 
Protective Measures Requirements","Section": "2.6.6 Remote Access Control","Subdomain": "2.6. Access Control","AuditEvidence": ["Remote access application form (e.g., VPN)","VPN account list","VPN access control policy settings","IP management log","Remote access control settings (server settings, security system settings, etc.)","Designation and management of management terminals","Network diagram"],"AuditChecklist": ["Is remote operation of information systems through external networks such as the internet prohibited in principle, and are compensatory measures in place if allowed for unavoidable reasons such as incident response?","Is access through remote operation of information systems allowed only for specific devices when done through internal networks?","Are protective measures established and implemented to prevent security incidents such as data breaches and hacking during remote work, such as telecommuting, remote collaboration, and smart work?","Are the devices used for remote access to personal information processing systems for management, operation, development, and security purposes designated as management terminals, and are safety measures such as prohibiting unauthorized operations and use for purposes other than those intended being applied?"],"NonComplianceCases": ["Case 1: Although internal regulations state that remote access to the system is prohibited in principle and, when allowed, access is restricted through IP-based access control, remote desktop connections and SSH access to the system are not limited by IP addresses, allowing access from any PC.","Case 2: A VPN has been established for remote management, but it is always available without usage approval or access period restrictions.","Case 3: Work-related mobile apps have been installed on personal smart devices for external workers, but appropriate protective measures (e.g., antivirus, encryption, wiping in case of loss or theft) to prevent personal information leaks are not being applied.","Case 4: VPN access for external users is not limited by network segments and information systems, allowing excessive access to the entire internal network and information systems for authenticated remote users."],"RelatedRegulations": ["Personal Information Protection Act Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information Article 6 (Access Control)"]}],"description": "Managing information systems and handling personal information outside of protected areas is, in principle, prohibited. 
However, if remote access is allowed for unavoidable reasons such as telecommuting, incident response, or remote collaboration, protective measures must be established and implemented, including approval from responsible personnel, designation of access devices, setting access scope and duration, enhanced authentication, encrypted communication, and securing access devices (e.g., antivirus, patches).","checks_status": {"fail": 8,"pass": 5,"total": 26,"manual": 0}},"2.6.7": {"name": "Internet Access Control","checks": {"ec2_elastic_ip_shodan": null,"vpc_flow_logs_enabled": "FAIL","ec2_instance_public_ip": "FAIL","ec2_elastic_ip_unassigned": "FAIL","networkfirewall_in_all_vpc": "FAIL","vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","workspaces_volume_encryption_enabled": null,"route53_dangling_ip_subdomain_takeover": null,"appstream_fleet_maximum_session_duration": null,"appstream_fleet_session_disconnect_timeout": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"workspaces_vpc_2private_1public_subnets_nat": null,"ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","appstream_fleet_session_idle_disconnect_timeout": null,"appstream_fleet_default_internet_access_disabled": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Domain": "2. Protective Measures Requirements","Section": "2.6.7 Internet Access Control","Subdomain": "2.6. Access Control","AuditEvidence": ["Policy for blocking non-work-related sites (e.g., P2P) (management screen of non-work-related site blocking system)","Internet access monitoring history","List of individuals subject to internet access restriction measures","Procedures and records for data transfer between networks (e.g., application and approval records)","Network diagram"],"AuditChecklist": ["Is there an established and implemented policy to control internet access for work PCs used for key duties and personal information handling terminals?","Is unnecessary external internet access from key information systems (e.g., database servers) being controlled?","Are internet access restrictions being applied in a secure manner for individuals who are required by law to have their internet access restricted?"],"NonComplianceCases": ["Case 1: Internet access restriction measures were applied according to the Personal Information Protection Act, but the restriction was not applied to some individuals with the authority to set access rights for personal information processing systems.","Case 2: Although internet access restriction measures were applied as required under the Personal Information Protection Act, it was possible to bypass the restriction by accessing the system through another server, allowing the download and deletion of personal information.","Case 3: Some servers located in the DMZ and internal network were unnecessarily able to access the internet directly.","Case 4: Although a physical network separation system was applied between internet PCs and internal work PCs, and a data transfer system was established, there was no approval process for data transfers, and there was no periodic review of the data transfer records.","Case 5: Internal regulations require that individuals handling personal information obtain approval from a responsible person before accessing P2P or web hard drive sites, and access is only permitted for a 
specific period, but there are numerous cases of exceptions being made without going through the approval process."],"RelatedRegulations": ["Personal Information Protection Act Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information Article 6 (Access Control)"]}],"description": "To prevent information leaks, malware infections, and intrusions into the internal network through the internet, policies must be established and implemented to restrict internet access or services (e.g., P2P, web hard drives, messengers) on key information systems, devices handling sensitive duties, and terminals processing personal information.","checks_status": {"fail": 6,"pass": 1,"total": 19,"manual": 0}},"2.7.1": {"name": "Application of Encryption Policy","checks": {"elb_ssl_listeners": "FAIL","backup_vaults_exist": null,"elbv2_ssl_listeners": "FAIL","ssm_document_secrets": "PASS","backup_vaults_encrypted": "PASS","rds_snapshots_encrypted": "FAIL","elb_insecure_ssl_ciphers": "PASS","s3_bucket_kms_encryption": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","elbv2_insecure_ssl_ciphers": "PASS","athena_workgroup_encryption": null,"ec2_ebs_snapshots_encrypted": "FAIL","s3_bucket_default_encryption": "PASS","ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","rds_instance_transport_encrypted": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","neptune_cluster_storage_encrypted": null,"s3_bucket_secure_transport_policy": "FAIL","documentdb_cluster_storage_encrypted": null,"workspaces_volume_encryption_enabled": null,"awslambda_function_no_secrets_in_code": "PASS","glue_database_connections_ssl_enabled": null,"athena_workgroup_enforce_configuration": null,"cloudfront_distributions_https_enabled": null,"cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudformation_stack_outputs_find_secrets": "PASS","codebuild_project_no_secrets_in_variables": "PASS","kafka_cluster_encryption_at_rest_uses_cmk": null,"sns_subscription_not_using_http_endpoints": "PASS","sns_topics_kms_encryption_at_rest_enabled": "FAIL","sqs_queues_server_side_encryption_enabled": "PASS","awslambda_function_no_secrets_in_variables": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"glue_etl_jobs_amazon_s3_encryption_enabled": "PASS","acm_certificates_with_secure_key_algorithms": "PASS","cloudwatch_log_group_kms_encryption_enabled": "FAIL","ecs_task_definitions_no_environment_secrets": "PASS","kafka_cluster_in_transit_encryption_enabled": null,"storagegateway_fileshare_encryption_enabled": null,"apigateway_restapi_client_certificate_enabled": "FAIL","glue_etl_jobs_job_bookmark_encryption_enabled": "FAIL","glue_data_catalogs_metadata_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"kafka_cluster_mutual_tls_authentication_enabled": null,"directoryservice_radius_server_security_protocol": null,"glue_development_endpoints_s3_encryption_enabled": null,"glue_etl_jobs_cloudwatch_logs_encryption_enabled": "FAIL","autoscaling_find_secrets_ec2_launch_configuration": "PASS","eks_cluster_kms_cmk_encryption_in_secrets_enabled": null,"elasticache_redis_cluster_rest_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"cloudfront_distributions_field_level_encryption_enabled": 
null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"elasticache_redis_cluster_in_transit_encryption_enabled": null,"opensearch_service_domains_https_communications_enforced": null,"sagemaker_training_jobs_intercontainer_encryption_enabled": null,"glue_data_catalogs_connection_passwords_encryption_enabled": "FAIL","glue_development_endpoints_job_bookmark_encryption_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null,"sagemaker_training_jobs_volume_and_output_encryption_enabled": null,"glue_development_endpoints_cloudwatch_logs_encryption_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.7.1 Application of Encryption Policy","Subdomain": "2.7. Application of Encryption","AuditEvidence": ["Encryption control policy (targets, methods, algorithms, etc.)","Encryption application status (during storage and transmission)","Risk analysis results (if encryption is not applied to unique identifiers other than resident registration numbers in the internal network)","Encryption solution management screen"],"AuditChecklist": ["Has an encryption policy been established that includes encryption targets, encryption strength, and encryption usage in consideration of legal requirements for the protection of personal and important information?","Is encryption applied to personal and important information during storage, transmission, and transfer according to the encryption policy?"],"NonComplianceCases": ["Case 1: Internal policies and guidelines do not properly specify encryption targets, encryption strength, encryption methods during storage and transmission, or the roles and responsibilities of those responsible for encryption, considering legal requirements.","Case 2: The company applied incorrect regulations during the creation of its encryption policy, leading to non-compliance with legal encryption requirements (e.g., storing user account numbers without encryption).","Case 3: Although one-way encryption was applied to the passwords of both personal information handlers and data subjects, an insecure MD5 algorithm was used.","Case 4: Although a security server was applied to an internet shopping mall in accordance with relevant laws and internal regulations, encryption was missing in some sections where users' personal information is transmitted (e.g., viewing or modifying member information, password retrieval, password changes).","Case 5: Passwords for accessing information systems, authentication keys, and other values were stored in plaintext in system configuration files and source code."],"RelatedRegulations": ["Personal Information Protection Act, Article 24-2 (Restrictions on Processing of Resident Registration Numbers), Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 7 (Encryption of Personal Information)"]}],"description": "To protect personal and important information, encryption policies that reflect legal requirements, such as encryption targets, encryption strength, and encryption usage policies, must be established. 
Encryption must be applied during the storage, transmission, and transfer of personal and important information.","checks_status": {"fail": 18,"pass": 19,"total": 66,"manual": 0}},"2.7.2": {"name": "Cryptographic Key Management","checks": {"kms_cmk_are_used": null,"kms_cmk_rotation_enabled": null,"kms_key_not_publicly_accessible": null,"kms_cmk_not_deleted_unintentionally": null,"rds_instance_certificate_expiration": "PASS","secretsmanager_automatic_rotation_enabled": "FAIL","acm_certificates_transparency_logs_enabled": "PASS","directoryservice_ldap_certificate_expiration": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.7.2 Cryptographic Key Management","Subdomain": "2.7. Application of Encryption","AuditEvidence": ["Cryptographic Key Management Policy","Cryptographic Key Management Log and System Screens"],"AuditChecklist": ["Are procedures for the generation, use, storage, distribution, modification, recovery, and destruction of cryptographic keys established and implemented?","Are cryptographic keys securely stored in a separate location to ensure they can be recovered if necessary, and is access to the use of cryptographic keys minimized?"],"NonComplianceCases": ["Case 1: If encryption policies do not specify procedures and methods for managing cryptographic keys, leading to varying levels and methods of cryptographic key management among personnel, resulting in vulnerabilities.","Case 2: Internal regulations require the generation of encryption keys under the approval of a responsible person when encrypting important information, and to maintain a key management log, but some keys are either missing or outdated in the log.","Case 3: The encryption key applied in the development system is the same as the one applied in the production system, making it easy to decrypt actual data through the development system."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 7 (Encryption of Personal Information)"]}],"description": "Establish and implement management procedures for the secure generation, use, storage, distribution, and destruction of cryptographic keys, and prepare recovery methods if necessary.","checks_status": {"fail": 1,"pass": 2,"total": 9,"manual": 0}},"2.8.1": {"name": "Definition of Security Requirements","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","fms_policy_compliant": null,"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","accessanalyzer_enabled": "PASS","networkfirewall_in_all_vpc": "FAIL","guardduty_centrally_managed": "FAIL","wafv2_webacl_logging_enabled": "FAIL","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","codebuild_project_no_secrets_in_variables": "PASS","codebuild_project_user_controlled_buildspec": "PASS","wellarchitected_workload_no_high_or_medium_risks": "FAIL","codebuild_project_source_repo_url_no_sensitive_credentials": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.8.1 Definition of Security Requirements","Subdomain": "2.8. 
Security in Information System Introduction and Development","AuditEvidence": ["Information System Acquisition Standards and Procedures","RFP (Request for Proposal) and Purchase Contracts for Information System Introduction","Development Outputs (Project Execution Plans, Requirements Definition, Screen Design, Security Architecture Design, Test Plans, etc.)","Secure Coding Standards"],"AuditChecklist": ["When introducing, developing, or modifying an information system, are procedures for reviewing the validity of information protection and personal information protection aspects and for acquisition established and implemented?","When introducing, developing, or modifying an information system, are security requirements, including legal requirements and the latest vulnerabilities, clearly defined and reflected from the design stage?","Are coding standards for secure implementation of the information system established and applied?"],"NonComplianceCases": ["Case 1: Lack of established security verification standards and procedures prior to acquiring an information system.","Case 2: Internal regulations require the review of the security impact and the operating environment when introducing a new system, but recent acquisitions of some information systems lacked detailed standards and plans, and therefore, no security review was conducted during the acquisition.","Case 3: Internal development guidelines do not define key security requirements related to development (authentication and encryption, security logging, etc.).","Case 4: In the 'Development Standards Definition Document', user passwords are to be encrypted using insecure algorithms (MD5, SHA1), resulting in failure to comply with relevant legal requirements."],"RelatedRegulations": []}],"description": "When introducing, developing, or modifying information systems, security requirements such as legal requirements related to information protection and personal information protection, the latest security vulnerabilities, and secure coding methods must be defined and applied.","checks_status": {"fail": 7,"pass": 7,"total": 16,"manual": 0}},"2.8.2": {"name": "Review and Testing of Security Requirements","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","fms_policy_compliant": null,"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","accessanalyzer_enabled": "PASS","networkfirewall_in_all_vpc": "FAIL","guardduty_centrally_managed": "FAIL","wafv2_webacl_logging_enabled": "FAIL","inspector2_active_findings_exist": "FAIL","config_recorder_all_regions_enabled": null,"guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","codebuild_project_no_secrets_in_variables": "PASS","codebuild_project_user_controlled_buildspec": "PASS","wellarchitected_workload_no_high_or_medium_risks": "FAIL","codebuild_project_source_repo_url_no_sensitive_credentials": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.8.2 Review and Testing of Security Requirements","Subdomain": "2.8. 
Security in Information System Introduction and Development","AuditEvidence": ["Information System Acceptance Test Results","Requirements Traceability Matrix","Test Plans, Test Results","Vulnerability Assessment Results","Personal Information Impact Assessment Report","Confirmation of Implementation of Corrective Actions for Personal Information Impact Assessment"],"AuditChecklist": ["When introducing, developing, or modifying an information system, are tests conducted to verify whether the security requirements defined during the analysis and design stages have been effectively applied?","Are vulnerability assessments conducted to confirm that the information system has been securely developed according to secure coding standards?","Are procedures established and implemented to ensure that issues identified during testing and vulnerability assessments are promptly addressed through corrective action plans and follow-up checks?","For public institutions, are impact assessments conducted during the analysis and design stages when developing or modifying personal information processing systems, as required by relevant laws, and are the results reflected during development and modification?"],"NonComplianceCases": ["Case 1: Failure to test security requirements defined in internal guidelines and documents after implementing an information system.","Case 2: In the application program test scenario and technical vulnerability checklist, important validation items such as input validation checks were omitted.","Case 3: Failure to assess whether known technical vulnerabilities exist during implementation or testing, or failure to address identified vulnerabilities without valid reasons or approval.","Case 4: A public institution failed to conduct an impact assessment when developing a personal information file or personal information processing system subject to an impact assessment requirement, such as processing unique identifiers of more than 50,000 data subjects.","Case 5: A public institution failed to submit the impact assessment report to the Personal Information Protection Commission within two months after receiving the report from the impact assessment agency.","Case 6: Internal guidelines require reviewing the security and impact on the operating environment when introducing a new system (e.g., vulnerability assessments), but recent acquisitions of some information systems lacked security reviews during the acceptance process."],"RelatedRegulations": ["Personal Information Protection Act, Article 33 (Personal Information Impact Assessment)","Notification on Personal Information Impact Assessment"]}],"description": "To verify that an information system has been introduced or implemented according to predefined security requirements, review standards and procedures must be established and implemented to check compliance with legal requirements, the latest security vulnerabilities, secure coding implementation, and personal information impact assessment, and corrective measures must be taken for any identified issues.","checks_status": {"fail": 10,"pass": 7,"total": 19,"manual": 0}},"2.8.3": {"name": "Separation of Test and Production Environments","checks": {"codebuild_project_user_controlled_buildspec": "PASS"},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.8.3 Separation of Test and Production Environments","Subdomain": "2.8. 
Security in Information System Introduction and Development","AuditEvidence": ["Network diagrams (including test environment configuration)","Current application of access control between the production environment and the development/test environment"],"AuditChecklist": ["Are development and test systems separated from the production system?","If separation of development and production environments is difficult due to unavoidable reasons, have security measures such as mutual review, monitoring by supervisors, approval for changes, and ensuring accountability been implemented?"],"NonComplianceCases": ["Case 1: Source code changes are being made directly in the production environment without a separate development environment or proper approval.","Case 2: Although it is unavoidable to operate the development and production systems without separation, records of mutual review or monitoring are missing.","Case 3: Although a separate development system is in place, access from the development environment to the production environment is not controlled, allowing developers unnecessary access to the production system through the development system."],"RelatedRegulations": []}],"description": "Development and test systems must, in principle, be separated from production systems to reduce the risk of unauthorized access and changes to the production system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.8.4": {"name": "Test Data Security","checks": {"codebuild_project_no_secrets_in_variables": "PASS"},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.8.4 Test Data Security","Subdomain": "2.8. Security in Information System Introduction and Development","AuditEvidence": ["Test data status","Test data generation rules","If operational data was used in a test environment, the approval history"],"AuditChecklist": ["Is the use of actual operational data restricted during the development and testing of information systems?","If it is inevitable to use operational data in a test environment, are control procedures such as approval by the responsible person, monitoring of access and leakage, and deletion of data after testing established and implemented?"],"NonComplianceCases": ["Case 1: There are no specific standards and procedures established for generating test data for use on the development server.","Case 2: Operational data is being used as test data without proper processing and without approval from the responsible person for a valid reason.","Case 3: Although operational data was approved in advance for use as test data for unavoidable reasons, the same level of access control as the operational database is not applied to the test database.","Case 4: After using operational data for testing purposes, the data was not deleted from the test database even though the testing was completed."],"RelatedRegulations": []}],"description": "In order to prevent the leakage of operational data during system testing, procedures for the creation, use, management, and disposal of test data, together with technical protection measures, must be established and implemented.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.8.5": {"name": "Source Program Management","checks": {"ecr_repositories_not_publicly_accessible": "PASS","codeartifact_packages_external_public_publishing_disabled": null},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.8.5 Source Program Management","Subdomain": "2.8. 
Security in Information System Introduction and Development","AuditEvidence": ["Status of configuration management systems such as SVN (e.g., list of authorized personnel)","History of changes to the source program"],"AuditChecklist": ["Have procedures been established and implemented to control access to source programs by unauthorized persons?","Is the source program stored safely in a non-operational environment for emergencies such as system failures?","Is the history of changes to the source program being managed?"],"NonComplianceCases": ["Case 1: There is no separate backup or configuration management system for source programs, and previous versions of the source code are stored on the operational server or developer's PC without approval or history management.","Case 2: A configuration management system has been established, but access control, access and change history for the system or the source code stored in the system are not properly managed.","Case 3: The internal regulations require version control of source programs through a configuration management system, but the latest version of the source program is only stored on the developer's PC, and no separate backup is performed."],"RelatedRegulations": []}],"description": "Source programs must be managed so that only authorized users can access them, and, as a rule, they should not be stored in the operational environment.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"2.8.6": {"name": "Transition to Operational Environment","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.8.6 Transition to Operational Environment","Subdomain": "2.8. Security in Information System Introduction and Development","AuditEvidence": ["Transition procedures","Transition records (requests, approvals, tests, transitions, etc.)"],"AuditChecklist": ["Have control procedures been established and implemented to safely transition newly introduced, developed, or modified systems to the operational environment?","Are contingency plans in place to address issues that may arise during the transition to the operational environment?","Are only the files necessary for service execution installed in the operational environment?"],"NonComplianceCases": ["Case 1: There are no procedures in place to review and approve the transition of developed or modified source programs to the operational environment.","Case 2: Unnecessary files (source code, distribution modules, backups, development-related documents, manuals, etc.)
exist on the operational server.","Case 3: The internal guidelines require the preparation of change request and result documents for safe transition and recovery during transitions to the operational environment, but such documents are not available.","Case 4: The internal guidelines require internal review and approval before distributing mobile apps to the app market, but developers are bypassing these procedures and distributing the apps directly."],"RelatedRegulations": []}],"description": "When transitioning newly introduced, developed, or modified systems to the operational environment, the process must be controlled, and the executable code must be run according to test and user acceptance procedures.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.9.1": {"name": "Change Management","checks": {"codebuild_project_older_90_days": "FAIL","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.9.1 Change Management","Subdomain": "2.9. 
System and Service Operation Management","AuditEvidence": ["Change management procedures","Change management records (requests, approvals, change details, etc.)","Impact analysis results of changes"],"AuditChecklist": ["Have procedures been established and implemented for changes to assets related to information systems (hardware, operating systems, commercial software packages, etc.)?","Are the performance and security impacts analyzed before making changes to information system-related assets?"],"NonComplianceCases": ["Case 1: A recent change to the DMZ section for redundancy was made, but there is no evidence of performing and approving the security risk and performance evaluation that may occur after the change.","Case 2: A recent network change was made, but the review and notification were not sufficiently carried out, so the changes were not properly reflected in the network diagram or in the ACLs of some access control systems (e.g., firewalls, database access control systems).","Case 3: Although a change management system was established to analyze and discuss the impact on performance and security when information systems are introduced or changed, changes can still be made outside the system, and related changes are not properly reviewed."],"RelatedRegulations": []}],"description": "Procedures must be established and implemented to manage all changes to assets related to information systems, and the impact on system performance and security must be analyzed before changes are made.","checks_status": {"fail": 2,"pass": 0,"total": 14,"manual": 0}},"2.9.2": {"name": "Performance and Fault Management","checks": {"rds_cluster_multi_az": "FAIL","elb_is_in_multiple_az": "FAIL","rds_instance_multi_az": "FAIL","elbv2_is_in_multiple_az": "PASS","s3_bucket_no_mfa_delete": "FAIL","vpc_subnet_different_az": "PASS","neptune_cluster_multi_az": null,"elbv2_deletion_protection": "FAIL","autoscaling_group_multiple_az": null,"dms_instance_multi_az_enabled": null,"rds_cluster_backtrack_enabled": null,"cloudtrail_multi_region_enabled": "PASS","rds_cluster_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_cross_region_replication": "FAIL","trustedadvisor_errors_and_warnings": null,"config_recorder_all_regions_enabled": null,"kms_cmk_not_deleted_unintentionally": null,"neptune_cluster_deletion_protection": null,"networkfirewall_deletion_protection": null,"rds_instance_certificate_expiration": "PASS","route53_domains_transferlock_enabled": null,"cloudtrail_bucket_requires_mfa_delete": null,"elb_cross_zone_load_balancing_enabled": "PASS","documentdb_cluster_deletion_protection": null,"documentdb_cluster_cloudwatch_log_export": null,"ec2_instance_detailed_monitoring_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","iam_no_expired_server_certificates_stored": null,"kafka_cluster_enhanced_monitoring_enabled": null,"elasticache_redis_cluster_multi_az_enabled": null,"directoryservice_ldap_certificate_expiration": null,"cognito_user_pool_deletion_protection_enabled": null,"trustedadvisor_premium_support_plan_subscribed": null,"directoryservice_directory_monitor_notifications": null,"cloudformation_stacks_termination_protection_enabled": "FAIL","cloudtrail_multi_region_enabled_logging_management_events": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "FAIL","attributes": [{"Domain": "2. 
Protection Measure Requirements","Section": "2.9.2 Performance and Fault Management","Subdomain": "2.9. System and Service Operations Management","AuditEvidence": ["Procedures for performance and capacity monitoring","Evidence of performance and capacity monitoring (e.g., internal reporting results)","Fault response procedures","Fault response report"],"AuditChecklist": ["Have procedures been established and implemented to continuously monitor performance and capacity to ensure the availability of information systems?","Are response procedures in place and implemented to address cases where the performance and capacity requirements (thresholds) of the information system are exceeded?","Have procedures been established and implemented to immediately recognize and respond to information system faults?","Are procedures in place to record and manage actions taken in response to faults through fault response reports?","For serious faults, are measures being taken to prevent recurrence through cause analysis?"],"NonComplianceCases": ["Case 1: Failure to define requirements (e.g., thresholds) for managing performance and capacity for each target, or the absence of records in regular inspection reports, making it difficult to assess the current status.","Case 2: Performance or capacity standards were exceeded, but no related reviews or follow-up measures were taken or implemented.","Case 3: Fault response procedures for IT equipment have been established, but internal and external environmental changes such as network configuration or vendor changes are not adequately reflected.","Case 4: Inconsistencies exist between fault handling procedures and fault type-specific response methods, or there is a lack of rationale for estimating response times, making swift, accurate, and systematic responses difficult."],"RelatedRegulations": []}],"description": "To ensure the availability of information systems, performance and capacity requirements must be defined, and the status must be continuously monitored. 
Procedures for detecting, recording, analyzing, recovering, and reporting in response to faults must be established and managed effectively.","checks_status": {"fail": 11,"pass": 6,"total": 39,"manual": 0}},"2.9.3": {"name": "Backup and Recovery Management","checks": {"ec2_ami_public": null,"backup_plans_exist": "PASS","backup_vaults_exist": null,"backup_vaults_encrypted": "PASS","ec2_ebs_public_snapshot": "PASS","efs_have_backup_enabled": "FAIL","s3_bucket_public_access": null,"backup_reportplans_exist": null,"s3_bucket_kms_encryption": "FAIL","s3_bucket_public_list_acl": null,"s3_bucket_public_write_acl": null,"ec2_ebs_snapshots_encrypted": "FAIL","rds_instance_backup_enabled": "PASS","rds_snapshots_public_access": "PASS","s3_bucket_lifecycle_enabled": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"s3_bucket_default_encryption": "PASS","rds_cluster_backtrack_enabled": null,"neptune_cluster_backup_enabled": null,"ec2_ebs_volume_snapshots_exists": "FAIL","neptune_cluster_public_snapshot": null,"documentdb_cluster_backup_enabled": null,"documentdb_cluster_public_snapshot": null,"rds_cluster_copy_tags_to_snapshots": "FAIL","s3_bucket_cross_region_replication": "FAIL","rds_instance_copy_tags_to_snapshots": null,"redshift_cluster_automated_snapshot": null,"s3_access_point_public_access_block": "PASS","s3_bucket_policy_public_write_access": "PASS","lightsail_instance_automated_snapshots": null,"dlm_ebs_snapshot_lifecycle_policy_exists": "FAIL","elasticache_redis_cluster_backup_enabled": null,"ecr_repositories_lifecycle_policy_enabled": "FAIL","directoryservice_directory_snapshots_limit": null,"ec2_ebs_snapshot_account_block_public_access": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.9.3 Backup and Recovery Management","Subdomain": "2.9. System and Service Operation Management","AuditEvidence": ["Backup and recovery procedures","Recovery test results","Disaster recovery backup status"],"AuditChecklist": ["Have backup and recovery procedures been established and implemented, including targets, frequency, methods, and procedures?","Is regular recovery testing conducted to verify the completeness and accuracy of the backed-up information and the adequacy of the recovery procedures?","For backup media containing critical information, is the media stored in physically separate locations to address disaster recovery?"],"NonComplianceCases": ["Case 1: Backup and recovery procedures, including targets, frequency, methods, and procedures, have not been established.","Case 2: Although a backup policy is in place, information required to be stored for a long period (6 months, 3 years, 5 years, etc.) 
according to legal requirements is not being stored according to the backup policy.","Case 3: Some systems (e.g., security system policies and logs) that are required to be separately backed up according to higher-level or internal guidelines are not being backed up.","Case 4: Although higher-level or internal guidelines stipulate that recovery tests for backup media should be conducted periodically, recovery tests have not been performed for an extended period."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 11 (Safety Measures for Disaster Recovery)"]}],"description": "To maintain the availability and data integrity of the information system, procedures must be established and implemented regarding the backup targets, frequency, methods, storage locations, retention periods, and disaster recovery. Additionally, management must ensure timely recovery in case of incidents.","checks_status": {"fail": 11,"pass": 8,"total": 37,"manual": 0}},"2.9.4": {"name": "Log and Access Record Management","checks": {"macie_is_enabled": "PASS","elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","inspector2_is_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","ec2_ebs_public_snapshot": "PASS","eventbridge_bus_exposed": "PASS","rds_snapshots_encrypted": "FAIL","s3_bucket_public_access": null,"s3_bucket_kms_encryption": "FAIL","cloudtrail_insights_exist": null,"s3_bucket_public_list_acl": null,"s3_bucket_public_write_acl": null,"ec2_ebs_snapshots_encrypted": "FAIL","ec2_instance_managed_by_ssm": "FAIL","efs_not_publicly_accessible": "FAIL","guardduty_centrally_managed": "FAIL","rds_snapshots_public_access": "PASS","s3_bucket_default_encryption": "PASS","wafv2_webacl_logging_enabled": "FAIL","iam_securityaudit_role_created": null,"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","config_recorder_all_regions_enabled": null,"s3_access_point_public_access_block": "PASS","s3_bucket_level_public_access_block": "PASS","eventbridge_bus_cross_account_access": "FAIL","s3_bucket_policy_public_write_access": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"apigatewayv2_api_access_logging_enabled": "FAIL","cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"s3_bucket_server_access_logging_enabled": "FAIL","cloudfront_distributions_logging_enabled": null,"documentdb_cluster_cloudwatch_log_export": null,"ec2_instance_detailed_monitoring_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_cross_account_sharing_disabled": null,"kafka_cluster_enhanced_monitoring_enabled": null,"acm_certificates_transparency_logs_enabled": "PASS","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"eks_control_plane_logging_all_types_enabled": null,"ec2_ebs_snapshot_account_block_public_access": null,"iam_inline_policy_no_full_access_to_cloudtrail": null,"trustedadvisor_premium_support_plan_subscribed": 
null,"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL","cloudwatch_log_metric_filter_sign_in_without_mfa": null,"directoryservice_directory_monitor_notifications": null,"eventbridge_schema_registry_cross_account_access": "FAIL","glue_etl_jobs_cloudwatch_logs_encryption_enabled": "FAIL","opensearch_service_domains_audit_logging_enabled": null,"directoryservice_directory_log_forwarding_enabled": null,"ec2_client_vpn_endpoint_connection_logging_enabled": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS","cloudwatch_log_metric_filter_authentication_failures": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"route53_public_hosted_zones_cloudwatch_logging_enabled": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudtrail_multi_region_enabled_logging_management_events": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","glue_development_endpoints_cloudwatch_logs_encryption_enabled": null,"awslambda_function_invoke_api_operations_cloudtrail_logging_enabled": "PASS","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.9.4 Log and Access Record Management","Subdomain": "2.9. 
System and Service Operation Management","AuditEvidence": ["Log management procedures","Log record details","Access control records for log storage devices","Access records of personal information"],"AuditChecklist": ["Has the organization established log management procedures for information systems such as servers, applications, security systems, and network systems, and is it generating and storing the necessary logs accordingly?","Are log records of information systems securely stored to prevent tampering, theft, or loss, and is access to the log records minimized?","Are access records for personal information processing systems securely stored for a specified period in accordance with legal requirements, including all necessary items?"],"NonComplianceCases": ["Case 1: The detailed criteria and procedures for log recording, retention periods, review frequency, and responsible personnel have not been established.","Case 2: The maximum size for critical logs such as security event logs, application, and service logs (for Windows Server 2008 or later) is not sufficiently configured, resulting in logs not being recorded and retained for the period specified by internal standards.","Case 3: The log records of important Linux/UNIX servers are not separately backed up or adequately protected, allowing users to arbitrarily delete command execution histories and access logs.","Case 4: Upon reviewing access records for the personal information processing system, while the account, access time, and IP address of the user were logged, details about the data subject information handled and the tasks performed (e.g., view, modify, delete, download) were not recorded.","Case 5: The capacity of the log server is insufficient, leaving only two months of access records for the personal information processing system.","Case 6: A personal information processing system handling personal information of 100,000 data subjects is only retaining access logs for one year."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 8 (Retention and Inspection of Access Records)"]}],"description": "The organization must define the types of logs, retention periods, and retention methods for user access records, system logs, and privilege grant records for information systems such as servers, applications, security systems, and network systems, and must securely retain and manage them to prevent tampering, theft, or loss.","checks_status": {"fail": 25,"pass": 15,"total": 81,"manual": 0}},"2.9.5": {"name": "Log and Access Record Inspection","checks": {"cloudtrail_insights_exist": null,"inspector2_active_findings_exist": "FAIL","trustedadvisor_errors_and_warnings": null,"guardduty_no_high_severity_findings": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","cloudtrail_threat_detection_enumeration": null,"cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_cross_account_sharing_disabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": 
null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cognito_user_pool_client_prevent_user_existence_errors": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Control Requirements","Section": "2.9.5 Log and Access Record Inspection","Subdomain": "2.9. System and Service Operation Management","AuditEvidence": ["Log review and monitoring procedures","Log review and monitoring results (review details, reports, etc.)","Access record inspection details for personal information","Criteria and results for verifying reasons for personal information downloads","Evidence of responses to detected anomalies"],"AuditChecklist": ["Are there established log review and monitoring procedures, including the frequency, targets, and methods for detecting errors, misuse (unauthorized access, excessive queries, etc.), fraud, and other anomalies in the information system?","Are the results of log reviews and monitoring reported to the responsible person, and are responses taken following procedures when anomalies are detected?","Are access records of the personal information processing system regularly inspected according to the periods specified in relevant laws?"],"NonComplianceCases": ["Case 1: Monitoring and alert policies (criteria) for abnormal access (e.g., early morning access on holidays, access via bypass routes) or abnormal behaviors (e.g., large-scale data queries or continuous small data queries) on information systems processing important information have not been established.","Case 2: Although periodic inspection/monitoring criteria for access and usage are established in internal guidelines or systems, there is no record of actual review of abnormal access or behavior.","Case 3: The personal information processor sets the inspection frequency of access records for personal information processing systems to once per quarter.","Case 4: The internal management plan for the personal information processor sets criteria for verifying reasons when more than 1,000 items of personal information are downloaded, but the reasons are not verified when such downloads occur."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 8 (Retention and Inspection of Access Records)"]}],"description": "To ensure normal use of the information system and prevent misuse (unauthorized access, excessive queries, etc.) by users, log review criteria for access and usage must be established and inspected periodically, and post-event actions must be taken promptly if issues arise.","checks_status": {"fail": 6,"pass": 0,"total": 26,"manual": 0}},"2.9.6": {"name": "Time Synchronization","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Control Requirements","Section": "2.9.6 Time Synchronization","Subdomain": "2.9. 
System and Service Operation Management","AuditEvidence": ["Time synchronization settings","Evidence of time synchronization for key systems"],"AuditChecklist": ["Is the system time synchronized with the standard time?","Is regular inspection conducted to ensure that time synchronization is functioning properly?"],"NonComplianceCases": ["Case 1: The time of some critical systems (e.g., security systems, CCTV, etc.) is not synchronized with the standard time, and regular inspections for synchronization are not being conducted.","Case 2: Although internal NTP servers are configured for time synchronization, some systems are not synchronized, and there has been no cause analysis or response."],"RelatedRegulations": []}],"description": "To ensure the accuracy of logs and access records and provide reliable log analysis, the system time must be synchronized with a standard time and regularly maintained.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.9.7": {"name": "Reuse and Disposal of Information Assets","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Control Requirements","Section": "2.9.7 Reuse and Disposal of Information Assets","Subdomain": "2.9. System and Service Operation Management","AuditEvidence": ["Procedures for the disposal and reuse of information assets","Storage media management ledger","Evidence of the disposal of information assets and storage media","Disposal-related outsourcing contracts for information assets and storage media"],"AuditChecklist": ["Are secure reuse and disposal procedures for information assets established and implemented?","When reusing or disposing of information assets and storage media, are personal and critical information processed to be irrecoverable?","If information assets and storage media are disposed of internally, are the disposal records maintained in a management ledger along with evidence of disposal?","If disposal is outsourced to an external company, are disposal procedures specified in the contract and is the complete disposal confirmed?","Are measures in place to protect data on storage media during maintenance, repairs, or replacements of systems and PCs?"],"NonComplianceCases": ["Case 1: Although the policy and procedure require the complete deletion of data using a data deletion program when reusing PCs used by personal information handlers, in practice, PCs are reused without complete deletion or are only formatted before reuse, indicating that procedures are not being followed.","Case 2: Although storage media are disposed of through an external company, the contract lacks details on secure disposal procedures and protective measures, and there is no verification or supervision of the disposal process or evidence of compliance.","Case 3: Instead of recording the serial numbers of disposed HDDs, the system names are recorded, or the disposal ledger is not maintained, making it impossible to verify the disposal history and traceability.","Case 4: Discarded hard disks are left unsecured in an area without locks, and the data has not been fully deleted."],"RelatedRegulations": ["Personal Information Protection Act, Article 21 (Destruction of Personal Information)","Standards for Ensuring the Safety of Personal Information, Article 13 (Destruction of Personal Information)"]}],"description": "To prevent the recovery or regeneration of personal and critical information during the reuse and disposal process, secure reuse and disposal procedures for information assets must be established and 
implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.1": {"name": "Collection and Use of Personal Information","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.1.1 Collection and Use of Personal Information","Subdomain": "3.1. Protection Measures during Personal Information Collection","AuditEvidence": ["Online personal information collection forms (e.g., website sign-up screens, mobile app registration screens, event participation forms)","Offline personal information collection forms (e.g., membership application forms)","Records of personal information collection consent (e.g., member databases)","Records of legal guardian consent","Privacy policy"],"AuditChecklist": ["When collecting personal information, is it collected in accordance with lawful requirements such as obtaining the data subjectโ€™s consent, complying with legal obligations, or concluding and fulfilling contracts?","When obtaining consent from the data subject for the collection of personal information, are the method and timing of obtaining consent appropriate?","When obtaining consent from the data subject for the collection of personal information, are the relevant details clearly communicated, and are significant points required by law highlighted in a way that is easy to understand?","When collecting, using, or providing personal information from children under the age of 14, are necessary details notified to their legal representatives, and is consent obtained?","When obtaining the consent of a legal representative, is only the minimum necessary personal information collected, and are procedures and methods in place to verify the qualifications of the legal representative?","When notifying children under the age of 14 about matters related to the processing of their personal information, are the notifications presented in a format and language that is clear and easy to understand?","Are records of consent obtained from data subjects and legal representatives being retained?","For personal information that can be processed without the consent of the data subject, are the relevant items and legal basis for processing disclosed in the privacy policy or communicated to the data subject separately from the personal information processed with consent?","When personal information is used for additional purposes without the consent of the data subject, are criteria established and implemented to assess the relevance to the original purpose, predictability, impact on the data subject, and safety measures? 
If additional usage continues to occur, are these criteria disclosed in the privacy policy and regularly reviewed?"],"NonComplianceCases": ["Case 1: A personal information processor subject to the Personal Information Protection Act failed to include the 'right to refuse consent and the consequences of refusal' in the notifications when obtaining consent to collect personal information.","Case 2: During the process of obtaining consent for the collection of personal information, the items of personal information to be collected were not specified in detail, and were instead described in general terms like 'etc.'","Case 3: On a shopping mall website, personal information necessary for membership registration was collected alongside payment and delivery information required for future purchases, even though such information was not necessary at the time of registration.","Case 4: Personal information (e.g., name, email, phone number) was collected through Q&A boards without obtaining the data subject's consent.","Case 5: Personal information of children under the age of 14 was collected without obtaining the consent of their legal guardians.","Case 6: Although the service was not intended for children under 14, some members were under 14 because the website did not check birthdates during registration, allowing them to register without legal guardian consent.","Case 7: The procedure for verifying the authenticity of the legal representative was insufficient, allowing individuals who were not legal guardians to provide consent.","Case 8: Personal information (e.g., name, phone number) of legal guardians was collected for the purpose of obtaining their consent to collect personal information from children under the age of 14, but the consent of the legal guardian was not confirmed for an extended period, and the information was retained without being destroyed.","Case 9: Personal information of children under 14 was collected based on the consent of their legal guardians, but records of this consent were not maintained, making it impossible to verify the details related to legal guardian consent (e.g., legal guardian's name, time of consent)."],"RelatedRegulations": ["Personal Information Protection Act, Article 15 (Collection and Use of Personal Information), Article 22 (Methods for Obtaining Consent), Article 22-2 (Protection of Personal Information of Children)","Notice on the Processing of Personal Information"]}],"description": "Personal information must be collected and used lawfully and fairly. When collecting personal information based on the consent of the data subject, such consent must be obtained through legal means. Additionally, when collecting personal information from children under the age of 14, consent must be obtained from their legal representative, and it must be verified that such consent was given by the legal representative.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.2": {"name": "Restrictions on the Collection of Personal Information","checks": {},"status": "PASS","attributes": [{"Domain": "3. Personal Information Processing Requirements","Section": "3.1.2 Restrictions on the Collection of Personal Information","Subdomain": "3.1. 
Protection Measures for Personal Information Collection","AuditEvidence": ["Online personal information collection forms (e.g., website sign-up screens, event participation forms)","Offline personal information collection forms (e.g., membership application forms)","Privacy policy"],"AuditChecklist": ["When collecting personal information, is only the minimum amount of information necessary for the intended purpose being collected?","When collecting personal information based on the data subject's consent, is the data subject clearly informed that they can refuse to consent to the collection of additional personal information beyond the minimum required?","Is the data subject not denied goods or services for refusing to consent to the collection of additional personal information beyond the minimum necessary for the intended purpose?"],"NonComplianceCases": ["Case 1: Although personal information was being collected based on the fulfillment of a contract, excessive personal information not essential to the fulfillment of the contract was being collected.","Case 2: During the process of obtaining consent from the data subject for optional information, the data subject was not explicitly informed that they could refuse to provide such information.","Case 3: Although the sign-up form distinguished between required and optional information, the data subject was not clearly informed that they could complete registration without providing optional information (e.g., there was no indication on the personal information entry form of which fields were required and which were optional).","Case 4: On the website registration page, the data subject was unable to proceed or complete registration if they refused to provide optional information or consent to optional matters.","Case 5: During the hiring process, excessive personal information unrelated to the job position (e.g., family details) was collected."],"RelatedRegulations": ["Personal Information Protection Act, Article 16 (Restrictions on the Collection of Personal Information), Article 22 (Methods for Obtaining Consent)"]}],"description": "When collecting personal information, only the minimum amount of personal information necessary for the intended purpose may be collected, and the data subject must not be denied the provision of goods or services for refusing to consent to optional matters.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.3": {"name": "Restrictions on the Processing of Resident Registration Numbers","checks": {},"status": "PASS","attributes": [{"Domain": "3. Personal Information Processing Requirements","Section": "3.1.3 Restrictions on the Processing of Resident Registration Numbers","Subdomain": "3.1. 
Protection Measures for Personal Information Collection","AuditEvidence": ["Personal information collection forms (e.g., website sign-up screens, event participation forms, membership application forms)","Online personal information collection forms (alternative registration methods for identity verification)","Evidence of legal grounds for processing resident registration numbers","Privacy policy"],"AuditChecklist": ["Are resident registration numbers only processed when there is a clear legal basis?","Is the legal provision that forms the basis for the collection of resident registration numbers clearly identified?","When processing resident registration numbers under a legal basis, does the organization provide a method for data subjects to register without using their resident registration number during the membership registration process on an internet website?"],"NonComplianceCases": ["Case 1: Resident registration numbers were collected for simple membership management purposes, such as identity verification, during website sign-up based on the data subject's consent.","Case 2: Resident registration numbers were collected based on provisions in enforcement rules or local ordinances.","Case 3: The last 6 digits of the resident registration number were collected for identity verification, such as during password recovery, without any legal basis.","Case 4: Resident registration numbers were collected from job applicants during the hiring process without a legal basis.","Case 5: Resident registration numbers were collected during customer service inquiries at a call center for identity verification purposes.","Case 6: Even when there was a legal basis for the collection of resident registration numbers, alternative registration methods were not provided during the membership registration process on the website, and resident registration numbers were required for identity verification and membership registration."],"RelatedRegulations": ["Personal Information Protection Act, Article 24-2 (Restrictions on the Processing of Resident Registration Numbers)","Information and Communications Network Act, Article 23-2 (Restrictions on the Use of Resident Registration Numbers)"]}],"description": "Resident registration numbers may not be collected, used, or processed unless there is a legal basis for doing so. Even when the processing of resident registration numbers is permitted, alternative methods must be provided, such as through an internet website.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.4": {"name": "Restriction on Processing of Sensitive and Unique Identifying Information","checks": {},"status": "PASS","attributes": [{"Domain": "3. Personal Information Processing Requirements","Section": "3.1.4 Restriction on Processing of Sensitive and Unique Identifying Information","Subdomain": "3.1. 
Protection Measures for Personal Information Collection","AuditEvidence": ["Online personal information collection forms (e.g., membership sign-up pages, event participation forms)","Offline personal information collection forms (e.g., membership application forms)","Privacy policy"],"AuditChecklist": ["Is sensitive information processed only with the separate consent of the data subject or when legally required?","Is unique identifying information (excluding resident registration numbers) processed only with the separate consent of the data subject or when there is a specific legal basis?","If there is a risk of invasion of privacy due to the disclosure of sensitive information during the provision of goods or services, is the data subject clearly informed of the possibility of such disclosure and how to opt for non-disclosure before the provision of goods or services?"],"NonComplianceCases": ["Case 1: Collecting sensitive information such as disability status for discounts or benefits for disabled individuals, and obtaining blanket consent for all personal information items.","Case 2: Collecting foreign registration numbers only from foreigners during membership registration, and obtaining blanket consent for all personal information items.","Case 3: When obtaining separate consent for the collection of sensitive or unique identifying information, failing to inform or incorrectly informing the data subject about the four key points that must be disclosed (e.g., the right to refuse consent and the consequences of refusal)."],"RelatedRegulations": ["Personal Information Protection Act, Article 23 (Restrictions on Processing of Sensitive Information), Article 24 (Restrictions on Processing of Unique Identifying Information)"]}],"description": "In order to process sensitive information and unique identifying information (excluding resident registration numbers), separate consent from the data subject must be obtained unless the processing is specifically required or permitted by law.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.5": {"name": "Indirect Collection of Personal Information","checks": {},"status": "PASS","attributes": [{"Domain": "3. Personal Information Processing Requirements","Section": "3.1.5 Indirect Collection of Personal Information","Subdomain": "3.1. 
Protection Measures for Personal Information Collection","AuditEvidence": ["Contracts related to the provision of personal information (agreements with providers)","Records of notifications to data subjects about the source of personal information","Privacy policy"],"AuditChecklist": ["When receiving personal information from a third party, is it clearly stated in the contract that the responsibility for obtaining consent for the collection of personal information lies with the party providing the information?","When collecting personal information from public media or places, is the collection limited to the scope recognized as having the data subject's consent, based on common societal standards?","Even for personal information collected or generated through automated collection devices during the process of providing services, is the principle of minimum collection applied?","When personal information is collected from a source other than the data subject and the data subject requests it, is the required information immediately provided to the data subject?","When personal information collected from a source other than the data subject meets legal requirements in terms of type or scale, is the required information provided to the data subject?","Is there a record of informing the data subject about the source of personal information, and is this record maintained until the personal information is destroyed?"],"NonComplianceCases": ["Case 1: In the case of collecting personal information published on websites or social media, there is no procedure for handling requests from data subjects about the source of the information.","Case 2: Personal information provided by another business entity was received based on consent for the provision of personal information under Article 17(1)(1) of the Personal Information Protection Act, but the data subjects were not notified within three months (note: this applies to processors handling the sensitive information or unique identifying information of 50,000 or more data subjects, or the personal information of 1 million or more data subjects).","Case 3: The data subject was informed about the source of the personal information as required by law, but some mandatory notification items were omitted, such as the purpose of processing or the right to withdraw consent.","Case 4: The data subject was informed about the source of the personal information, but the record of this notification was not maintained until the personal information was destroyed, in violation of legal obligations."],"RelatedRegulations": ["Personal Information Protection Act, Article 16 (Restrictions on the Collection of Personal Information), Article 19 (Restrictions on Use and Provision of Personal Information Provided by a Third Party), Article 20 (Notification of the Source, Purpose, etc. of Indirectly Collected Personal Information)"]}],"description": "When collecting personal information from sources other than the data subject or when receiving personal information from a third party, only the minimum amount of personal information necessary for the task should be collected or received. 
If there is a legal basis or at the request of the data subject, the source of the personal information, the purpose of processing, and the right to request a suspension of processing must be disclosed.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.6": {"name": "Installation and Operation of Video Information Processing Devices","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.1.6 Installation and Operation of Video Information Processing Devices","Subdomain": "3.1. Protection Measures for Personal Information Collection","AuditEvidence": ["Status of video information processing device operation","Signs for video information processing devices","Video information processing device operation and management policies","Management screens for video information processing devices (e.g., account/permission details, video retention periods)","Contracts with operators of video information processing devices and inspection records"],"AuditChecklist": ["When installing and operating fixed video information processing devices in public places, is it reviewed whether the installation meets legal requirements?","If public institutions install and operate fixed video information processing devices in public places, are public hearings or explanation sessions held to gather opinions from relevant experts and stakeholders, as required by law?","When installing and operating fixed video information processing devices, are necessary measures taken, such as installing signs to ensure the data subject can easily recognize the presence of the devices?","When operating mobile video information processing devices in public places for business purposes, is it reviewed whether the operation meets legal requirements?","When operating mobile video information processing devices in public places for business purposes, is the fact that the video is being recorded indicated and informed to the public through lights, sounds, or signs?","Is there an operation and management policy in place for the safe management of video information processing devices and the video information they record, and is it being implemented?","Is the retention period for video information set, and is the information destroyed without delay after the retention period expires?","When outsourcing the operation of video information processing devices, are the related procedures and requirements reflected in the contract?"],"NonComplianceCases": ["Case 1: The wording on the signs for video information processing devices is incomplete, or there is no established and implemented policy for the operation and management of video information processing devices.","Case 2: Although there is a policy for the operation and management of video information processing devices, the policy is not followed, such as failing to comply with the retention period or failing to implement access control and logging as described in the policy.","Case 3: The operation of video information processing devices is outsourced, but the legal requirements, such as inspection of the video information management status and provisions regarding liability for damages, are not reflected in the contract.","Case 4: The operation of video information processing devices is outsourced, but the signs for the devices do not include the name and contact information of the contractor."],"RelatedRegulations": ["Personal Information Protection Act, Article 25 (Restrictions on the Installation and 
Operation of Fixed Video Information Processing Devices), Article 25-2 (Restrictions on the Operation of Mobile Video Information Processing Devices)"]}],"description": "When installing and operating fixed video information processing devices in public places or operating mobile video information processing devices in public places for business purposes, legal requirements must be followed according to the purpose and location of the installation, and appropriate protection measures must be established and implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.7": {"name": "Collection and Use of Personal Information for Marketing Purposes","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.1.7 Collection and Use of Personal Information for Marketing Purposes","Subdomain": "3.1. Protection Measures for Personal Information Collection","AuditEvidence": ["Online personal information collection forms (e.g., website membership sign-up, mobile app sign-up, event participation)","Offline personal information collection forms (e.g., membership application forms)","Marketing consent records","Records of consent for receiving advertising information and confirmation of consent","Administrator screen for advertising information transmission systems (e.g., email, SMS, app push notifications)","Advertising information transmission content","Personal information processing policy"],"AuditChecklist": ["When obtaining consent from data subjects to process personal information for the purpose of promoting or recommending goods or services, is the data subject clearly informed, and is separate consent obtained?","When sending advertising information for profit using electronic transmission media, is the recipient's explicit prior consent obtained, and is the consent reconfirmed every two years?","When a recipient indicates refusal or withdraws prior consent for receiving advertising information for profit, is the transmission of such advertising information stopped?","When sending advertising information for profit, is the sender's name, method for opting out, etc., clearly stated, and are such messages not sent during nighttime hours?"],"NonComplianceCases": ["Case 1: When collecting personal information for 'promotion and marketing' purposes, the purpose is vaguely explained (e.g., 'providing additional services', 'providing partner services') or blanket consent is obtained without distinguishing between different purposes.","Case 2: Even after a user has expressed refusal to receive advertising push notifications via a mobile app, such notifications are sent due to a program error.","Case 3: The option to receive advertising information via text messages or email is pre-selected by default on the online sign-up page.","Case 4: The recipient's consent for receiving advertising information is not reconfirmed every two years.","Case 5: When sending advertising information for profit via email, the subject line does not begin with '(Advertisement)'."],"RelatedRegulations": ["Personal Information Protection Act, Article 22 (Method of Obtaining Consent)","Information and Communications Network Act, Article 50 (Restrictions on Transmission of Advertising Information)"]}],"description": "When collecting and using personal information for marketing purposes, such as promoting goods or services, sales recommendations, or sending advertising information, the purpose must be clearly communicated to the 
data subject, and their consent must be obtained.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.1": {"name": "Management of Personal Information Status","checks": {"macie_is_enabled": "PASS","s3_bucket_lifecycle_enabled": "FAIL"},"status": "FAIL","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.2.1 Management of Personal Information Status","Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information","AuditEvidence": ["Personal information status table","Personal information flowchart","Registration status of personal information files","Personal information file management ledger","Personal information processing policy-related personal information files","Personal information files related to investigations under the Punishment of Tax Offenses Act and the Customs Act","Personal information files for one-time operations deemed to have a low need for continuous management as determined by Presidential Decree","Personal information files for simple tasks such as attending meetings, sending documents or materials, and financial settlements, which have a low need for continuous management","Personal information files processed temporarily for public health or public safety emergencies","Other personal information files collected for one-time tasks that are not stored or recorded","Personal information files classified as confidential under other laws","Personal information files collected or requested for analysis related to national security","Personal video information files processed via video information processing devices","Personal information files retained by financial institutions for handling financial transactions under the Real Name Financial Transactions and Guarantee of Secrecy Act"],"AuditChecklist": ["Is the status of collected and retained personal information, including the items, volume, purpose and method of processing, and retention period, regularly managed?","When a public institution operates or modifies personal information files, are the relevant matters registered with the head of the relevant agency as required by law?","Does the public institution disclose the status of personal information files in the personal information processing policy?"],"NonComplianceCases": ["Case 1: Although personal information files are managed through the website's personal information file registration menu, some personal information files related to website services are missing from the personal information processing policy.","Case 2: Although two months have passed since a new personal information file was created, it has not been registered with the Personal Information Protection Commission.","Case 3: The content of personal information files registered and disclosed with the Personal Information Protection Commission (e.g., items of personal information collected) does not match the actual status of personal information files being processed.","Case 4: A public institution has not registered personal information files with the Personal Information Protection Commission, even though the files do not qualify for exceptions such as employee personal information files or personal information files collected under the Statistics Act."],"RelatedRegulations": ["Personal Information Protection Act, Article 32 (Registration and Disclosure of Personal Information Files)"]}],"description": "The items, volume, purpose and method of processing, and retention period of collected and 
retained personal information must be regularly managed. In the case of public institutions, such information must be registered with the head of the relevant agency as stipulated by law.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"3.2.2": {"name": "Personal Information Quality Assurance","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.2.2 Personal Information Quality Assurance","Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information","AuditEvidence": ["Form for data subjects to modify/update their personal information (online, offline)","Procedures to maintain the up-to-date status of personal information"],"AuditChecklist": ["Are procedures and methods in place to maintain personal information in an accurate and up-to-date state?","Is there a method provided for data subjects to ensure the accuracy, completeness, and up-to-dateness of their personal information?"],"NonComplianceCases": ["Case 1: Although an identity verification process is implemented when changing member information through the website, the identity verification process is insufficient when changing member information via customer service, making unauthorized changes possible.","Case 2: While an online method is provided for online members to change their personal information, no such method is provided for offline members."],"RelatedRegulations": ["Personal Information Protection Act, Article 3 (Principles of Personal Information Protection)"]}],"description": "Collected personal information must be managed to ensure its accuracy, completeness, and up-to-dateness within the scope necessary for the processing purpose, and procedures must be provided to data subjects to manage their information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.3": {"name": "Protection of User Device Access","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.2.3 Protection of User Device Access","Subdomain": "3.2. 
Protection Measures for Retention and Use of Personal Information","AuditEvidence": ["App access rights consent screen","App access rights settings"],"AuditChecklist": ["When accessing information stored on the user's mobile device or functions installed on the device, are users clearly informed and their consent obtained?","Is it ensured that services are not denied if users do not consent to access rights that are not essential for the service?","Are methods provided for users to consent to or withdraw access rights on their mobile devices?"],"NonComplianceCases": ["Case 1: A smartphone app requests excessive access to personal information areas such as contacts, photos, and messages, even though such access is unnecessary for the service.","Case 2: A service provider's smartphone app accesses information stored on the smartphone and installed functions without notifying the user and obtaining their consent.","Case 3: Consent is obtained for app access rights by informing users that optional permissions are required as essential permissions.","Case 4: A smartphone app supports Android versions below 6.0, where individual consent for access rights is not possible, making it impossible for users to reject optional access rights."],"RelatedRegulations": ["Information and Communications Network Act, Article 22-2 (Consent for Access Rights)"]}],"description": "When accessing information stored on the user's mobile device or functions installed on the mobile device, it is necessary to notify the user clearly and obtain their consent.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.4": {"name": "Use and Provision of Personal Information Beyond Purpose","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.2.4 Use and Provision of Personal Information Beyond Purpose","Subdomain": "3.2. 
Protection Measures for Retention and Use of Personal Information","AuditEvidence": ["Records of personal information use and provision beyond the original purpose (including related evidence such as requests)","Log of personal information use and provision beyond the original purpose (for public institutions)","Records of publication in the official gazette or on the website (for public institutions)","Guidelines for handling information provision requests","Official documents requesting information provision and records of personal information provision"],"AuditChecklist": ["Is personal information used or provided only within the scope of the purpose consented to by the data subject at the time of collection or as permitted by law?","When receiving personal information from a personal information processor, is the information used or provided only within the scope of the purpose for which it was provided?","If personal information is used or provided beyond the scope of the purpose of collection or the purpose for which it was received from a personal information processor, is additional consent obtained from the data subject or limited to cases with a legal basis?","When providing personal information to a third party for purposes beyond the original purpose, is the recipient required to take necessary actions to restrict the use of personal information and ensure safety?","When public institutions use or provide personal information beyond the original purpose, are the legal basis, purpose, and scope published in the official gazette or on the internet?","When public institutions use or provide personal information beyond the original purpose, is there a record of such use or provision and are procedures in place for managing it?"],"NonComplianceCases": ["Case 1: Personal information collected for product delivery is used for telemarketing of other company products without prior consent.","Case 2: Personal information collected for customer satisfaction surveys or sweepstakes entries is used for advertising other promotional events without consent.","Case 3: A public institution provides personal information to another institution for purposes outside the scope of the original purpose based on legal grounds but does not publish the information in the official gazette or on the internet.","Case 4: A public institution provides personal information to a police department for criminal investigation purposes but fails to record the details in the log of personal information use and provision beyond the original purpose."],"RelatedRegulations": ["Personal Information Protection Act, Article 18 (Restriction on the Use and Provision of Personal Information Beyond the Original Purpose), Article 19 (Restriction on Use and Provision by Recipients of Personal Information)"]}],"description": "Personal information must only be used or provided within the scope notified and consented to by the data subject at the time of collection or as permitted by law. If personal information is to be used or provided beyond this scope, additional consent must be obtained from the data subject or the legality must be verified, and appropriate protective measures must be established and implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.5": {"name": "Processing of Pseudonymized Information","checks": {},"status": "PASS","attributes": [{"Domain": "3. 
Requirements for Each Stage of Personal Information Processing","Section": "3.2.5 Processing of Pseudonymized Information","Subdomain": "3.2. Protection Measures for Retention and Use of Personal Information","AuditEvidence": ["Procedures and results of the adequacy review of pseudonymization/anonymization","Records of pseudonymized information processing","Privacy policy (regarding the use and provision of pseudonymized information)"],"AuditChecklist": ["When processing pseudonymized information, are procedures established for purpose limitation, pseudonymization methods and standards, adequacy review, prohibition of re-identification, and actions in case of re-identification?","When using or providing pseudonymized personal information, is the information pseudonymized to a level where individuals cannot be identified without using or combining additional information?","When combining pseudonymized information with that of other personal information processors, is the combination conducted through a specialized agency or data professional organization?","When processing pseudonymized information, are technical, administrative, and physical measures taken to ensure safety, such as deleting or separately storing additional information and keeping records?","Is the processing period for pseudonymized information set to an appropriate period considering the processing purpose, and is the information destroyed without delay when that period expires?","When anonymizing personal information, is the information anonymized to a level where individuals cannot be identified even with the use of additional information, considering the time, cost, and technology available?"],"NonComplianceCases": ["Case 1: When processing pseudonymized information for statistical purposes or scientific research without obtaining consent from data subjects, records of the pseudonymization process were not kept, or the privacy policy was not updated to include relevant information.","Case 2: Additional information was not stored separately from pseudonymized information in the same database, or access rights to both sets of information were not appropriately segregated.","Case 3: Although pseudonymized personal information was used, the pseudonymization process was not sufficient, making it possible to identify individuals by combining the information with other data without using additional information.","Case 4: Personal information was anonymized for generating test data or for public release, but due to outliers or other factors, it was still possible to identify individuals, indicating that the anonymization process was not sufficient."],"RelatedRegulations": ["Personal Information Protection Act, Article 2 (Definitions), Article 28-2 (Processing of Pseudonymized Information), Article 28-3 (Restrictions on Combining Pseudonymized Information), Article 28-4 (Obligations for the Safe Processing of Pseudonymized Information), Article 28-5 (Prohibition of Re-identification in Processing Pseudonymized Information), Article 28-7 (Scope of Application), Article 58-2 (Exemptions)"]}],"description": "When processing pseudonymized information, legal requirements such as purpose limitation, combination restrictions, safety measures, and prohibition obligations must be met, and procedures must be established and implemented to ensure an appropriate level of pseudonymization.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.3.1": {"name": "Provision of Personal Information to Third Parties","checks": 
{},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.3.1 Provision of Personal Information to Third Parties","Subdomain": "3.3. Protective Measures When Providing Personal Information","AuditEvidence": ["Forms related to the provision of personal information to third parties online (e.g., membership registration page, consent for third-party provision on websites)","Forms related to the provision of personal information to third parties offline (e.g., membership application forms, consent forms for third-party provision)","Records of third-party provisions","Privacy policy"],"AuditChecklist": ["When providing personal information to third parties, are legal requirements such as consent from the data subject or compliance with legal obligations clearly identified and followed?","When obtaining consent from the data subject for the provision of personal information to third parties, are the related matters clearly communicated, and is consent legally obtained by distinguishing it from other consents?","When obtaining consent from the data subject for the provision of personal information to third parties, are important matters clearly indicated and made easily understandable as required by law?","When providing personal information to third parties, is the information limited to the minimum necessary for the intended purpose?","When providing personal information to third parties, is it done through secure procedures and methods, and is the provision recorded and stored?","When allowing third parties to access personal information, is control implemented in accordance with protection procedures to securely protect the personal information?","When providing additional personal information without the data subject's consent, are criteria for determining the relevance to the original purpose of collection, predictability, potential harm, and safety measures established and followed? 
If such provisions continue, are these criteria disclosed in the privacy policy and periodically reviewed?"],"NonComplianceCases": ["Case 1: When obtaining consent from the data subject for the provision of personal information to third parties, some necessary information (e.g., the right to refuse consent, the items provided) was omitted.","Case 2: In the process of providing personal information to third parties, personal information from data subjects who did not consent was provided due to improper verification of consent.","Case 3: When obtaining consent for the provision of personal information, the recipient was not specifically identified and was vaguely referred to as '~ etc.' in the consent.","Case 4: Although third-party provision consent was optional during the membership registration process, if the data subject did not agree to third-party provision, the registration process could not be completed.","Case 5: An excessive amount of personal information was provided beyond what was necessary for the recipient's purpose of use."],"RelatedRegulations": ["Personal Information Protection Act, Article 17 (Provision of Personal Information), Article 22 (Methods of Obtaining Consent)","Notification on the Methods of Processing Personal Information"]}],"description": "When providing personal information to third parties, there must be a legal basis or consent from the data subject, and protection measures must be established and implemented to securely protect personal information during the process of providing access to third parties.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.3.2": {"name": "Outsourcing of Personal Information Processing","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.3.2 Outsourcing of Personal Information Processing","Subdomain": "3.3. 
Protective Measures When Providing Personal Information","AuditEvidence": ["Privacy policy (disclosing details related to the outsourcing of personal information processing)","Personal information collection forms","Contracts for outsourcing personal information processing","Records of notifications to data subjects regarding outsourced tasks related to promoting or selling goods or services"],"AuditChecklist": ["When outsourcing personal information processing tasks (including sub-outsourcing) to third parties, are the details of the outsourced tasks and the trustees regularly updated and disclosed on the website?","When outsourcing tasks related to promoting or selling goods or services, is the data subject notified of the details of the outsourced tasks and the trustees through methods such as written notice, email, or text messages?"],"NonComplianceCases": ["Case 1: Although the details of the outsourcing of personal information processing tasks were disclosed on the website's privacy policy, some trustees and the details of the outsourced tasks were missing.","Case 2: When outsourcing tasks related to promoting or selling goods or services, the details of the outsourced tasks and trustees were not notified to the data subject through written methods, and instead, the information was disclosed only in the privacy policy.","Case 3: After terminating a contract with an existing trustee for personal information processing, the new trustee was not promptly reflected in the privacy policy.","Case 4: Although the trustee sub-outsourced the personal information processing tasks to a third party, the sub-outsourcing details were not disclosed on the website."],"RelatedRegulations": ["Personal Information Protection Act, Article 26 (Restrictions on the Processing of Personal Information by Outsourcing)"]}],"description": "When outsourcing personal information processing tasks to third parties, the details of the outsourced tasks and the trustee must be disclosed. Additionally, if the task involves promoting or selling goods or services, the details of the outsourced task and the trustee must be notified to the data subject.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.3.3": {"name": "Transfer of Personal Information Due to Business Transfers","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.3.3 Transfer of Personal Information Due to Business Transfers","Subdomain": "3.3. 
Protective Measures When Providing Personal Information","AuditEvidence": ["Records of notifications to data subjects regarding the transfer of personal information (during business transfers)","Privacy policy"],"AuditChecklist": ["When transferring personal information to another party due to the transfer or merger of all or part of the business, are the necessary matters communicated to the data subjects in advance?","When receiving personal information, does the recipient notify the data subjects without delay regarding the fact that personal information has been received and other necessary matters, if legally required?","Does the recipient of the personal information use the information only for its original purpose at the time of transfer, or provide it to third parties in compliance with the original purpose?"],"NonComplianceCases": ["Case 1: When receiving personal information through a business acquisition, the recipient did not notify the data subjects of the transfer, even though the transferring party had also failed to notify them of it.","Case 2: When receiving personal information through business acquisition or merger, no procedures or methods were provided to allow data subjects to opt out of the transfer, nor were such options communicated to the data subjects."],"RelatedRegulations": ["Personal Information Protection Act, Article 27 (Restrictions on the Transfer of Personal Information Due to Business Transfers)"]}],"description": "When transferring or receiving personal information due to business transfers or mergers, appropriate protection measures such as notifying the data subjects must be established and implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.3.4": {"name": "Transfer of Personal Information Abroad","checks": {"s3_bucket_cross_region_replication": "FAIL"},"status": "FAIL","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.3.4 Transfer of Personal Information Abroad","Subdomain": "3.3. 
Protective Measures When Providing Personal Information","AuditEvidence": ["Consent form for personal information transfer abroad","Contract related to personal information transfer abroad","Privacy policy","Notification or disclosure records regarding outsourcing or storage of personal information abroad"],"AuditChecklist": ["When transferring personal information abroad, has the data subject been fully informed of all legally required matters and separate consent obtained, or has the transfer been covered by certification or recognition, as required by law?","When informing the data subject about the outsourcing or storage of personal information abroad for the purpose of contract execution, are all necessary details included and communicated appropriately?","Has a contract for the transfer of personal information abroad been established, including compliance with personal information protection laws?","Are necessary measures being taken to protect personal information when transferring it abroad?"],"NonComplianceCases": ["Case 1: Personal information was provided to a foreign company during processing, but separate consent for the transfer of personal information abroad was not obtained, even though the conditions for consent exemption (such as certification or recognition by the recipient country) were not met.","Case 2: While using foreign cloud services (foreign regions) for outsourcing and storing personal information, the relevant details, such as the destination country and transfer method, were not disclosed in the privacy policy or communicated to the data subject.","Case 3: While obtaining consent for the transfer of personal information abroad, only the name of the recipient (company name) was disclosed, and the destination country was not properly notified."],"RelatedRegulations": ["Personal Information Protection Act, Articles 28-8 (Transfer of Personal Information Abroad), 28-9 (Order to Suspend Transfer of Personal Information Abroad), 28-10 (Reciprocity), 28-11 (Applicable Provisions)","Regulations on the Operation of Personal Information Transfer Abroad"]}],"description": "When transferring personal information abroad, appropriate protective measures such as obtaining consent and disclosing relevant details about the transfer must be established and implemented.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.4.1": {"name": "Destruction of Personal Information","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.4.1 Destruction of Personal Information","Subdomain": "3.4. 
Protection Measures When Destroying Personal Information","AuditEvidence": ["Regulations regarding the retention period and destruction of personal information","Destruction results (e.g., from member databases)","Personal information destruction management records"],"AuditChecklist": ["Has an internal policy been established regarding the retention period and destruction of personal information?","Is personal information being destroyed without delay when the processing purpose is achieved or the retention period has expired?","Is personal information destroyed using safe methods that prevent recovery or reconstruction?","Are records kept of the destruction of personal information and managed properly?"],"NonComplianceCases": ["Case 1: When a member withdraws or the purpose of retention is achieved, personal information was destroyed from the member database, but not from associated systems (CRM, DW) where duplicate personal information was stored.","Case 2: Personal information collected during a specific event was not destroyed or no destruction policy was established, even after the event ended.","Case 3: Personal information collected through a call center (such as call logs, recordings) is retained for three years under the Electronic Commerce Act, but the information was not destroyed even after three years had passed.","Case 4: Due to technical limitations, such as using blockchain, it was not possible to completely destroy personal information, so it was anonymized instead. However, the anonymization process was not done properly, allowing partial re-identification of personal information."],"RelatedRegulations": ["Personal Information Protection Act, Article 21 (Destruction of Personal Information)","Standards for Ensuring the Safety of Personal Information, Article 13 (Destruction of Personal Information)"]}],"description": "The organization must establish an internal policy regarding retention periods and destruction of personal information. When the retention period has expired or the purpose of processing has been achieved, personal information must be destroyed without delay using methods that ensure safety and completeness.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.4.2": {"name": "Measures When Retaining Personal Information After Purpose Is Achieved","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.4.2 Measures When Retaining Personal Information After Purpose Is Achieved","Subdomain": "3.4. 
Protection Measures When Destroying Personal Information","AuditEvidence": ["Regulations regarding the retention period and destruction of personal information","Current status of separated databases (table structure, etc.)","Access permissions for separated databases"],"AuditChecklist": ["When personal information is retained beyond the retention period or after the processing purpose has been achieved, in accordance with relevant laws, is it limited to the minimum necessary period and only the minimum necessary information?","When personal information is retained beyond the retention period or after the processing purpose has been achieved, is it stored separately from other personal information?","Is personal information that is stored separately processed only within the scope allowed by law?","Is access to separately stored personal information limited to the minimum number of personnel?"],"NonComplianceCases": ["Case 1: Information from withdrawn members was not destroyed but kept for a certain period under the Electronic Commerce Act, with only the flag value changed, and stored in the same table as other member information.","Case 2: Records related to consumer complaints and disputes were kept for five years instead of the required three years, due to misinterpretation of legal requirements.","Case 3: Although a separate database was set up, access permissions were not appropriately configured, allowing personnel who did not require access to view the separated database.","Case 4: Information from withdrawn members was stored separately in accordance with the Electronic Commerce Act, but excessive optional information was also stored, even though there was no legal obligation to do so."],"RelatedRegulations": ["Personal Information Protection Act, Article 21 (Destruction of Personal Information)"]}],"description": "If personal information is retained beyond the retention period or after the purpose of processing has been achieved, as permitted by relevant laws, it must be limited to the minimum necessary items and stored separately from other personal information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.5.1": {"name": "Disclosure of Privacy Policy","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.5.1 Disclosure of Privacy Policy","Subdomain": "3.5. 
Protection of Data Subject's Rights","AuditEvidence": ["Privacy policy","Records of privacy policy amendments (e.g., board notices)"],"AuditChecklist": ["Is the privacy policy written in clear and easy-to-understand language, covering all the contents required by law?","Is the privacy policy continuously updated and made easily accessible to data subjects via the internet or other means?","When the privacy policy is updated, are the reasons for the changes and the contents of the changes promptly notified, and can the data subjects easily recognize the changes at any time?"],"NonComplianceCases": ["Case 1: The privacy policy discloses information about the collection and provision of personal information, but the actual details differ from what is being collected and provided.","Case 2: Changes such as the replacement of the privacy officer or changes in subcontractors have occurred, but these changes have not been reflected in the privacy policy.","Case 3: The privacy policy is disclosed, but it is labeled 'Privacy Protection Policy' instead of 'Privacy Policy,' and its visibility is not enhanced with larger font sizes or color to make it easy for data subjects to find.","Case 4: Several amendments have been made to the privacy policy, but older versions of the policy are not made available for review.","Case 5: Although personal information is retained in compliance with laws such as the Electronic Commerce Act and the Commercial Act, the legal grounds for retention and the retained personal information items are not disclosed in the privacy policy."],"RelatedRegulations": ["Personal Information Protection Act, Article 30 (Establishment and Disclosure of Privacy Policy), Article 30-2 (Evaluation and Improvement Recommendations for Privacy Policy)"]}],"description": "A privacy policy must be established to include all necessary information, such as the purpose of personal information processing, in a way that is easy for data subjects to understand. The policy must be disclosed through appropriate methods so that data subjects can easily access it at any time, and it must be continuously updated.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.5.2": {"name": "Guaranteeing Data Subject's Rights","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.5.2 Guaranteeing Data Subject's Rights","Subdomain": "3.5. 
Protection of Data Subject's Rights","AuditEvidence": ["Privacy policy","Procedures and forms for handling Requests for Access, etc.","Records of actions taken in response to Requests for Access, etc.","Procedures for member withdrawal and consent withdrawal"],"AuditChecklist": ["Are procedures in place to ensure that data subjects or their representatives can exercise their rights (hereinafter referred to as 'Requests for Access, etc.') to access, rectify, delete, or suspend the processing of their personal information in a way that is not more difficult than the process used for collecting it?","When data subjects or their representatives submit Requests for Access, etc., are the necessary measures taken within the required time frame?","When data subjects withdraw their consent to the collection, use, or provision of their personal information, are the collected personal information and associated data promptly deleted or otherwise handled appropriately?","Are appropriate procedures in place to allow data subjects to object to the actions taken regarding their Requests for Access, etc., and are they informed of these procedures?","Are records kept of data subjects' Requests for Access, etc., and the resulting actions?","When the rights of others, such as privacy or honor, are violated on information networks, does the organization have procedures for the affected individuals to request the deletion of the information from service providers, and are these procedures being implemented?"],"NonComplianceCases": ["Case 1: The method for requesting access, rectification, deletion, or suspension of personal information is not disclosed in a way that data subjects can easily find.","Case 2: There has been no response to access requests for personal information within 10 days, without any valid reason.","Case 3: Records of actions taken in response to personal information access requests are not maintained.","Case 4: Access notifications are being sent without verifying whether the requester is the data subject or their legitimate representative.","Case 5: There has been a failure to respond to rectification or deletion requests within 10 days.","Case 6: It was easy to sign up online as a member, but to withdraw membership, additional documents such as ID must be submitted, or in-person visits are required."],"RelatedRegulations": ["Personal Information Protection Act, Article 34-2 (Deletion or Blocking of Exposed Personal Information), Article 35 (Access to Personal Information), Article 35-2 (Right to Data Portability), Article 36 (Rectification or Deletion of Personal Information), Article 37 (Suspension of Processing, etc.), Article 37-2 (Right of Data Subjects to Contest Automated Decisions), Article 38 (Methods and Procedures for Exercising Rights)","Information and Communications Network Act, Article 44 (Protection of Rights in Information Networks), Article 44-2 (Request for Deletion of Information, etc.), Article 44-3 (Temporary Measures)"]}],"description": "Procedures must be established and implemented to ensure that data subjects can easily exercise their rights, such as requesting access, rectification, deletion, suspension of processing, objection, or withdrawal of consent, through simpler processes than those used for collecting their information. When a request is received, it must be processed without delay, and records must be kept. 
Measures such as deletion requests and temporary actions must be taken to prevent the distribution of information that infringes on the rights of others, such as invasion of privacy or defamation.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.5.3": {"name": "Notification to Data Subjects","checks": {},"status": "PASS","attributes": [{"Domain": "3. Requirements for Each Stage of Personal Information Processing","Section": "3.5.3 Notification to Data Subjects","Subdomain": "3.5. Protection of Data Subject's Rights","AuditEvidence": ["Records of notifications regarding the use and provision of personal information","Forms and wording used for notifications regarding the use and provision of personal information"],"AuditChecklist": ["If the organization is legally obligated to do so, does it periodically notify data subjects of the use and provision of their personal information, or provide them with access to an information system where they can review such details?","Do the notification items regarding the use and provision of personal information include all legally required elements?"],"NonComplianceCases": ["Case 1: Although the organization is obligated to notify data subjects of the use and provision of their personal information (having handled the personal information of more than 1 million data subjects on a daily average over the three months preceding the end of the previous year), no notifications were sent during the year.","Case 2: Instead of directly notifying individual data subjects, notifications about the use and provision of personal information were made through simple pop-ups or general announcements on the website."],"RelatedRegulations": ["Personal Information Protection Act, Article 20-2 (Notification of Use and Provision of Personal Information)"]}],"description": "The organization must identify matters that must be notified to data subjects, such as the use and provision of personal information, and periodically inform the data subjects of these matters.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.1": {"name": "Security System Operation","checks": {"kms_cmk_are_used": null,"macie_is_enabled": "PASS","securityhub_enabled": "PASS","fms_policy_compliant": null,"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","elbv2_waf_acl_attached": "FAIL","kms_cmk_rotation_enabled": null,"ec2_securitygroup_not_used": "FAIL","guardduty_centrally_managed": "FAIL","wafv2_webacl_logging_enabled": "FAIL","ssm_managed_compliant_patching": "FAIL","kms_key_not_publicly_accessible": null,"ssmincidents_enabled_with_plans": null,"inspector2_active_findings_exist": "FAIL","cloudfront_distributions_using_waf": null,"cognito_user_pool_waf_acl_attached": null,"trustedadvisor_errors_and_warnings": null,"apigateway_restapi_waf_acl_attached": "FAIL","config_recorder_all_regions_enabled": null,"guardduty_no_high_severity_findings": "FAIL","ec2_securitygroup_from_launch_wizard": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","organizations_delegated_administrators": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","secretsmanager_automatic_rotation_enabled": "FAIL","ec2_instance_port_cifs_exposed_to_internet": "PASS","ec2_instance_port_ldap_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": 
"FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_instance_port_kafka_exposed_to_internet": "PASS","ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_telnet_exposed_to_internet": "PASS","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_kerberos_exposed_to_internet": "PASS","ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_memcached_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","cloudwatch_log_metric_filter_sign_in_without_mfa": null,"ec2_instance_port_postgresql_exposed_to_internet": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","shield_advanced_protection_in_global_accelerators": null,"ec2_instance_internet_facing_with_instance_profile": "FAIL","shield_advanced_protection_in_route53_hosted_zones": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_authentication_failures": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","shield_advanced_protection_in_internet_facing_load_balancers": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Security Control Requirements","Section": "2.10.1 Security System Operation","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Security system configuration","Network configuration","Security system operational procedures","Firewall policies","Firewall policy setup/modification request forms","Exception list for the security system","Management screens for each security system (firewall, IPS, server access control, DLP, DRM, etc.)","Security system policy review history"],"AuditChecklist": ["Has the organization established and implemented operational procedures for the security systems in use?","Is access to the security system administrators limited to a minimum, and is unauthorized access strictly controlled?","Has the organization established and implemented formal procedures for registering, modifying, and deleting policies for each security system?","Are exception policies for the security system managed according to procedures, and are users of exception policies managed with the minimum privileges?","Is the validity of the policies set on the security system periodically reviewed?","Has the organization installed and operated security systems that perform functions specified by law to prevent illegal access and data leakage in personal information processing systems?"],"NonComplianceCases": ["Case 1: Regular reviews of the security policies for the intrusion prevention system were not conducted, resulting in unnecessary or excessively permissive policies.","Case 2: There are no procedures or criteria for applying, modifying, or deleting security policies, or such procedures exist but are not followed.","Case 3: The assignment and supervision of administrators for the security system were not properly implemented.","Case 4: Although internal guidelines stipulate that the information security officer must record and maintain the history of security policy changes for the security system, the policy management ledger was not periodically maintained, or the policies recorded in the ledger did not match those actually applied in the operating system."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 6 (Access Control)"]}],"description": "For each type of security system, an administrator must be designated, and operational procedures such as updating to the latest policies, modifying rule sets, and monitoring events must be established and implemented. The status of policy application for each security system must be managed.","checks_status": {"fail": 16,"pass": 39,"total": 75,"manual": 0}},"2.10.2": {"name": "Cloud Security","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protective Measure Requirements","Section": "2.10.2 Cloud Security","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Cloud service-related contracts and SLA","Cloud service risk analysis results","Cloud service security control policies","Cloud service administrator privilege assignment status","Cloud service architecture diagram","Cloud service security setting status","Cloud service security setting appropriateness review history"],"AuditChecklist": ["Is the responsibility and role for information protection and personal information protection clearly defined with the cloud service provider, and is it reflected in contracts (such as SLA)?","Are security risks based on the service type evaluated when using cloud services, and are security control policies established and implemented, including security configurations and setting standards, security setting changes and approval procedures, secure connection methods, and authority systems to prevent unauthorized access and configuration errors?","Are administrator privileges for cloud services granted minimally according to roles, and are enhanced protection measures such as strengthened authentication, encryption, access control, and audit logs applied to prevent unauthorized access and abuse of privileges?","Is the monitoring of cloud service security setting changes and operation status conducted, and is the appropriateness of these settings reviewed regularly?"],"NonComplianceCases": ["Case 1: The cloud service contract does not include responsibilities and roles related to security.","Case 2: Employees without a business need have been excessively granted permissions to change the security settings of the cloud service.","Case 3: Internal guidelines require security officer approval when changing access control rules in the private network of the cloud, but many access control rules were registered or changed without following the approval procedure.","Case 4: Due to security setting errors in the cloud service, internal log files were exposed to the internet."],"RelatedRegulations": []}],"description": "When using cloud services, protection measures must be established and implemented for administrator access and security settings to prevent unauthorized access and configuration errors that could lead to the leakage or exposure of critical information and personal data, depending on the service type (SaaS, PaaS, IaaS, etc.).","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.3": {"name": "Public Server Security","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","ssm_document_secrets": "PASS","elbv2_waf_acl_attached": "FAIL","elb_insecure_ssl_ciphers": "PASS","apigateway_restapi_public": "FAIL","lightsail_database_public": null,"lightsail_instance_public": null,"elbv2_insecure_ssl_ciphers": "PASS","lightsail_static_ip_unused": null,"networkfirewall_in_all_vpc": "FAIL","ec2_instance_imdsv2_enabled": "PASS","elbv2_desync_mitigation_mode": "FAIL","awslambda_function_inside_vpc": "FAIL","awslambda_function_url_public": null,"ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","ssm_managed_compliant_patching": "FAIL","inspector2_active_findings_exist": "FAIL","acm_certificates_expiration_check": "PASS","awslambda_function_url_cors_policy": null,"cloudfront_distributions_using_waf": null,"vpc_subnet_separate_private_public": "FAIL","apigateway_restapi_waf_acl_attached": "FAIL","apigatewayv2_api_authorizers_enabled": "FAIL","awslambda_function_no_secrets_in_code": "PASS","ec2_networkacl_allow_ingress_any_port": 
"FAIL","apigateway_restapi_authorizers_enabled": "PASS","cloudfront_distributions_https_enabled": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","apigateway_restapi_public_with_authorizer": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","awslambda_function_no_secrets_in_variables": "PASS","awslambda_function_not_publicly_accessible": "PASS","ec2_instance_port_cifs_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","route53_domains_privacy_protection_enabled": null,"ec2_instance_port_kafka_exposed_to_internet": "PASS","ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_telnet_exposed_to_internet": "PASS","apigateway_restapi_client_certificate_enabled": "FAIL","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_kerberos_exposed_to_internet": "PASS","ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_memcached_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","kafka_cluster_mutual_tls_authentication_enabled": null,"ec2_instance_port_postgresql_exposed_to_internet": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","autoscaling_find_secrets_ec2_launch_configuration": "PASS","ec2_instance_internet_facing_with_instance_profile": "FAIL","cloudfront_distributions_using_deprecated_ssl_protocols": null,"ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Protective Measure Requirements","Section": "2.10.3 Public Server Security","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Network diagram","Website information disclosure procedures and history (e.g., requests, approvals, posting history)","Inspection history of personal and sensitive information exposure"],"AuditChecklist": ["Are protective measures established and implemented for the operation of public servers?","Are public servers installed in a DMZ separated from internal networks and protected by security systems such as firewalls?","When posting or storing personal or sensitive information on public servers, are approval and posting procedures, including obtaining approval from the responsible person, established and followed?","Does the organization regularly check whether sensitive information is being exposed through websites and web servers, and if exposure is detected, are measures taken immediately to block it?"],"NonComplianceCases": ["Case 1: Due to vulnerabilities in publicly exposed websites, unauthorized individuals were able to access others' personal information through Google search.","Case 2: Although internal regulations require approval procedures before posting personal information on websites, there were multiple cases where personal information was posted without following these procedures.","Case 3: In web applications such as bulletin boards, it was possible to arbitrarily modify or delete posts made by others, or view password-protected posts."],"RelatedRegulations": []}],"description": "For servers exposed to external networks, protective measures must be established and implemented, including separating them from internal networks, conducting vulnerability assessments, access control, authentication, and establishing procedures for information collection, storage, and disclosure.","checks_status": {"fail": 19,"pass": 47,"total": 76,"manual": 0}},"2.10.4": {"name": "Security for Electronic Transactions and FinTech","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.10.4 Security for Electronic Transactions and FinTech","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Protection measures for electronic transaction and FinTech services","Security review results for payment system integration"],"AuditChecklist": ["Are protection measures established and implemented to ensure the safety and reliability of transactions when providing electronic transaction and FinTech services?","Are protection measures established and implemented to protect transmitted information when integrating with external systems, such as payment systems, and is the security of the integration checked?"],"NonComplianceCases": ["Case 1: While a contract was made with a payment service provider and integration was established, all payment-related information was transmitted in plain text through a specific URL without appropriate authentication or access restrictions.","Case 2: Although the external payment system was connected via a dedicated network, internal business systems were not properly controlled by firewalls or other security measures.","Case 3: Although internal guidelines required a security review by the information protection team before integrating external FinTech services, the review was skipped due to scheduling reasons when integrating a new FinTech service."],"RelatedRegulations": []}],"description": "When providing electronic transaction and FinTech services, protection measures such as authentication and encryption must be established to prevent data leakage, data tampering, or fraud. The security of external systems, such as payment systems, must be checked when integrated.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.5": {"name": "Secure Information Transmission","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","elb_insecure_ssl_ciphers": "PASS","elbv2_insecure_ssl_ciphers": "PASS","rds_instance_transport_encrypted": "FAIL","s3_bucket_secure_transport_policy": "FAIL","glue_database_connections_ssl_enabled": null,"cloudfront_distributions_https_enabled": null,"sns_subscription_not_using_http_endpoints": "PASS","kafka_cluster_in_transit_encryption_enabled": null,"apigateway_restapi_client_certificate_enabled": "FAIL","kafka_cluster_mutual_tls_authentication_enabled": null,"directoryservice_radius_server_security_protocol": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"elasticache_redis_cluster_in_transit_encryption_enabled": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.10.5 Secure Information Transmission","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Information transmission agreement or contract","Technical standards for information transmission","System diagrams and interface definitions related to information transmission"],"AuditChecklist": ["Has a secure transmission policy been established when transmitting personal and critical information to external organizations?","When exchanging personal and critical information between organizations for business purposes, are agreements and protection measures for secure transmission established and implemented?"],"NonComplianceCases": ["Case 1: Although a dedicated network or VPN is applied when integrating with external organizations, there is inadequate management of the timing, method, responsible person, transmitted information, and legal basis for each integration.","Case 2: There is a lack of implementation of security reviews, security standards, and action plans for using weak encryption algorithms (e.g., DES, 3DES) or decrypting during intermediate transmission stages."],"RelatedRegulations": []}],"description": "When transmitting personal or critical information to other organizations, a secure transmission policy must be established, and agreements must be made between organizations regarding management responsibilities, transmission methods, and technical protection measures for personal and critical information.","checks_status": {"fail": 5,"pass": 3,"total": 17,"manual": 0}},"2.10.6": {"name": "Security for Business Devices","checks": {"workspaces_volume_encryption_enabled": null,"appstream_fleet_maximum_session_duration": null,"appstream_fleet_session_disconnect_timeout": null,"workspaces_vpc_2private_1public_subnets_nat": null,"appstream_fleet_session_idle_disconnect_timeout": null,"appstream_fleet_default_internet_access_disabled": null},"status": "PASS","attributes": [{"Domain": "2. Protection Measure Requirements","Section": "2.10.6 Security for Business Devices","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Security control guidelines and procedures for business devices","Registration status of business devices","Security settings for business devices","Authentication and approval history for business devices","Security check status for business devices"],"AuditChecklist": ["Are security control policies, such as device authentication, approval, access scope, and security settings, established and implemented for devices used for business purposes, such as PCs, laptops, virtual PCs, and tablets?","Are policies established and implemented to prevent the leakage of personal and critical information through business devices by prohibiting the use of file-sharing programs, limiting shared settings, and controlling wireless network usage?","Are security measures applied to prevent the leakage of personal and critical information in case of loss or theft of business mobile devices?","Is the appropriateness of access control measures for business devices periodically reviewed?"],"NonComplianceCases": ["Case 1: Although laptops and tablet PCs are used for business purposes, there are no policies established for device approval, usage scope, approval procedures, or authentication methods.","Case 2: The security management guidelines for mobile devices prohibit the use of mobile devices for business purposes unless specifically approved, but unapproved mobile devices are still being used to access internal information systems.","Case 3: Personal and critical information is handled on mobile devices, but security measures such as password protection are not applied to prevent leaks due to loss or theft.","Case 4: Although internal regulations prohibit the use of shared folders on business devices, periodic checks are not conducted, resulting in excessive use of shared folders on many business devices."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 6 (Access Control)"]}],"description": "When connecting devices such as PCs and mobile devices to the network for business purposes, access control measures such as device authentication, approval, access scope, and security settings must be established and periodically checked.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"2.10.7": {"name": "Management of Removable Media","checks": {},"status": "PASS","attributes": [{"Domain": "2. Security Control Requirements","Section": "2.10.7 Management of Removable Media","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Policy on blocking removable media (USB, CD, etc.)","Removable media management log","Inspection records of removable media status"],"AuditChecklist": ["Are policies and procedures established and implemented for handling (use), storage, disposal, and reuse of removable media such as external hard drives, USB memory, and CDs?","Is the status of ownership, use, and management of removable media periodically checked?","Is the use of removable media restricted in controlled areas, such as key information systems or important restricted areas?","Are measures in place to prevent the infection of malware and the leakage of important information through removable media?","Are removable media containing personal or important information stored in a secure location with locking mechanisms?"],"NonComplianceCases": ["Case 1: Although there is a policy restricting the use of removable media in controlled areas like server rooms, several cases were found where removable media was used without following the exception approval process, and periodic inspections of the removable media management status were not conducted, resulting in inadequate updates to the management log.","Case 2: Removable media containing personal information was not stored in a secure location with locking mechanisms and was left unattended in office drawers.","Case 3: Although a solution to control removable media was implemented, some users were granted write access without appropriate approval procedures.","Case 4: Some common PCs and IT equipment in the server room allowed writing to standard USB memory devices, but controls such as media import and usage restrictions, usage history records, and reviews were not applied."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 10 (Physical Safety Measures)"]}],"description": "Procedures must be established and implemented to prevent the leakage of personal or important information or infection by malware through removable media. Removable media containing personal or important information must be stored in a secure location.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.8": {"name": "Patch Management","checks": {"ssm_managed_compliant_patching": "FAIL","kafka_cluster_uses_latest_version": null,"ec2_instance_account_imdsv2_enabled": null,"redshift_cluster_automatic_upgrades": null,"eks_cluster_uses_a_supported_version": null,"ec2_instance_older_than_specific_days": "FAIL","rds_instance_deprecated_engine_version": "PASS","rds_cluster_minor_version_upgrade_enabled": "PASS","dms_instance_minor_version_upgrade_enabled": null,"rds_instance_minor_version_upgrade_enabled": "PASS","awslambda_function_using_supported_runtimes": "FAIL","elasticache_redis_cluster_auto_minor_version_upgrades": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"opensearch_service_domains_updated_to_the_latest_service_software_version": null},"status": "FAIL","attributes": [{"Domain": "2. Security Control Requirements","Section": "2.10.8 Patch Management","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Patch management policies and procedures","Patch status of each system","Impact analysis results related to patch application"],"AuditChecklist": ["Are patch management policies and procedures for operating systems (OS) and software established and implemented according to the characteristics and importance of each asset, such as servers, network systems, security systems, and PCs?","Are the patch status of installed OS and software on key servers, network systems, and security systems periodically managed?","If applying the latest patches to address vulnerabilities is difficult due to service impact, are alternative measures implemented?","Is the application of patches via public internet access restricted for key servers, network systems, and security systems?","When using a patch management system, are sufficient protection measures, such as access control, established?"],"NonComplianceCases": ["Case 1: In some systems, OS patches were not applied for a long period without valid reasons or approval from the responsible personnel.","Case 2: Some systems were using OS versions that were no longer supported (EOS), but no response plans or alternative measures were in place.","Case 3: Although the latest patches were applied to commercial software and OS, there were no procedures or personnel assigned to confirm and apply the latest patches for open-source programs (e.g., OpenSSL, OpenSSH, Apache), resulting in the lack of application of the latest security patches."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 9 (Prevention of Malware, etc.)"]}],"description": "To prevent security incidents due to vulnerabilities in software, operating systems, or security systems, the latest patches must be applied. However, if the application of the latest patches is difficult due to service impact considerations, alternative measures must be implemented.","checks_status": {"fail": 3,"pass": 3,"total": 14,"manual": 0}},"2.10.9": {"name": "Malware Control","checks": {},"status": "PASS","attributes": [{"Domain": "2. Security Control Requirements","Section": "2.10.9 Malware Control","Subdomain": "2.10. 
System and Service Security Management","AuditEvidence": ["Guidelines, procedures, and manuals for malware response","Antivirus program installation status","Antivirus program configuration screens","Malware response history (e.g., response reports)"],"AuditChecklist": ["Are protection measures established and implemented to protect information systems and business terminals from malware such as viruses, worms, Trojans, and ransomware?","Are prevention and detection activities for the latest malware continuously performed using security programs such as antivirus software?","Are security programs such as antivirus software kept up to date, and are emergency security updates performed when necessary?","Are procedures for response, such as minimizing the spread of malware and mitigating damage, established and implemented when malware infections are discovered?"],"NonComplianceCases": ["Case 1: Some PCs and servers do not have antivirus software installed, or the antivirus engine has not been updated to the latest version for a long time.","Case 2: Although users can change the antivirus program settings (e.g., real-time scanning, scheduled scanning, update settings) at their discretion, no additional protection measures were established to address this.","Case 3: Insufficient protection measures, such as access control, were in place for the central antivirus management system, leading to the possibility of security incidents through the central management system, or no integrity verification of the antivirus pattern was performed, making it possible for malware to spread through malicious users.","Case 4: Although multiple malware infections were confirmed on some internal network PCs and servers, there was no confirmation of the infection status, infection routes, cause analysis, or resulting actions."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 9 (Prevention of Malware, etc.)"]}],"description": "To protect personal and important information, information systems, and business terminals from malware such as viruses, worms, Trojans, and ransomware, prevention, detection, and response measures must be established and implemented.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.11.1": {"name": "Establishment of Incident Prevention and Response System","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protective Measures Requirements","Section": "2.11.1 Establishment of Incident Prevention and Response System","Subdomain": "2.11. 
Incident Prevention and Response","AuditEvidence": ["Incident response guidelines/procedures/manual","Incident response organization chart and emergency contact list","Security monitoring service contract (SLA, etc.)"],"AuditChecklist": ["Has the organization established procedures and systems to prevent security breaches and personal information leaks and to respond quickly and effectively when incidents occur?","If the organization is operating an incident response system through an external institution, such as a security monitoring service, are the details of the incident response procedures reflected in the contract?","Has the organization established a cooperative system with external experts, specialized companies, or institutions for monitoring, responding to, and handling security incidents?"],"NonComplianceCases": ["Case 1: Failure to clearly define the incident response organization and procedures for responding to security breaches.","Case 2: Although internal guidelines and procedures specify incident response steps for different phases (before, during, after detection, recovery, reporting, etc.), some or all of the response and recovery procedures for specific incident types and severity levels are not established.","Case 3: Failure to keep the incident response organization chart and emergency contact list up to date, or the roles and responsibilities of each team member are not clearly defined.","Case 4: Errors or outdated information in the contact details for external agencies responsible for incident reporting, notification, and cooperation, or failure to keep some agency details current.","Case 5: When outsourcing incident detection and response to an external security monitoring company or related institution, failure to clearly define the roles and responsibilities for both parties in the contract or SLA.","Case 6: Although incident response procedures are in place, they do not meet the legal requirements for reporting and notifying personal data breaches, such as criteria and timing."],"RelatedRegulations": ["Personal Information Protection Act, Article 34 (Notification and Reporting of Personal Information Leaks, etc.)","Information and Communications Network Act, Article 48-3 (Reporting of Security Incidents), Article 48-4 (Analysis of Causes of Security Incidents, etc.)"]}],"description": "To prevent incidents such as security breaches and personal information leaks, and to respond quickly and effectively in the event of an incident, the organization must establish procedures for detecting, responding to, analyzing, and sharing internal and external intrusion attempts. 
In addition, the organization must establish a cooperative system with relevant external institutions and experts.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.11.2": {"name": "Vulnerability Inspection and Remediation","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ssm_document_secrets": "PASS","inspector2_is_enabled": "FAIL","ec2_instance_imdsv2_enabled": "PASS","guardduty_centrally_managed": "FAIL","ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","inspector2_active_findings_exist": "FAIL","trustedadvisor_errors_and_warnings": null,"guardduty_no_high_severity_findings": "FAIL","awslambda_function_no_secrets_in_code": "PASS","cloudwatch_log_group_no_secrets_in_logs": "FAIL","ecr_registry_scan_images_on_push_enabled": "PASS","cloudformation_stack_outputs_find_secrets": "PASS","codebuild_project_no_secrets_in_variables": "PASS","awslambda_function_no_secrets_in_variables": "PASS","ecs_task_definitions_no_environment_secrets": "PASS","ecr_repositories_scan_images_on_push_enabled": "FAIL","trustedadvisor_premium_support_plan_subscribed": null,"autoscaling_find_secrets_ec2_launch_configuration": "PASS","ecr_repositories_scan_vulnerabilities_in_latest_image": null,"codebuild_project_source_repo_url_no_sensitive_credentials": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. Protective Measures Requirements","Section": "2.11.2 Vulnerability Inspection and Remediation","Subdomain": "2.11. Incident Prevention and Response","AuditEvidence": ["Vulnerability inspection plan","Vulnerability inspection report (for web, mobile apps, servers, network systems, security systems, DBMS, etc.)","Vulnerability inspection records","Vulnerability remediation plan","Vulnerability remediation completion report","Penetration testing plan/results report"],"AuditChecklist": ["Has the organization established and implemented procedures for conducting regular vulnerability inspections of information systems?","Are actions taken to address identified vulnerabilities, and are the results reported to the responsible authorities?","Does the organization continuously monitor for new security vulnerabilities and assess their impact on the information systems, taking appropriate actions?","Is a record of vulnerability inspections maintained, and are protective measures implemented to address recurring vulnerabilities identified in previous years?"],"NonComplianceCases": ["Case 1: Although internal regulations require annual technical vulnerability inspections for major systems, some major systems were excluded from the inspection.","Case 2: Failure to implement corrective actions for identified vulnerabilities, or failure to provide justification and approval records for vulnerabilities that cannot be addressed promptly."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 4 (Establishment, Implementation, and Inspection of Internal Management Plans), Article 6 (Access Control)"]}],"description": "Regular vulnerability inspections must be conducted to verify whether information systems have exposed vulnerabilities, and any identified vulnerabilities must be promptly addressed. 
In addition, the organization must continuously monitor for new security vulnerabilities, assess their impact on the information systems, and take necessary actions.","checks_status": {"fail": 6,"pass": 14,"total": 23,"manual": 0}},"2.11.3": {"name": "Abnormal Behavior Analysis and Monitoring","checks": {"securityhub_enabled": "PASS","fms_policy_compliant": null,"vpc_flow_logs_enabled": "FAIL","cloudtrail_insights_exist": null,"networkfirewall_in_all_vpc": "FAIL","trustedadvisor_errors_and_warnings": null,"guardduty_no_high_severity_findings": "FAIL","cloudtrail_threat_detection_enumeration": null,"cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_cross_account_sharing_disabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cognito_user_pool_client_prevent_user_existence_errors": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.11.3 Abnormal Behavior Analysis and Monitoring","Subdomain": "2.11. 
Incident Prevention and Response","AuditEvidence": ["Status of abnormal behavior analysis and monitoring","Evidence of responses taken when abnormal behaviors were detected"],"AuditChecklist": ["Is the organization collecting, analyzing, and monitoring network traffic, data flows, and event logs from major information systems, applications, networks, and security systems to detect abnormal behaviors such as intrusion attempts, personal information leakage attempts, or fraudulent activities?","Has the organization defined criteria and thresholds to determine abnormal behaviors, and is follow-up action, such as the determination and investigation of abnormal activities, taken in a timely manner?"],"NonComplianceCases": ["Case 1: Failure to establish a real-time or regular monitoring system and procedures to detect intrusion attempts on servers, networks, databases, and security systems from external sources.","Case 2: Although the organization has outsourced monitoring tasks to an external security monitoring agency, there is no record of reviewing the reports provided by the agency, and the organization does not have its own monitoring system for systems excluded from the outsourced service.","Case 3: Although abnormal traffic exceeding internally defined thresholds has been continuously detected, no response measures have been taken."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 6 (Access Control)"]}],"description": "To quickly detect and respond to intrusion attempts, personal information leakage attempts, and fraudulent activities from internal or external sources, the organization must collect and analyze network and data flows. Post-monitoring and inspection actions must be timely.","checks_status": {"fail": 6,"pass": 1,"total": 28,"manual": 0}},"2.11.4": {"name": "Incident Response Training and Improvement","checks": {"ssmincidents_enabled_with_plans": null},"status": "PASS","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.11.4 Incident Response Training and Improvement","Subdomain": "2.11. 
Incident Prevention and Response","AuditEvidence": ["Simulation training plan for responding to security and personal information leakage incidents","Simulation training result reports for responding to security and personal information leakage incidents","Incident response procedures"],"AuditChecklist": ["Has the organization established a simulation training plan for responding to security incidents and personal information leakage incidents, and are such training exercises conducted at least once a year?","Is the organization reflecting the results of security incident and personal information leakage incident training to improve its response system?"],"NonComplianceCases": ["Case 1: Failure to conduct simulation training or provide related training plans and result reports.","Case 2: Although an annual simulation training plan for security incidents was established, it was not conducted within the planned period without valid reason or approval.","Case 3: Simulation training was conducted, but it was not performed according to the procedures and forms defined in the internal guidelines."],"RelatedRegulations": []}],"description": "The organization must conduct at least one simulation training per year based on scenarios to ensure that employees and stakeholders are familiar with the procedures for responding to security incidents and personal information leakage incidents. The response system must be improved based on the training results.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"2.11.5": {"name": "Incident Response and Recovery","checks": {},"status": "PASS","attributes": [{"Domain": "2. Protection Measures Requirements","Section": "2.11.5 Incident Response and Recovery","Subdomain": "2.11. Incident Prevention and Response","AuditEvidence": ["Incident response procedures","Incident response reports","Incident management logs","Personal information leakage reports","Emergency contact list"],"AuditChecklist": ["When signs of or actual incidents of security breaches or personal information leakage are detected, is the organization responding and reporting promptly according to the defined incident response procedures?","Is the organization notifying data subjects and reporting to relevant authorities as required by law in case of a personal information breach?","After the incident is resolved, is the organization analyzing the cause, reporting the results, and sharing them with relevant departments and personnel?","Is the organization utilizing the information obtained from incident analysis to establish preventive measures to prevent similar incidents from recurring, and if necessary, modifying its incident response procedures?"],"NonComplianceCases": ["Case 1: Although internal incident response guidelines require that security incidents be reported to the internal information protection committee and relevant departments, the department in charge responded to the incident independently without reporting to the information protection committee or relevant departments.","Case 2: Although a service outage suspected to be caused by a DDoS attack occurred recently, the organization did not analyze the cause or establish preventive measures.","Case 3: Although a personal information leakage incident occurred due to external hacking, notification and reporting were not made within 72 hours, citing the small number of affected personal information records as the reason.","Case 4: Although personal information of more than 1,000 individuals was leaked due to an 
employee's mistake on the company website, the affected data subjects were not notified."],"RelatedRegulations": ["Personal Information Protection Act, Article 34 (Notification and Reporting of Personal Information Leakage)","Information and Communications Network Act, Article 48-3 (Reporting of Security Incidents), Article 48-4 (Analysis of Causes of Security Incidents)"]}],"description": "When signs of or actual incidents of security breaches or personal information leakage are detected, the organization must comply with legal notification and reporting obligations, respond and recover promptly according to established procedures, and analyze the incident to establish preventive measures to reflect in the response system.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.12.1": {"name": "Safety Measures for Disaster Preparedness","checks": {"drs_job_exist": "FAIL","backup_plans_exist": "PASS","rds_cluster_multi_az": "FAIL","elb_is_in_multiple_az": "FAIL","rds_instance_multi_az": "FAIL","s3_bucket_object_lock": "FAIL","vpc_different_regions": null,"efs_have_backup_enabled": "FAIL","elbv2_is_in_multiple_az": "PASS","vpc_subnet_different_az": "PASS","backup_reportplans_exist": null,"neptune_cluster_multi_az": null,"elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_lifecycle_enabled": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"autoscaling_group_multiple_az": null,"dms_instance_multi_az_enabled": null,"ec2_ebs_volume_snapshots_exists": "FAIL","rds_cluster_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL","documentdb_cluster_backup_enabled": null,"s3_bucket_cross_region_replication": "FAIL","kms_cmk_not_deleted_unintentionally": null,"neptune_cluster_deletion_protection": null,"redshift_cluster_automated_snapshot": null,"elb_cross_zone_load_balancing_enabled": "PASS","lightsail_instance_automated_snapshots": null,"dlm_ebs_snapshot_lifecycle_policy_exists": "FAIL","documentdb_cluster_cloudwatch_log_export": null,"elasticache_redis_cluster_backup_enabled": null,"elasticache_redis_cluster_multi_az_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protective Measure Requirements","Section": "2.12.1 Safety Measures for Disaster Preparedness","Subdomain": "2.12. 
Disaster Recovery","AuditEvidence": ["IT disaster recovery guidelines/procedures","IT disaster recovery plans (including RTO and RPO definitions)","Emergency contact list","Crisis response manual for personal information processing systems"],"AuditChecklist": ["Has the organization identified IT disaster types that could threaten the continuity of core services (businesses) and analyzed the expected scale of damage and impact on operations to identify core IT services (businesses) and systems?","Has the organization defined recovery time objectives (RTO) and recovery point objectives (RPO) based on the importance and characteristics of core IT services and systems?","Has the organization established and implemented disaster recovery plans, including recovery strategies, emergency recovery teams, emergency contact networks, and recovery procedures, to ensure the continuity of core services and systems during disasters?"],"NonComplianceCases": ["Case 1: The IT disaster recovery procedures lack critical details such as the definition of IT disaster recovery teams and roles, emergency contact systems, and recovery procedures and methods.","Case 2: Although a backup center has been established to ensure the continuity of information systems and minimize damage during emergencies, the relevant policies do not include disaster recovery procedures using the backup center, making disaster recovery tests and actual recovery efforts ineffective.","Case 3: Recovery time objectives for some critical systems related to service operations have not been defined, and appropriate recovery strategies are not in place.","Case 4: The disaster recovery guidelines do not define the recovery priorities, RTO, or RPO for IT services or systems.","Case 5: Unrealistic recovery objectives have been set, either too high or too low, and the RPO and backup policies (e.g., targets, frequency) are not appropriately linked, making it difficult to ensure the effectiveness of recovery."],"RelatedRegulations": ["Personal Information Protection Act, Article 29 (Obligation to Take Safety Measures)","Standards for Ensuring the Safety of Personal Information, Article 11 (Safety Measures for Disaster Preparedness)"]}],"description": "Identify types of disasters that could threaten the operational continuity of the organization's core services and systems, such as natural disasters, communication or power failures, and hacking. 
Analyze the expected scale of damage and impact for each type, define the recovery time objective (RTO) and recovery point objective (RPO), and establish a disaster recovery system including recovery strategies, emergency recovery teams, emergency contact networks, and recovery procedures.","checks_status": {"fail": 14,"pass": 5,"total": 33,"manual": 0}},"2.12.2": {"name": "Disaster Recovery Testing and Improvement","checks": {"drs_job_exist": "FAIL","backup_plans_exist": "PASS","rds_cluster_multi_az": "FAIL","elb_is_in_multiple_az": "FAIL","rds_instance_multi_az": "FAIL","s3_bucket_object_lock": "FAIL","vpc_different_regions": null,"efs_have_backup_enabled": "FAIL","elbv2_is_in_multiple_az": "PASS","vpc_subnet_different_az": "PASS","backup_reportplans_exist": null,"neptune_cluster_multi_az": null,"elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_lifecycle_enabled": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"autoscaling_group_multiple_az": null,"dms_instance_multi_az_enabled": null,"ec2_ebs_volume_snapshots_exists": "FAIL","rds_cluster_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL","documentdb_cluster_backup_enabled": null,"s3_bucket_cross_region_replication": "FAIL","kms_cmk_not_deleted_unintentionally": null,"neptune_cluster_deletion_protection": null,"redshift_cluster_automated_snapshot": null,"elb_cross_zone_load_balancing_enabled": "PASS","lightsail_instance_automated_snapshots": null,"dlm_ebs_snapshot_lifecycle_policy_exists": "FAIL","documentdb_cluster_cloudwatch_log_export": null,"elasticache_redis_cluster_backup_enabled": null,"elasticache_redis_cluster_multi_az_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. Protective Measure Requirements","Section": "2.12.2 Disaster Recovery Testing and Improvement","Subdomain": "2.12. 
Disaster Recovery","AuditEvidence": ["IT disaster recovery procedures","IT disaster recovery test plans","IT disaster recovery test results"],"AuditChecklist": ["Has the organization established and implemented disaster recovery test plans to evaluate the effectiveness of the established IT disaster recovery system?","Are the disaster recovery strategies and plans regularly reviewed and supplemented to reflect test results, changes in the information system environment, and legal requirements?"],"NonComplianceCases": ["Case 1: Disaster recovery drills were not planned or conducted, and the related plans and result reports are not available.","Case 2: Although a disaster recovery drill plan was established, it was not conducted as planned or approved, and the related result reports are missing.","Case 3: Disaster recovery drills were conducted, but they did not follow the procedures and forms outlined in the internal guidelines, making it difficult to evaluate the adequacy and effectiveness of the disaster recovery procedures."],"RelatedRegulations": []}],"description": "Regularly test the adequacy of the disaster recovery strategies and plans, and supplement the recovery strategies and plans based on test results, changes in the information system environment, and legal requirements.","checks_status": {"fail": 14,"pass": 5,"total": 33,"manual": 0}}},"requirements_passed": 10,"requirements_failed": 27,"requirements_manual": 64,"total_requirements": 101,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "23a633ec-caa6-4021-809a-a247c6f177e6","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "nist_csf_1.1_aws","framework": "NIST-CSF","version": "1.1","description": "The NIST Cybersecurity Framework (CSF) is supported by governments and industries worldwide as a recommended baseline for use by any organization, regardless of sector or size. The NIST Cybersecurity Framework consists of three primary components: the framework core, the profiles, and the implementation tiers. The framework core contains desired cybersecurity activities and outcomes organized into 23 categories that cover the breadth of cybersecurity objectives for an organization. The profiles contain an organization's unique alignment of their organizational requirements and objectives, risk appetite, and resources using the desired outcomes of the framework core. 
The implementation tiers describe the degree to which an organization's cybersecurity risk management practices exhibit the characteristics defined in the framework core.","region": "eu-west-1","requirements": {"ac_1": {"name": "PR.AC-1","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_password_policy_reuse_24": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_1","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Identity Management and Access Control (PR.AC)"}],"description": "Identities and credentials are issued, managed, verified, revoked, and audited for authorized devices, users and processes.","checks_status": {"fail": 1,"pass": 0,"total": 9,"manual": 0}},"ac_3": {"name": "PR.AC-3","checks": {"iam_root_mfa_enabled": null,"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"iam_root_hardware_mfa_enabled": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Identity Management and Access Control (PR.AC)"}],"description": "Remote access is managed.","checks_status": {"fail": 3,"pass": 6,"total": 20,"manual": 0}},"ac_4": {"name": "PR.AC-4","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_4","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Identity Management and Access Control (PR.AC)"}],"description": "Access permissions and authorizations are managed, incorporating the principles of least privilege and separation of duties.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"ac_5": {"name": "PR.AC-5","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": 
"FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_5","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Identity Management and Access Control (PR.AC)"}],"description": "Network integrity is protected (e.g., network segregation, network segmentation).","checks_status": {"fail": 3,"pass": 7,"total": 17,"manual": 0}},"ac_6": {"name": "PR.AC-6","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_6","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Identity Management and Access Control (PR.AC)"}],"description": "Identities are proofed and bound to credentials and asserted in interactions.","checks_status": {"fail": 1,"pass": 1,"total": 4,"manual": 0}},"ac_7": {"name": "PR.AC-7","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_7","Section": "Protect (PR)","Service": "iam","SubGroup": null,"SubSection": "Identity Management and Access Control (PR.AC)"}],"description": "Users, devices, and other assets are authenticated (e.g., single-factor, multi-factor) commensurate with the risk of the transaction (e.g., individualsโ€™ security and privacy risks and other organizational risks).","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ae_1": {"name": "DE.AE-1","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","s3_bucket_server_access_logging_enabled": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ae_1","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Anomalies and Events (DE.AE)"}],"description": "A baseline of network operations and expected data flows for users and systems is established and managed.","checks_status": {"fail": 7,"pass": 3,"total": 13,"manual": 0}},"ae_2": {"name": "DE.AE-2","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ae_2","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Anomalies and Events (DE.AE)"}],"description": "Detected events are analyzed to understand attack targets and methods.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ae_3": {"name": "DE.AE-3","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ae_3","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": 
"Anomalies and Events (DE.AE)"}],"description": "Event data are collected and correlated from multiple sources and sensors.","checks_status": {"fail": 5,"pass": 2,"total": 11,"manual": 0}},"ae_4": {"name": "DE.AE-4","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ae_4","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Anomalies and Events (DE.AE)"}],"description": "Impact of events is determined.","checks_status": {"fail": 4,"pass": 3,"total": 10,"manual": 0}},"ae_5": {"name": "DE.AE-5","checks": {"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ae_5","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Anomalies and Events (DE.AE)"}],"description": "Incident alert thresholds are established.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"am_1": {"name": "ID.AM-1","checks": {"ec2_instance_managed_by_ssm": "FAIL","config_recorder_all_regions_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "am_1","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Asset Management (ID.AM)"}],"description": "Physical devices and systems within the organization are inventoried.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"am_2": {"name": "ID.AM-2","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "am_2","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Asset Management (ID.AM)"}],"description": "Software platforms and applications within the organization are inventoried.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"am_3": {"name": "ID.AM-3","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "am_3","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Asset Management (ID.AM)"}],"description": "Organizational communication and data flows are mapped.","checks_status": {"fail": 4,"pass": 2,"total": 8,"manual": 0}},"am_5": {"name": "ID.AM-5","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "am_5","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Asset Management (ID.AM)"}],"description": "Resources (e.g., hardware, devices, data, time, personnel, and software) are prioritized based on their classification, criticality, and business value.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"am_6": {"name": "ID.AM-6","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "am_6","Section": "Identify (ID)","Service": "iam","SubGroup": null,"SubSection": 
"Asset Management (ID.AM)"}],"description": "Cybersecurity roles and responsibilities for the entire workforce and third-party stakeholders (e.g., suppliers, customers, partners) are established.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"an_2": {"name": "RS.AN-2","checks": {"guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "an_2","Section": "Respond (RS)","Service": "guardduty","SubGroup": null,"SubSection": "Analysis (RS.AN)"}],"description": "The impact of the incident is understood.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"be_5": {"name": "ID.BE-5","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "be_5","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Business Environment (ID.BE)"}],"description": "Resilience requirements to support delivery of critical services are established for all operating states (e.g. under duress/attack, during recovery, normal operations)","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"cm_1": {"name": "DE.CM-1","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_1","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "The network is monitored to detect potential cybersecurity events.","checks_status": {"fail": 4,"pass": 4,"total": 11,"manual": 0}},"cm_2": {"name": "DE.CM-2","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "The physical environment is monitored to detect potential cybersecurity events.","checks_status": {"fail": 2,"pass": 3,"total": 20,"manual": 0}},"cm_3": {"name": "DE.CM-3","checks": 
{"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_3","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "Personnel activity is monitored to detect potential cybersecurity events.","checks_status": {"fail": 1,"pass": 3,"total": 7,"manual": 0}},"cm_4": {"name": "DE.CM-4","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "cm_4","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "Malicious code is detected.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"cm_5": {"name": "DE.CM-5","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_waf_acl_attached": "FAIL","ec2_instance_imdsv2_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_5","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "Unauthorized mobile code is detected.","checks_status": {"fail": 3,"pass": 3,"total": 10,"manual": 0}},"cm_6": {"name": "DE.CM-6","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_6","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "External service provider activity is monitored to detect potential cybersecurity events.","checks_status": {"fail": 1,"pass": 3,"total": 7,"manual": 0}},"cm_7": {"name": "DE.CM-7","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_7","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Security Continuous Monitoring (DE.CM)"}],"description": "Monitoring for unauthorized personnel, connections, devices, and software is performed.","checks_status": {"fail": 4,"pass": 4,"total": 11,"manual": 0}},"cp_4": {"name": "DE.DP-4","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_waf_acl_attached": "FAIL","ec2_instance_imdsv2_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": 
null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_4","Section": "Detect (DE)","Service": "aws","SubGroup": null,"SubSection": "Detection Processes (DE.DP)"}],"description": "Event detection information is communicated.","checks_status": {"fail": 3,"pass": 3,"total": 10,"manual": 0}},"cp_5": {"name": "DE.DP-5","checks": {"ec2_instance_imdsv2_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "cp_5","Section": "Detect (DE)","Service": "ec2","SubGroup": null,"SubSection": "Detection Processes (DE.DP)"}],"description": "Detection processes are continuously improved.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ds_1": {"name": "PR.DS-1","checks": {"ec2_ebs_volume_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_1","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Data-at-rest is protected.","checks_status": {"fail": 5,"pass": 2,"total": 9,"manual": 0}},"ds_2": {"name": "PR.DS-2","checks": {"elb_ssl_listeners": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_2","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Data-in-transit is protected.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"ds_3": {"name": "PR.DS-3","checks": {"ec2_elastic_ip_unassigned": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_3","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Assets are formally managed throughout removal, transfers, and disposition.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ds_4": {"name": "PR.DS-4","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_4","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Adequate capacity to ensure availability is maintained.","checks_status": {"fail": 4,"pass": 1,"total": 5,"manual": 0}},"ds_5": {"name": "PR.DS-5","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"cloudtrail_multi_region_enabled": "PASS","s3_bucket_policy_public_write_access": 
"PASS","cloudtrail_s3_dataevents_read_enabled": null,"s3_account_level_public_access_blocks": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_5","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Protections against data leaks are implemented.","checks_status": {"fail": 4,"pass": 7,"total": 19,"manual": 0}},"ds_6": {"name": "PR.DS-6","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_6","Section": "Protect (PR)","Service": "cloudtrail","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Integrity checking mechanisms are used to verify software, firmware, and information integrity.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ds_7": {"name": "PR.DS-7","checks": {"elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_7","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "The development and testing environment(s) are separate from the production environment.","checks_status": {"fail": 5,"pass": 1,"total": 6,"manual": 0}},"ds_8": {"name": "PR.DS-8","checks": {"securityhub_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ds_8","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Data Security (PR.DS)"}],"description": "Integrity checking mechanisms are used to verify hardware integrity.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"ip_1": {"name": "PR.IP-1","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_1","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "A baseline configuration of information technology/industrial control systems is created and maintained incorporating security principles (e.g. 
concept of least functionality).","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ip_2": {"name": "PR.IP-2","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_2","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "A System Development Life Cycle to manage systems is implemented.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ip_3": {"name": "PR.IP-3","checks": {"elbv2_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_3","Section": "Protect (PR)","Service": "elb","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "Configuration change control processes are in place.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ip_4": {"name": "PR.IP-4","checks": {"rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_4","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "Backups of information are conducted, maintained, and tested periodically.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"ip_7": {"name": "PR.IP-7","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "ip_7","Section": "Protect (PR)","Service": "ec2","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "Protection processes are improved.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ip_8": {"name": "PR.IP-8","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"eks_cluster_not_publicly_accessible": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_8","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "Effectiveness of protection technologies is shared.","checks_status": {"fail": 1,"pass": 4,"total": 13,"manual": 0}},"ip_9": {"name": "PR.IP-9","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_9","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "Response plans (Incident Response and Business Continuity) and recovery plans (Incident Recovery and Disaster Recovery) are in place and managed.","checks_status": {"fail": 4,"pass": 1,"total": 10,"manual": 0}},"ma_2": {"name": "PR.MA-2","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ma_2","Section": "Protect 
(PR)","Service": "cloudtrail","SubGroup": null,"SubSection": "Maintenance (PR.MA)"}],"description": "Remote maintenance of organizational assets is approved, logged, and performed in a manner that prevents unauthorized access.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"mi_3": {"name": "RS.MI-3","checks": {"guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "mi_3","Section": "Respond (RS)","Service": "guardduty","SubGroup": null,"SubSection": "Mitigation (RS.MI)"}],"description": "Newly identified vulnerabilities are mitigated or documented as accepted risks.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"pt_1": {"name": "PR.PT-1","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pt_1","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Protective Technology (PR.PT)"}],"description": "Audit/log records are determined, documented, implemented, and reviewed in accordance with policy.","checks_status": {"fail": 5,"pass": 2,"total": 8,"manual": 0}},"pt_3": {"name": "PR.PT-3","checks": {"iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","s3_account_level_public_access_blocks": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "pt_3","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Protective Technology (PR.PT)"}],"description": "The principle of least functionality is incorporated by configuring systems to provide only essential capabilities.","checks_status": {"fail": 0,"pass": 3,"total": 11,"manual": 0}},"pt_4": {"name": "PR.PT-4","checks": {"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pt_4","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Protective Technology (PR.PT)"}],"description": "Communications and control networks are protected.","checks_status": {"fail": 1,"pass": 3,"total": 6,"manual": 0}},"pt_5": {"name": "PR.PT-5","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pt_5","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Protective Technology (PR.PT)"}],"description": "Mechanisms (e.g., failsafe, load balancing, hot swap) are implemented to achieve resilience requirements in normal and adverse situations.","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"ra_1": {"name": "ID.RA-1","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": 
"PASS","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ra_1","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Risk Assessment (ID.RA)"}],"description": "Asset vulnerabilities are identified and documented.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"ra_2": {"name": "ID.RA-2","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_2","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Risk Assessment (ID.RA)"}],"description": "Cyber threat intelligence is received from information sharing forums and sources.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ra_3": {"name": "ID.RA-3","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_3","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Risk Assessment (ID.RA)"}],"description": "Threats, both internal and external, are identified and documented.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ra_5": {"name": "ID.RA-5","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ra_5","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Risk Assessment (ID.RA)"}],"description": "Threats, vulnerabilities, likelihoods, and impacts are used to determine risk.","checks_status": {"fail": 2,"pass": 3,"total": 20,"manual": 0}},"rp_1": {"name": "RS.RP-1","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "rp_1","Section": "Respond (RS)","Service": "aws","SubGroup": null,"SubSection": "Response Planning (RS.RP)"}],"description": "Response plan is executed during or after an incident.","checks_status": {"fail": 4,"pass": 1,"total": 11,"manual": 0}},"sc_4": {"name": "ID.SC-4","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": 
"FAIL","cloudwatch_log_metric_filter_root_usage": null,"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_4","Section": "Identify (ID)","Service": "aws","SubGroup": null,"SubSection": "Supply Chain Risk Management (ID.SC)"}],"description": "Suppliers and third-party partners are routinely assessed using audits, test results, or other forms of evaluations to confirm they are meeting their contractual obligations.","checks_status": {"fail": 2,"pass": 3,"total": 16,"manual": 0}},"ip_12": {"name": "PR.IP-12","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","config_recorder_all_regions_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ip_12","Section": "Protect (PR)","Service": "aws","SubGroup": null,"SubSection": "Information Protection Processes and Procedures (PR.IP)"}],"description": "A vulnerability management plan is developed and implemented.","checks_status": {"fail": 2,"pass": 0,"total": 4,"manual": 0}}},"requirements_passed": 11,"requirements_failed": 42,"requirements_manual": 3,"total_requirements": 56,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "23ef3629-e1cd-4f16-af98-ab0daaff257e","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "fedramp_low_revision_4_aws","framework": "FedRAMP-Low-Revision-4","version": "","description": "The Federal Risk and Authorization Management Program (FedRAMP) was established in 2011. It provides a cost-effective, risk-based approach for the adoption and use of cloud services by the U.S. federal government. 
FedRAMP empowers federal agencies to use modern cloud technologies, with an emphasis on the security and protection of federal information.","region": "eu-west-1","requirements": {"ac-2": {"name": "Account Management (AC-2)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_user_console_access_unused": null,"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","iam_user_mfa_enabled_console_access": null,"cloudtrail_s3_dataevents_read_enabled": null,"iam_password_policy_minimum_length_14": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"iam_aws_attached_policy_no_administrative_privileges": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-2","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Manage system accounts, group memberships, privileges, workflow, notifications, deactivations, and authorizations.","checks_status": {"fail": 3,"pass": 4,"total": 26,"manual": 0}},"ac-3": {"name": "Account Management (AC-3)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system enforces approved authorizations for logical access to information and system resources in accordance with applicable access control policies.","checks_status": {"fail": 1,"pass": 6,"total": 20,"manual": 0}},"au-2": {"name": "Audit Events (AU-2)","checks": {"elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": 
"FAIL","attributes": [{"Type": null,"ItemId": "au-2","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization: a. Determines that the information system is capable of auditing the following events: [Assignment: organization-defined auditable events]; b. Coordinates the security audit function with other organizational entities requiring audit- related information to enhance mutual support and to help guide the selection of auditable events; c. Provides a rationale for why the auditable events are deemed to be adequate support after- the-fact investigations of security incidents","checks_status": {"fail": 5,"pass": 2,"total": 11,"manual": 0}},"au-9": {"name": "Protection of Audit Information (AU-9)","checks": {"s3_bucket_object_versioning": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-9","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects audit information and audit tools from unauthorized access, modification, and deletion.","checks_status": {"fail": 4,"pass": 0,"total": 4,"manual": 0}},"ca-7": {"name": "Continuous Monitoring (CA-7)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_waf_acl_attached": "FAIL","ec2_instance_imdsv2_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_network_acls_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca-7","Section": "Security Assessment And Authorization (CA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Continuously monitor configuration management processes. 
Determine security impact, environment and operational risks.","checks_status": {"fail": 2,"pass": 4,"total": 11,"manual": 0}},"cm-2": {"name": "Baseline Configuration (CM-2)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"elbv2_deletion_protection": "FAIL","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"ssm_managed_compliant_patching": "FAIL","apigateway_restapi_waf_acl_attached": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm-2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization develops, documents, and maintains under configuration control, a current baseline configuration of the information system.","checks_status": {"fail": 7,"pass": 6,"total": 21,"manual": 0}},"cm-8": {"name": "Information System Component Inventory (CM-8)","checks": {"guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm-8","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization develops and documents an inventory of information system components that accurately reflects the current information system, includes all components within the authorization boundary of the information system, is at the level of granularity deemed necessary for tracking and reporting and reviews and updates the information system component inventory.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"cp-9": {"name": "Information System Backup (CP-9)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp-9","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization conducts backups of user-level information, system-level information and information system documentation including security-related documentation contained in the information system and protects the confidentiality, integrity, and availability of backup information at storage locations.","checks_status": {"fail": 2,"pass": 1,"total": 7,"manual": 0}},"ia-2": {"name": "Identification and Authentication (Organizational users) (IA-2)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-2","Section": "Identification and Authentication (IA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system uniquely identifies and 
authenticates organizational users (or processes acting on behalf of organizational users).","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"ir-4": {"name": "Incident Handling (IR-4)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir-4","Section": "Incident Response (IR)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization implements an incident handling capability for security incidents that includes preparation, detection and analysis, containment, eradication, and recovery, coordinates incident handling activities with contingency planning activities and incorporates lessons learned from ongoing incident handling activities into incident response procedures, training, and testing, and implements the resulting changes accordingly.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"sa-3": {"name": "System Development Life Cycle (SA-3)","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa-3","Section": "System and Services Acquisition (SA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization manages the information system using organization-defined system development life cycle, defines and documents information security roles and responsibilities throughout the system development life cycle, identifies individuals having information security roles and responsibilities and integrates the organizational information security risk management process into system development life cycle activities.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc-5": {"name": "Denial Of Service Protection (SC-5)","checks": {"guardduty_is_enabled": "PASS","rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-5","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects against or limits the effects of the following types of denial of service attacks: [Assignment: organization-defined types of denial of service attacks or references to sources for such information] by employing [Assignment: organization-defined security safeguards].","checks_status": {"fail": 4,"pass": 2,"total": 8,"manual": 0}},"sc-7": {"name": "Boundary Protection (SC-7)","checks": {"elb_ssl_listeners": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": 
null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-7","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system: a. Monitors and controls communications at the external boundary of the system and at key internal boundaries within the system; b. Implements subnetworks for publicly accessible system components that are [Selection: physically; logically] separated from internal organizational networks; and c. Connects to external networks or information systems only through managed interfaces consisting of boundary protection devices arranged in accordance with an organizational security architecture.","checks_status": {"fail": 6,"pass": 6,"total": 21,"manual": 0}},"ac-17": {"name": "Remote Access (AC-17)","checks": {"elb_ssl_listeners": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-17","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Authorize remote access systems prior to connection. 
Enforce remote connection requirements to information systems.","checks_status": {"fail": 5,"pass": 9,"total": 21,"manual": 0}},"au-11": {"name": "Audit Record Retention (AU-11)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-11","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization retains audit records for at least 90 days to provide support for after-the-fact investigations of security incidents and to meet regulatory and organizational information retention requirements.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cp-10": {"name": "Information System Recovery And Reconstitution (CP-10)","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp-10","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization provides for the recovery and reconstitution of the information system to a known state after a disruption, compromise, or failure.","checks_status": {"fail": 4,"pass": 1,"total": 9,"manual": 0}},"sc-12": {"name": "Cryptographic Key Establishment And Management (SC-12)","checks": {"kms_cmk_rotation_enabled": null,"acm_certificates_expiration_check": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc-12","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization establishes and manages cryptographic keys for required cryptography employed within the information system in accordance with [Assignment: organization-defined requirements for key generation, distribution, storage, access, and destruction].","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"sc-13": {"name": "Use of Cryptography (SC-13)","checks": {"s3_bucket_default_encryption": "PASS","sns_topics_kms_encryption_at_rest_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"sagemaker_training_jobs_volume_and_output_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-13","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system implements FIPS-validated or NSA-approved cryptography in accordance with applicable federal laws, Executive Orders, directives, policies, regulations, and standards.","checks_status": {"fail": 1,"pass": 1,"total": 4,"manual": 0}}},"requirements_passed": 2,"requirements_failed": 16,"requirements_manual": 0,"total_requirements": 18,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "2d3bdafb-2503-4e04-a107-bdda7c4163ba","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "aws_audit_manager_control_tower_guardrails_aws","framework": "AWS-Audit-Manager-Control-Tower-Guardrails","version": "","description": "AWS Control Tower is a management and governance service that you can use to navigate through the setup process and governance requirements that are involved in creating a multi-account AWS environment.","region": 
"eu-west-1","requirements": {"1.0.1": {"name": "Disallow launch of EC2 instance types that are not EBS-optimized","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "1.0.1","Section": "EBS checks","Service": "ebs","SubGroup": null,"SubSection": null}],"description": "Checks whether EBS optimization is enabled for your EC2 instances that can be EBS-optimized","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.0.2": {"name": "Disallow EBS volumes that are unattached to an EC2 instance","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "1.0.2","Section": "EBS checks","Service": "ebs","SubGroup": null,"SubSection": null}],"description": "Checks whether EBS volumes are attached to EC2 instances","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.0.3": {"name": "Enable encryption for EBS volumes attached to EC2 instances","checks": {"ec2_ebs_default_encryption": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "1.0.3","Section": "EBS checks","Service": "ebs","SubGroup": null,"SubSection": null}],"description": "Checks whether EBS volumes that are in an attached state are encrypted","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.0.1": {"name": "Disallow internet connection through RDP","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "2.0.1","Section": "Disallow Internet Connection","Service": "vpc","SubGroup": null,"SubSection": null}],"description": "Checks whether security groups that are in use disallow unrestricted incoming TCP traffic to the specified","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.0.2": {"name": "Disallow internet connection through SSH","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "2.0.2","Section": "Disallow Internet Connection","Service": "vpc","SubGroup": null,"SubSection": null}],"description": "Checks whether security groups that are in use disallow unrestricted incoming SSH traffic.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.0.1": {"name": "Disallow access to IAM users without MFA","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3.0.1","Section": "Multi-Factor Authentication","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Checks whether the AWS Identity and Access Management users have multi-factor authentication (MFA) enabled.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.0.2": {"name": "Disallow console access to IAM users without MFA","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3.0.2","Section": "Multi-Factor Authentication","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Checks whether AWS Multi-Factor Authentication (MFA) is enabled for all AWS Identity and Access Management (IAM) users that use a console password.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.0.3": {"name": "Enable MFA for the root user","checks": {"iam_root_mfa_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "3.0.3","Section": "Multi-Factor Authentication","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Checks whether the root user of your AWS account requires multi-factor authentication for console 
sign-in.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.0.1": {"name": "Disallow public access to RDS database instances","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.0.1","Section": "Disallow Public Access","Service": "rds","SubGroup": null,"SubSection": null}],"description": "Checks whether the Amazon Relational Database Service (RDS) instances are not publicly accessible. The rule is non-compliant if the publiclyAccessible field is true in the instance configuration item.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"4.0.2": {"name": "Disallow public access to RDS database snapshots","checks": {"rds_snapshots_public_access": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.0.2","Section": "Disallow Public Access","Service": "rds","SubGroup": null,"SubSection": null}],"description": "Checks if Amazon Relational Database Service (Amazon RDS) snapshots are public. The rule is non-compliant if any existing and new Amazon RDS snapshots are public.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"4.1.1": {"name": "Disallow public read access to S3 buckets","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.1.1","Section": "Disallow Public Access","Service": "s3","SubGroup": null,"SubSection": null}],"description": "Checks that your S3 buckets do not allow public read access.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"4.1.2": {"name": "Disallow public write access to S3 buckets","checks": {"s3_bucket_policy_public_write_access": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.1.2","Section": "Disallow Public Access","Service": "s3","SubGroup": null,"SubSection": null}],"description": "Checks that your S3 buckets do not allow public write access.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"5.0.1": {"name": "Disallow RDS database instances that are not storage encrypted ","checks": {"rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "5.0.1","Section": "Disallow Instances","Service": "rds","SubGroup": null,"SubSection": null}],"description": "Checks whether storage encryption is enabled for your RDS DB instances.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"5.1.1": {"name": "Disallow S3 buckets that are not versioning enabled","checks": {"s3_bucket_object_versioning": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "5.1.1","Section": "Disallow Instances","Service": "s3","SubGroup": null,"SubSection": null}],"description": "Checks whether versioning is enabled for your S3 buckets.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 10,"requirements_failed": 2,"requirements_manual": 2,"total_requirements": 14,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "3477e5c1-467e-4fb1-9b4b-1c2bc8fcd03e","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "pci_3.2.1_aws","framework": "PCI","version": "3.2.1","description": "The Payment Card Industry Data Security Standard (PCI DSS) is a proprietary information security standard. It's administered by the PCI Security Standards Council, which was founded by American Express, Discover Financial Services, JCB International, MasterCard Worldwide, and Visa Inc. 
PCI DSS applies to entities that store, process, or transmit cardholder data (CHD) or sensitive authentication data (SAD). This includes, but isn't limited to, merchants, processors, acquirers, issuers, and service providers. The PCI DSS is mandated by the card brands and administered by the Payment Card Industry Security Standards Council.","region": "eu-west-1","requirements": {"cw": {"name": "CloudWatch","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "cw","Section": null,"Service": "cloudwatch","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CloudWatch resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"s3": {"name": "S3","checks": {"s3_bucket_public_access": null,"s3_bucket_default_encryption": "PASS","s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "s3","Section": null,"Service": "s3","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS S3 resources and options.","checks_status": {"fail": 1,"pass": 2,"total": 5,"manual": 0}},"dms": {"name": "DMS","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "dms","Section": null,"Service": "dms","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS DMS resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ec2": {"name": "EC2","checks": {"vpc_flow_logs_enabled": "FAIL","ec2_ebs_public_snapshot": "PASS","ec2_elastic_ip_unassigned": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ec2","Section": null,"Service": "ec2","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring EC2 resources and options.","checks_status": {"fail": 3,"pass": 3,"total": 6,"manual": 0}},"iam": {"name": "IAM","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "iam","Section": null,"Service": "iam","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS IAM resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 15,"manual": 0}},"kms": {"name": "KMS","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "kms","Section": null,"Service": "kms","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS KMS resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"rds": {"name": "RDS","checks": 
{"rds_snapshots_public_access": "PASS","rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "rds","Section": null,"Service": "rds","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS RDS resources and options.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ssm": {"name": "SSM","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ssm","Section": null,"Service": "ssm","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS SSM resources and options.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"elbv2": {"name": "ELBV2","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "elbv2","Section": null,"Service": "elbv2","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Elastic Load Balancer resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"config": {"name": "Config","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "config","Section": null,"Service": "config","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Config.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"lambda": {"name": "Lambda","checks": {"awslambda_function_url_public": null,"awslambda_function_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "lambda","Section": null,"Service": "lambda","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Lambda resources and options.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"redshift": {"name": "Redshift","checks": {"redshift_cluster_public_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "redshift","Section": null,"Service": "redshift","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Redshift resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"codebuild": {"name": "CodeBuild","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "codebuild","Section": null,"Service": "codebuild","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CodeBuild resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"guardduty": {"name": "GuardDuty","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "guardduty","Section": null,"Service": "guardduty","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS GuardDuty resources and options.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sagemaker": {"name": "SageMaker","checks": {"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sagemaker","Section": null,"Service": "sagemaker","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Sagemaker resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"cloudtrail": 
{"name": "CloudTrail","checks": {"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cloudtrail","Section": null,"Service": "cloudtrail","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CloudTrail resources and options.","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"opensearch": {"name": "OpenSearch","checks": {"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "opensearch","Section": null,"Service": "opensearch","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring OpenSearch resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"autoscaling": {"name": "Auto Scaling","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "autoscaling","Section": null,"Service": "autoscaling","SubGroup": null,"SubSection": null}],"description": "This control checks whether your Auto Scaling groups that are associated with a load balancer are using Elastic Load Balancing health checks. PCI DSS does not require load balancing or highly available configurations. However, this check aligns with AWS best practices.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"elasticsearch": {"name": "Elasticsearch","checks": {"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "elasticsearch","Section": null,"Service": "elasticsearch","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Elasticsearch resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 11,"requirements_failed": 4,"requirements_manual": 4,"total_requirements": 19,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "34f5d2fe-fe37-4143-81ce-fdf21d9a9826","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "gxp_eu_annex_11_aws","framework": "GxP-EU-Annex-11","version": "","description": "The GxP EU Annex 11 framework is the European equivalent to the FDA 21 CFR part 11 framework in the United States. This annex applies to all forms of computerized systems that are used as part of Good Manufacturing Practices (GMP) regulated activities. A computerized system is a set of software and hardware components that together fulfill certain functionalities. The application should be validated and IT infrastructure should be qualified. Where a computerized system replaces a manual operation, there should be no resultant decrease in product quality, process control, or quality assurance. 
There should be no increase in the overall risk of the process.","region": "eu-west-1","requirements": {"5-data": {"name": "5 Data","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "5-data","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Computerised systems exchanging data electronically with other systems should include appropriate built-in checks for the correct and secure entry and processing of data, in order to minimize the risks.","checks_status": {"fail": 2,"pass": 1,"total": 8,"manual": 0}},"17-archiving": {"name": "17 Archiving","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "17-archiving","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Data may be archived. This data should be checked for accessibility, readability and integrity. If relevant changes are to be made to the system (e.g. computer equipment or programs), then the ability to retrieve the data should be ensured and tested.","checks_status": {"fail": 2,"pass": 1,"total": 9,"manual": 0}},"9-audit-trails": {"name": "9 Audit Trails","checks": {"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "9-audit-trails","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Consideration should be given, based on a risk assessment, to building into the system the creation of a record of all GMP-relevant changes and deletions (a system generated 'audit trail'). For change or deletion of GMP-relevant data the reason should be documented. Audit trails need to be available and convertible to a generally intelligible form and regularly reviewed.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1-risk-management": {"name": "1 Risk Management","checks": {"securityhub_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "1-risk-management","Section": "General","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Risk management should be applied throughout the lifecycle of the computerised system taking into account patient safety, data integrity and product quality. 
As part of a risk management system, decisions on the extent of validation and data integrity controls should be based on a justified and documented risk assessment of the computerised system.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"16-business-continuity": {"name": "16 Business Continuity","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "16-business-continuity","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "For the availability of computerised systems supporting critical processes, provisions should be made to ensure continuity of support for those processes in the event of a system breakdown (e.g. a manual or alternative system). The time required to bring the alternative arrangements into use should be based on risk and appropriate for a particular system and the business process it supports. These arrangements should be adequately documented and tested.","checks_status": {"fail": 2,"pass": 1,"total": 9,"manual": 0}},"7.2-data-storage-backups": {"name": "7.2 Data Storage - Backups","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "7.2-data-storage-backups","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Regular back-ups of all relevant data should be done. Integrity and accuracy of backup data and the ability to restore the data should be checked during validation and monitored periodically.","checks_status": {"fail": 2,"pass": 1,"total": 8,"manual": 0}},"12.4-security-audit-trail": {"name": "12.4 Security - Audit Trail","checks": {"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "12.4-security-audit-trail","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Management systems for data and for documents should be designed to record the identity of operators entering, changing, confirming or deleting data including date and time.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"8.2-printouts-data-changes": {"name": "8.2 Printouts - Data Changes","checks": {"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "8.2-printouts-data-changes","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "For records supporting batch release it should be possible to generate printouts indicating if any of the data has been changed since the original entry.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"4.8-validation-data-transfer": {"name": "4.8 Validation - Data Transfer","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "4.8-validation-data-transfer","Section": "Project Phase","Service": "aws","SubGroup": null,"SubSection": 
null}],"description": "If data are transferred to another data format or system, validation should include checks that data are not altered in value and/or meaning during this migration process.","checks_status": {"fail": 2,"pass": 1,"total": 9,"manual": 0}},"4.5-validation-development-quality": {"name": "4.5 Validation - Development Quality","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.5-validation-development-quality","Section": "Project Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The regulated user should take all reasonable steps, to ensure that the system has been developed in accordance with an appropriate quality management system. The supplier should be assessed appropriately.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.6-validation-quality-performance": {"name": "4.6 Validation - Quality and Performance","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.6-validation-quality-performance","Section": "Project Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "For the validation of bespoke or customised computerised systems there should be a process in place that ensures the formal assessment and reporting of quality and performance measures for all the life-cycle stages of the system.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"7.1-data-storage-damage-protection": {"name": "7.1 Data Storage - Damage Protection","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","redshift_cluster_automated_snapshot": null,"sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"eks_cluster_kms_cmk_encryption_in_secrets_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "7.1-data-storage-damage-protection","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Data should be secured by both physical and electronic means against damage. Stored data should be checked for accessibility, readability and accuracy. 
Access to data should be ensured throughout the retention period.","checks_status": {"fail": 6,"pass": 4,"total": 22,"manual": 0}},"10-change-and-configuration-management": {"name": "10 Change and Configuration Management","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "10-change-and-configuration-management","Section": "Operational Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Any changes to a computerised system including system configurations should only be made in a controlled manner in accordance with a defined procedure.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.2-validation-documentation-change-control": {"name": "4.2 Validation - Documentation Change Control","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "4.2-validation-documentation-change-control","Section": "Project Phase","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Validation documentation should include change control records (if applicable) and reports on any deviations observed during the validation process.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}}},"requirements_passed": 8,"requirements_failed": 6,"requirements_manual": 0,"total_requirements": 14,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "376854be-93cd-44ab-a070-1e996b24184d","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "cis_1.5_aws","framework": "CIS","version": "1.5","description": "The CIS Amazon Web Services Foundations Benchmark provides prescriptive guidance for configuring security options for a subset of Amazon Web Services with an emphasis on foundational, testable, and architecture agnostic settings.","region": "eu-west-1","requirements": {"1.1": {"name": "1.1","checks": {"account_maintain_current_contact_details": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info","Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.","DefaultValue": null,"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing )1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, review and verify the current details. 4. 
Under `Contact Information`, review and verify the current details.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.","RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ).1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`. 4. Next to the field that you need to update, choose `Edit`. 5. After you have entered your changes, choose `Save changes`. 6. After you have made your changes, choose `Done`. 7. To edit your contact information, under `Contact Information`, choose `Edit`. 8. For the fields that you want to change, type your updated information, and then choose `Update`.","AdditionalInformation": ""}],"description": "Maintain current contact details","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.2": {"name": "1.2","checks": {"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if security contact information is present:**From Console:**1. Click on your account name at the top right corner of the console 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. Ensure contact information is specified in the `Security` section","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.","RemediationProcedure": "Perform the following to establish security contact information:**From Console:**1. Click on your account name at the top right corner of the console. 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. 
Enter contact information in the `Security` section**Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.","AdditionalInformation": ""}],"description": "Ensure security contact information is registered","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.3": {"name": "1.3","checks": {"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS account as the 'root' user 2. On the top right you will see the __ 3. Click on the __ 4. From the drop-down menu Click `My Account`5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions. 6. Click `Save questions` .","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "When creating a new AWS account, a default super user is automatically created. This account is referred to as the 'root user' or 'root' account. It is recommended that the use of this account be limited and highly controlled. During events in which the 'root' password is no longer accessible or the MFA token associated with 'root' is lost/destroyed it is possible, through authentication using secret questions and associated answers, to recover 'root' user login access.","RemediationProcedure": "**From Console:**1. Login to the AWS Account as the 'root' user 2. Click on the __ from the top right of the console 3. From the drop-down menu Click _My Account_ 4. Scroll down to the `Configure Security Questions` section 5. Click on `Edit`6. Click on each `Question` - From the drop-down select an appropriate question- Click on the `Answer` section- Enter an appropriate answer - Follow process for all 3 questions 7. Click `Update` when complete 8. Save Questions and Answers and place in a secure physical location","AdditionalInformation": ""}],"description": "Ensure security questions are registered in the AWS account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.4": {"name": "1.4","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html:https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/","Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. 
This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` .**From Command Line:**Run the following command: ```aws iam get-account-summary | grep \"AccountAccessKeysPresent\"``` If no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0. If the output shows a \"1\" then 'root' keys exist, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role-based accounts that are least privileged.","RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys**From Console:**1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. Click on __ at the top right and select `My Security Credentials` from the drop down list 3. On the pop out screen Click on `Continue to Security Credentials`4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_ 5. Under the `Status` column if there are any Keys which are Active- Click on `Make Inactive` - (Temporarily disable Key - may be needed again)- Click `Delete` - (Deleted keys cannot be recovered)","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request, AWS support can enable 'root' access only through access-keys (CLI, API methods) for us-gov cloud regions."}],"description": "Ensure no 'root' user account access key exists","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.5": {"name": "1.5","checks": {"iam_root_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. 
For the `` user, ensure the `mfa_active` field is set to `TRUE` .**From Command Line:**1. Run the following command: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` 2. Ensure the AccountMFAEnabled property is set to 1","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to establish MFA for the 'root' user account:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` . 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.When you are finished, the virtual MFA device starts generating one-time passwords.In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions does not have console access. This recommendation is not applicable for us-gov cloud regions."}],"description": "Ensure MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.6": {"name": "1.6","checks": {"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. 
With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup:1. Run the following command to determine if the 'root' account has MFA setup: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ```The `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled. If `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation.2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: ```aws iam list-virtual-mfa-devices``` If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: `\"SerialNumber\": \"arn:aws:iam::__:mfa/root-account-mfa-device\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides.**Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.","RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` . 5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device. 6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. 7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number. 8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.Remediation for this recommendation is not available through AWS CLI.","AdditionalInformation": "IAM User account 'root' for us-gov cloud regions does not have console access. 
This control is not applicable for us-gov cloud regions."}],"description": "Ensure hardware MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.7": {"name": "1.7","checks": {"iam_avoid_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html:https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html","Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/` 2. In the left pane, click `Credential Report` 3. Click on `Download Report` 4. Open or Save the file locally 5. Locate the `` under the user column 6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used.**From Command Line:**Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '' ```Review `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used.**Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "The 'root user' has unrestricted access to and control over all account resources. Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.","RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user:1. Change the 'root' user password. 2. Deactivate or delete any access keys associated with the 'root' user.**Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.","AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable.Monitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user."}],"description": "Eliminate use of the 'root' user for administrative and daily tasks","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.8": {"name": "1.8","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure passwords are at least a given length. It is recommended that the password policy require a minimum password length of 14.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Minimum password length\" is set to 14 or greater.**From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Setting a password complexity policy increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Set \"Minimum password length\" to `14` or greater. 5. Click \"Apply password policy\"**From Command Line:** ```aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy requires minimum length of 14 or greater","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.9": {"name": "1.9","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Prevent password reuse\" is checked 5. Ensure \"Number of passwords to remember\" is set to 24**From Command Line:** ``` aws iam get-account-password-policy``` Ensure the output of the above command includes \"PasswordReusePrevention\": 24","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Preventing password reuse increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. 
Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Check \"Prevent password reuse\" 5. Set \"Number of passwords to remember\" to `24` **From Command Line:** ```aws iam update-account-password-policy --password-reuse-prevention 24 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy prevents password reuse","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.1": {"name": "3.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events","Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane- You will be presented with a list of trails across all regions 3. Ensure at least one Trail has `All` specified in the `Region` column 4. Click on a trail via the link in the _Name_ column 5. Ensure `Logging` is set to `ON`6. Ensure `Apply trail to all regions` is set to `Yes` 7. In section `Management Events` ensure `Read/Write Events` set to `ALL`**From Command Line:** ```aws cloudtrail describe-trails ``` Ensure `IsMultiRegionTrail` is set to `true` ``` aws cloudtrail get-trail-status --name ``` Ensure `IsLogging` is set to `true` ``` aws cloudtrail get-event-selectors --trail-name ``` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`","ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features:1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html","AssessmentStatus": "Automated","RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. 
Additionally, - ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected- ensuring that a multi-regions trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on AWS global services- for a multi-regions trail, ensuring that management events configured for all types of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account","RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on _Trails_ on the left navigation pane 3. Click `Get Started Now` , if presented- Click `Add new trail` - Enter a trail name in the `Trail name` box- Set the `Apply trail to all regions` option to `Yes` - Specify an S3 bucket name in the `S3 bucket` box- Click `Create`4. If 1 or more trails already exist, select the target trail to enable for global logging 5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`. 6. Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`.**From Command Line:** ``` aws cloudtrail create-trail --name --bucket-name --is-multi-region-trail aws cloudtrail update-trail --name --is-multi-region-trail ```Note: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.","AdditionalInformation": ""}],"description": "Ensure CloudTrail is enabled in all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.2": {"name": "3.2","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html","Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.","DefaultValue": null,"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. For Every Trail: - Click on a trail via the link in the _Name_ column - Under the `General details` section, ensure `Log file validation` is set to `Enabled` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `LogFileValidationEnabled` is set to `true` for each trail","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.","RemediationProcedure": "Perform the following to enable log file validation on a given trail:**From Console:**1. 
Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. Click on target trail 4. Within the `General details` section click `edit` 5. Under the `Advanced settings` section 6. Check the enable box under `Log file validation`7. Click `Save changes` **From Command Line:** ``` aws cloudtrail update-trail --name --enable-log-file-validation ``` Note that periodic validation of logs using these digests can be performed by running the following command: ``` aws cloudtrail validate-logs --trail-arn --start-time --end-time ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail log file validation is enabled","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.3": {"name": "3.3","checks": {"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html","Description": "CloudTrail logs a record of every API call made in your AWS account. These log files are stored in an S3 bucket. It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the `API activity history` pane on the left, click `Trails`3. In the `Trails` pane, note the bucket names in the `S3 bucket` column 4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 5. For each bucket noted in step 3, right-click on the bucket and click `Properties`6. In the `Properties` pane, click the `Permissions` tab. 7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 8. Ensure no rows exist that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.`9. If the `Edit bucket policy` button is present, click it to review the bucket policy. 10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ```aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure the `AllUsers` principal is not granted privileges to that `` : ```aws s3api get-bucket-acl --bucket --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]' ``` 3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that ``: ```aws s3api get-bucket-acl --bucket --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AuthenticatedUsers` ]' ``` 4. Get the S3 Bucket Policy ```aws s3api get-bucket-policy --bucket ``` 5. 
Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}**Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.","RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy:1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 2. Right-click on the bucket and click Properties 3. In the `Properties` pane, click the `Permissions` tab. 4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 5. Select the row that grants permission to `Everyone` or `Any Authenticated User`6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row). 7. Click `Save` to save the ACL. 8. If the `Edit bucket policy` button is present, click it. 9. Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.","AdditionalInformation": ""}],"description": "Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.4": {"name": "3.4","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/how-cloudtrail-works.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-aws-service-specific-topics.html","Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs.Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed:**From Console:**1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Under `Trails` , click on the CloudTrail you wish to evaluate 3. Under the `CloudWatch Logs` section. 4. Ensure a `CloudWatch Logs` log group is configured and listed. 5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp.**From Command Line:**1. 
Run the following command to get a listing of existing trails: ```aws cloudtrail describe-trails ``` 2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property. 3. Using the noted value of the `Name` property, run the following command: ```aws cloudtrail get-trail-status --name ``` 4. Ensure the `LatestCloudWatchLogsDeliveryTime` property is set to a recent (~one day old) timestamp.If the `CloudWatch Logs` log group is not set up and the delivery time is not recent, refer to the remediation below.","ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides opportunity to establish alarms and notifications for anomalous or sensitive account activity.","RemediationProcedure": "Perform the following to establish the prescribed state:**From Console:**1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Select the `Trail` that needs to be updated. 3. Scroll down to `CloudWatch Logs` 4. Click `Edit` 5. Under `CloudWatch Logs` click the box `Enabled` 6. Under `Log Group` pick new or select an existing log group 7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group. 8. Under `IAM Role` pick new or select an existing. 9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role. 10. Click `Save changes`.**From Command Line:** ``` aws cloudtrail update-trail --name --cloudwatch-logs-log-group-arn --cloudwatch-logs-role-arn ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail trails are integrated with CloudWatch Logs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.5": {"name": "3.5","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html","Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.","DefaultValue": null,"AuditProcedure": "Process to evaluate AWS Config configuration per region**From Console:**1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/). 2. On the top right of the console select target Region. 3. 
If presented with Setup AWS Config - follow remediation procedure: 4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears. 5. Ensure 1 or both check-boxes under \"All Resources\" are checked.- Include global resources related to IAM resources - which needs to be enabled in 1 region only 6. Ensure the correct S3 bucket has been defined. 7. Ensure the correct SNS topic has been defined. 8. Repeat steps 2 to 7 for each region.**From Command Line:**1. Run this command to show all AWS Config recorders and their properties: ``` aws configservice describe-configuration-recorders ``` 2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true`Note: There is one more parameter \"ResourceTypes\" in recordingGroup object. We don't need to check the same as whenever we set \"allSupported\": true, AWS enforces resource types to be empty (\"ResourceTypes\":[])Sample Output:``` {\"ConfigurationRecorders\": [{\"recordingGroup\": {\"allSupported\": true,\"resourceTypes\": [],\"includeGlobalResourceTypes\": true},\"roleARN\": \"arn:aws:iam:::role/service-role/\",\"name\": \"default\"}] } ```3. Run this command to show the status for all AWS Config recorders: ``` aws configservice describe-configuration-recorder-status ``` 4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`","ImpactStatement": "It is recommended AWS Config be enabled in all regions.","AssessmentStatus": "Automated","RationaleStatement": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.","RemediationProcedure": "To implement AWS Config configuration:**From Console:**1. Select the region you want to focus on in the top right of the console 2. Click `Services`3. Click `Config`4. Define which resources you want to record in the selected region 5. Choose to include global resources (IAM resources) 6. Specify an S3 bucket in the same account or in another managed AWS account 7. Create an SNS Topic from the same AWS account or another managed AWS account**From Command Line:**1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). 2. Run this command to set up the configuration recorder ``` aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole ``` 3. Run this command to start the configuration recorder: ``` aws configservice start-configuration-recorder --configuration-recorder-name ```","AdditionalInformation": ""}],"description": "Ensure AWS Config is enabled in all regions","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.6": {"name": "3.6","checks": {"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html","Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. 
An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the CloudTrail S3 bucket has access logging enabled:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the API activity history pane on the left, click Trails 3. In the Trails pane, note the bucket names in the S3 bucket column 4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 5. Under `All Buckets` click on a target S3 bucket 6. Click on `Properties` in the top right of the console 7. Under `Bucket:` _ `` _ click on `Logging`8. Ensure `Enabled` is checked.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ```aws cloudtrail describe-trails --query 'trailList[*].S3BucketName'``` 2. Ensure Bucket Logging is enabled: ``` aws s3api get-bucket-logging --bucket ``` Ensure the command does not return empty output.Sample Output for a bucket with logging enabled:``` {\"LoggingEnabled\": {\"TargetPrefix\": \"\",\"TargetBucket\": \"\"} } ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within any target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.","RemediationProcedure": "Perform the following to enable S3 bucket logging:**From Console:**1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 2. Under `All Buckets` click on the target S3 bucket 3. Click on `Properties` in the top right of the console 4. Under `Bucket:` click on `Logging`5. Configure bucket logging- Click on the `Enabled` checkbox- Select Target Bucket from list- Enter a Target Prefix 6. Click `Save`.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --region --query trailList[*].S3BucketName ``` 2. Copy and add target bucket name at ``, Prefix for logfile at `` and optionally add an email address in the following template and save it as ``: ``` {\"LoggingEnabled\": {\"TargetBucket\": \"\",\"TargetPrefix\": \"\",\"TargetGrants\": [{\"Grantee\": {\"Type\": \"AmazonCustomerByEmail\",\"EmailAddress\": \"\"},\"Permission\": \"FULL_CONTROL\"}]}} ``` 3. Run the `put-bucket-logging` command with bucket name and `` as input, for more information refer to [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html): ``` aws s3api put-bucket-logging --bucket --bucket-logging-status file:// ```","AdditionalInformation": ""}],"description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.7": {"name": "3.7","checks": {"cloudtrail_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. 
Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html","Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Select a Trail 4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KSM Key Id` field.**From Command Line:**1. Run the following command: ```aws cloudtrail describe-trails``` 2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.","ImpactStatement": "Customer created keys incur an additional cost. See https://aws.amazon.com/kms/pricing/ for more information.","AssessmentStatus": "Automated","RationaleStatement": "Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.","RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Click on a Trail 4. Under the `S3` section click on the edit button (pencil icon) 5. Click `Advanced`6. Select an existing CMK from the `KMS key Id` drop-down menu- Note: Ensure the CMK is located in the same region as the S3 bucket- Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy 7. Click `Save`8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files. 9. Click `Yes` **From Command Line:** ``` aws cloudtrail update-trail --name --kms-id aws kms put-key-policy --key-id --policy ```","AdditionalInformation": "3 statements which need to be added to the CMK policy:1\\. Enable Cloudtrail to describe CMK properties ```
{\"Sid\": \"Allow CloudTrail access\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:DescribeKey\",\"Resource\": \"*\" } ``` 2\\. Granting encrypt permissions ``` 
{\"Sid\": \"Allow CloudTrail to encrypt logs\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:GenerateDataKey*\",\"Resource\": \"*\",\"Condition\": {\"StringLike\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": [\"arn:aws:cloudtrail:*:aws-account-id:trail/*\"]}} } ``` 3\\. Granting decrypt permissions ``` 
{\"Sid\": \"Enable CloudTrail log decrypt permissions\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"arn:aws:iam::aws-account-id:user/username\"},\"Action\": \"kms:Decrypt\",\"Resource\": \"*\",\"Condition\": {\"Null\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\"}} } ```"}],"description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.8": {"name": "3.8","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://aws.amazon.com/kms/pricing/:https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final","Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the `General configuration` panel open the tab `Key rotation` 5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated 6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\"**From Command Line:**1. Run the following command to get a list of all keys and their associated `KeyIds````aws kms list-keys ``` 2. For each key, note the KeyId and run the following command ``` describe-key --key-id  ``` 3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command ```aws kms get-key-rotation-status --key-id  ``` 4. Ensure `KeyRotationEnabled` is set to `true` 5. Repeat steps 2 - 4 for all remaining CMKs","ImpactStatement": "Creation, management, and storage of CMKs may require additional time from and administrator.","AssessmentStatus": "Automated","RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed. Keys should be rotated every year, or upon event that would result in the compromise of that key.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` . 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the \"General configuration\" panel open the tab \"Key rotation\" 5. Check the \"Automatically rotate this KMS key every year.\" checkbox**From Command Line:**1. 
Run the following command to enable key rotation: ```aws kms enable-key-rotation --key-id  ```","AdditionalInformation": ""}],"description": "Ensure rotation for customer created symmetric CMKs is enabled","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.9": {"name": "3.9","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html","Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. Ensure a Log Flow exists that has `Active` in the `Status` column.**From Command Line:**1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region: ``` aws ec2 describe-vpcs --region  --query Vpcs[].VpcId ``` 2. The command output returns the `VpcId` available in the selected region. 3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled: ``` aws ec2 describe-flow-logs --filter \"Name=resource-id,Values=\" ``` 4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`. 5. Repeat step 3 for other VPCs available in the same region. 6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.","ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.","RemediationProcedure": "Perform the following to determine if VPC Flow logs is enabled:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. If no Flow Log exists, click `Create Flow Log`7. For Filter, select `Reject` 8. Enter in a `Role` and `Destination Log Group`9. Click `Create Log Flow`10. 
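To sweep every key per the 3.8 audit and remediation steps above, a minimal sketch; it assumes the caller has the relevant `kms:List*`, `kms:DescribeKey`, and `kms:EnableKeyRotation` permissions, and uses only commands already named in the procedure:

```
# Sketch: enable yearly rotation on every symmetric customer managed CMK.
for key in $(aws kms list-keys --query 'Keys[].KeyId' --output text); do
  spec=$(aws kms describe-key --key-id "${key}" --query 'KeyMetadata.KeySpec' --output text)
  manager=$(aws kms describe-key --key-id "${key}" --query 'KeyMetadata.KeyManager' --output text)
  if [ "${spec}" = "SYMMETRIC_DEFAULT" ] && [ "${manager}" = "CUSTOMER" ]; then
    aws kms enable-key-rotation --key-id "${key}"
    aws kms get-key-rotation-status --key-id "${key}"   # expect "KeyRotationEnabled": true
  fi
done
```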
Click on `CloudWatch Logs Group` **Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment.**From Command Line:**1. Create a policy document named `role_policy_document.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Sid\": \"test\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\"Action\": \"sts:AssumeRole\"}] } ``` 2. Create another policy document named `iam_policy.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Action\":[\"logs:CreateLogGroup\",\"logs:CreateLogStream\",\"logs:DescribeLogGroups\",\"logs:DescribeLogStreams\",\"logs:PutLogEvents\",\"logs:GetLogEvents\",\"logs:FilterLogEvents\"],\"Resource\": \"*\"}] } ``` 3. Run the below command to create an IAM role: ``` aws iam create-role --role-name  --assume-role-policy-document file://role_policy_document.json``` 4. Run the below command to create an IAM policy: ``` aws iam create-policy --policy-name  --policy-document file://iam_policy.json ``` 5. Run the `attach-role-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned): ``` aws iam attach-role-policy --policy-arn arn:aws:iam:::policy/ --role-name  ``` 6. Run `describe-vpcs` to get the VpcId available in the selected region: ``` aws ec2 describe-vpcs --region  ``` 7. The command output should return the VPC Id available in the selected region. 8. Run `create-flow-logs` to create a flow log for the VPC: ``` aws ec2 create-flow-logs --resource-type VPC --resource-ids  --traffic-type REJECT --log-group-name  --deliver-logs-permission-arn  ``` 9. Repeat step 8 for other VPCs available in the selected region. 10. Change the region by updating --region and repeat the remediation procedure for other VPCs.","AdditionalInformation": ""}],"description": "Ensure VPC flow logging is enabled in all VPCs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"4.1": {"name": "4.1","checks": {"cloudwatch_log_metric_filter_unauthorized_api_calls": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://aws.amazon.com/sns/:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1.
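The final 3.9 remediation steps above, as a compact sketch; the region, role ARN, and log group naming are hypothetical placeholders, and the IAM role is assumed to already exist with the delivery policy shown earlier:

```
# Sketch: enable REJECT flow logs for every VPC in one region (names/ARNs are placeholders).
REGION="us-east-1"
ROLE_ARN="arn:aws:iam::111122223333:role/myFlowLogsRole"   # hypothetical delivery role
for vpc in $(aws ec2 describe-vpcs --region "${REGION}" --query 'Vpcs[].VpcId' --output text); do
  aws ec2 create-flow-logs --region "${REGION}" \
    --resource-type VPC --resource-ids "${vpc}" --traffic-type REJECT \
    --log-group-name "vpc-flow-logs/${vpc}" --deliver-logs-permission-arn "${ROLE_ARN}"
done
```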
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with \"Name\":` note ``- From value associated with \"CloudWatchLogsLogGroupArn\" note Example: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:::log-group:NewGroup:*,  would be NewGroup- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>`Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this `` that you captured in step 1:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\", ```4. Note the \"filterName\" `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\" ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions.If an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts.In some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.","AssessmentStatus": "Automated","RationaleStatement": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. 
Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. **Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for unauthorized API calls","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.2": {"name": "4.2","checks": {"cloudwatch_log_metric_filter_sign_in_without_mfa": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/viewing_metrics_with_cloudwatch.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all `CloudTrails`:``` aws cloudtrail describe-trails ```- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region `CloudTrail` is active``` aws cloudtrail get-trail-status --name  ```Ensure in the output that `IsLogging` is set to `TRUE`- Ensure identified Multi-region 'Cloudtrail' captures all Management Events``` aws cloudtrail get-event-selectors --trail-name  ```Ensure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. 
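Because recommendations 4.1 through 4.9 all share this filter-plus-alarm wiring, the end-to-end remediation flow is sketched once here; it assumes CloudTrail already delivers to a CloudWatch Logs group, and the log group name, filter/metric/alarm names, topic name, and e-mail endpoint are all hypothetical placeholders (the filter pattern is the one prescribed for 4.1). The later checks differ only in the filter pattern:

```
# Sketch: metric filter -> SNS topic -> subscription -> alarm (placeholder names throughout).
LOG_GROUP="my-cloudtrail-log-group"
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name unauthorized_api_calls_filter \
  --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.errorCode = "*UnauthorizedOperation") || ($.errorCode = "AccessDenied*") || ($.sourceIPAddress != "delivery.logs.amazonaws.com") || ($.eventName != "HeadBucket") }'
TOPIC_ARN=$(aws sns create-topic --name cis-benchmark-alarms --query TopicArn --output text)
aws sns subscribe --topic-arn "${TOPIC_ARN}" --protocol email --notification-endpoint me@example.com
aws cloudwatch put-metric-alarm --alarm-name unauthorized_api_calls_alarm \
  --metric-name unauthorized_api_calls_metric --namespace CISBenchmark \
  --statistic Sum --period 300 --threshold 1 \
  --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 \
  --alarm-actions "${TOPIC_ARN}"
```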
Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\" ```Or (to reduce false positives in case Single Sign-On (SSO) is used in the organization):``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `` taken from audit step 1.Use Command: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }' ```Or (to reduce false positives in case Single Sign-On (SSO) is used in the organization):``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4.
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored - Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }` reduces false alarms raised when a user logs in via an SSO account."}],"description": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.3": {"name": "4.3","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7.
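For 4.2 only the metric filter itself differs from the 4.1 sketch; a hedged fragment with placeholder names, using the prescribed sign-in-without-MFA pattern:

```
# Sketch for 4.2: console sign-in without MFA (wire topic/subscription/alarm as in the 4.1 sketch).
LOG_GROUP="my-cloudtrail-log-group"   # placeholder, same group as in the 4.1 sketch
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name console_signin_no_mfa_filter \
  --metric-transformations metricName=console_signin_no_mfa_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventName = "ConsoleLogin") && ($.additionalEventData.MFAUsed != "Yes") }'
```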
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for 'root' account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail**- ensures that activities from all regions (used as well as unused) are monitored- ensures that activities on all supported global services are monitored- ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for usage of 'root' account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.4": {"name": "4.4","checks": {"cloudwatch_log_metric_filter_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established changes made to Identity and Access Management (IAM) policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
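The corresponding fragment for 4.3, reusing the 4.1 wiring; names are placeholders and the pattern is the prescribed 'root' usage pattern:

```
# Sketch for 4.3: 'root' account usage (wire topic/subscription/alarm as in the 4.1 sketch).
LOG_GROUP="my-cloudtrail-log-group"   # placeholder
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name root_usage_filter \
  --metric-transformations metricName=root_usage_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ $.userIdentity.type = "Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != "AwsServiceEvent" }'
```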
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to IAM policies will help ensure authentication and authorization controls remain intact.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for IAM policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.5": {"name": "4.5","checks": {"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
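The fragment for 4.4, again reusing the 4.1 wiring with placeholder names; the pattern is the prescribed IAM policy-change pattern:

```
# Sketch for 4.4: IAM policy changes (wire topic/subscription/alarm as in the 4.1 sketch).
LOG_GROUP="my-cloudtrail-log-group"   # placeholder
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name iam_policy_changes_filter \
  --metric-transformations metricName=iam_policy_changes_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}'
```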
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to CloudTrail's configuration will help ensure sustained visibility to activities performed in the AWS account.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.6": {"name": "4.6","checks": {"cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
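The fragment for 4.5, with the prescribed CloudTrail configuration-change pattern (placeholder names, wiring as in the 4.1 sketch):

```
# Sketch for 4.5: CloudTrail configuration changes.
LOG_GROUP="my-cloudtrail-log-group"   # placeholder
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name cloudtrail_cfg_changes_filter \
  --metric-transformations metricName=cloudtrail_cfg_changes_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }'
```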
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring failed console logins may decrease lead time to detect an attempt to brute force a credential, which may provide an indicator, such as source IP, that can be used in other event correlation.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.7": {"name": "4.7","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. 
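The fragment for 4.6, with the prescribed failed-authentication pattern (placeholder names, wiring as in the 4.1 sketch):

```
# Sketch for 4.6: AWS Management Console authentication failures.
LOG_GROUP="my-cloudtrail-log-group"   # placeholder
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name console_auth_failures_filter \
  --metric-transformations metricName=console_auth_failures_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = "Failed authentication") }'
```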
``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Data encrypted with disabled or deleted keys will no longer be accessible.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created CMKs","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.8": {"name": "4.8","checks": {"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
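The fragment for 4.7, with the prescribed CMK disable/scheduled-deletion pattern (placeholder names, wiring as in the 4.1 sketch):

```
# Sketch for 4.7: disabling or scheduled deletion of customer created CMKs.
LOG_GROUP="my-cloudtrail-log-group"   # placeholder
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name kms_cmk_disable_or_deletion_filter \
  --metric-transformations metricName=kms_cmk_disable_or_deletion_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }'
```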
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to S3 bucket policies may reduce time to detect and correct permissive policies on sensitive S3 buckets.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for S3 bucket policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.9": {"name": "4.9","checks": {"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. 
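The fragment for 4.8, with the prescribed S3 bucket policy-change pattern (placeholder names, wiring as in the 4.1 sketch):

```
# Sketch for 4.8: S3 bucket policy changes.
LOG_GROUP="my-cloudtrail-log-group"   # placeholder
aws logs put-metric-filter --log-group-name "${LOG_GROUP}" \
  --filter-name s3_bucket_policy_changes_filter \
  --metric-transformations metricName=s3_bucket_policy_changes_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }'
```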
Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` At least one subscription should have a \"SubscriptionArn\" with a valid AWS ARN. ``` Example of a valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<account_id>:<topic_name>:<subscription_id>\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to AWS Config configuration will help ensure sustained visibility of configuration items within the AWS account.","RemediationProcedure": "Perform the following to set up the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on the filter pattern provided, which checks for AWS Config configuration changes, and the `<trail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <trail_log_group_name> --filter-name `<filter_name>` --metric-transformations metricName=`<metric_name>`,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoint> ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs metric filter created in step 1 and the SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<alarm_name>` --metric-name `<metric_name>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Config configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"5.1": {"name": "5.1","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison","Description": "The Network Access Control List (NACL) function provides stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "**From Console:**Perform the following to determine if the account is configured as prescribed: 1. 
Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW`**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "**From Console:**Perform the following: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL to remediate, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Click `Edit inbound rules`- Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule- Click `Save`","AdditionalInformation": ""}],"description": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"5.2": {"name": "5.2","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the 0.0.0.0/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. 
Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.3": {"name": "5.3","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `::/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the ::/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than ::/0, or, B) Click `Delete` to remove the offending inbound rule 6. 
Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from ::/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.4": {"name": "5.4","checks": {"ec2_securitygroup_default_restrict_traffic": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#default-security-group","Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic. The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation.**NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: Security Group State: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Ensure no rules exist 4. Click the `Outbound Rules` tab 5. Ensure no rules exist Security Group Members: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. Copy the id of the default security group. 5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home 6. In the filter column type 'Security Group ID : < security group id from #4 >'","ImpactStatement": "Implementing this recommendation in an existing VPC containing operating resources requires extremely careful migration planning as the default security groups are likely to be enabling many ports that are unknown. 
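The console-based audit for 5.4 above can be approximated from the command line. The following is a minimal sketch, not part of this report data; it assumes a configured AWS CLI with EC2 read permissions across regions, and prints default security groups that still carry inbound or outbound rules:

```bash
# List default security groups that still contain inbound or outbound rules,
# per region - each line printed is a candidate 5.4 finding.
for region in $(aws ec2 describe-regions --query 'Regions[].RegionName' --output text); do
  aws ec2 describe-security-groups --region "$region" \
    --filters Name=group-name,Values=default \
    --query 'SecurityGroups[?length(IpPermissions)>`0` || length(IpPermissionsEgress)>`0`].[GroupId,VpcId]' \
    --output text | sed "s/^/$region /"
done
```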
Enabling VPC flow logging (of accepts) in an existing environment that is known to be breach free will reveal the current pattern of ports being used for each instance to communicate successfully.","AssessmentStatus": "Automated","RationaleStatement": "Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will in turn reduce the exposure of those resources.","RemediationProcedure": "Security Group Members: Perform the following to implement the prescribed state:1. Identify AWS resources that exist within the default security group 2. Create a set of least privilege security groups for those resources 3. Place the resources in those security groups 4. Remove the resources noted in #1 from the default security group. Security Group State: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups` 4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Remove any inbound rules 4. Click the `Outbound Rules` tab 5. Remove any outbound rules. Recommended: Security groups allow you to edit the \"name\" field. After remediating default group rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"","AdditionalInformation": ""}],"description": "Ensure the default security group of every VPC restricts all traffic","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"5.5": {"name": "5.5","checks": {"vpc_peering_routing_tables_with_least_privilege": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/peering-configurations-partial-access.html:https://docs.aws.amazon.com/cli/latest/reference/ec2/create-vpc-peering-connection.html","Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.","DefaultValue": null,"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.**From Command Line:**1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a <vpc_peering_connection_id> (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired. ``` aws ec2 describe-route-tables --filter \"Name=vpc-id,Values=<vpc_id>\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\" ```","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of breach as resources outside of these routes are inaccessible to the peered VPC.","RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable.**From Command Line:**1. 
For each <route_table_id> containing routes non-compliant with your routing policy (which grants more than desired \"least access\"), delete the non-compliant route: ``` aws ec2 delete-route --route-table-id <route_table_id> --destination-cidr-block <non_compliant_destination_CIDR> ``` 2. Create a new compliant route: ``` aws ec2 create-route --route-table-id <route_table_id> --destination-cidr-block <compliant_destination_CIDR> --vpc-peering-connection-id <peering_connection_id> ```","AdditionalInformation": "If an organization has AWS transit gateway implemented in their VPC architecture they should look to apply the recommendation above for \"least access\" routing architecture at the AWS transit gateway level in combination with what must be implemented at the standard VPC route table. More specifically, to route traffic between two or more VPCs via a transit gateway VPCs must have an attachment to a transit gateway route table as well as a route, therefore to avoid routing traffic between VPCs an attachment to the transit gateway route table should only be added where there is an intention to route traffic between the VPCs. As transit gateways are able to host multiple route tables it is possible to group VPCs by attaching them to a common route table."}],"description": "Ensure routing tables for VPC peering are \"least access\"","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.10": {"name": "1.10","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://tools.ietf.org/html/rfc6238:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users","Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if an MFA device is enabled for all IAM users having a console password:**From Console:**1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left pane, select `Users` 3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`. 4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: ```aws iam generate-credential-report ``` ```aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8``` 2. The output of this command will produce a table similar to the following: ```user,password_enabled,mfa_active elise,false,false brandon,true,true rakesh,false,false helene,false,false paras,true,true anitha,false,false``` 3. 
For any column having `password_enabled` set to `true`, ensure `mfa_active` is also set to `true`.","ImpactStatement": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to one of the following alternative methods of MFA.","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to enable MFA:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' 2. In the left pane, select `Users`. 3. In the `User Name` list, choose the name of the intended MFA user. 4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`. 5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords. 8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`. 9. Click `Assign MFA`.","AdditionalInformation": "**Forced IAM User Self-Service Remediation**Amazon has published a pattern that forces users to set up MFA themselves before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts."}],"description": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.11": {"name": "1.11","checks": {"iam_user_no_setup_initial_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/cli/latest/reference/iam/delete-access-key.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html","Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When creating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on a User where column `Password age` and `Access key age` is not set to `None` 5. Click on `Security credentials` Tab 6. Compare the user `Creation time` to the Access Key `Created` date. 7. For any that match, the key was created during initial user setup.- Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access key utilization: ```aws iam generate-credential-report ``` ```aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date elise,false,true,2015-04-16T15:14:00+00:00,false,N/A brandon,true,true,N/A,false,N/A rakesh,false,false,N/A,false,N/A helene,false,true,2015-11-18T17:47:00+00:00,false,N/A paras,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00 anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A``` 3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization.**Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.","RemediationProcedure": "Perform the following to delete access keys that do not pass the audit:**From Console:**1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. As an Administrator - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. 7. 
As an IAM User - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.**From Command Line:** ``` aws iam delete-access-key --access-key-id <access_key_id> --user-name <user_name> ```","AdditionalInformation": "Credential report does not appear to contain \"Key Creation Date\""}],"description": "Do not setup access keys during initial user setup for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.12": {"name": "1.12","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if unused credentials exist:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM` 4. Click on `Users` 5. Click the `Settings` (gear) icon. 6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id` 7. Click on `Close`8. Check and ensure that `Console last sign-in` is less than 45 days ago.**Note** - `Never` means the user has never logged in.9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None`. If the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation.**From Command Line:****Download Credential Report:**1. Run the following commands: ```aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^<root_account>' ```**Ensure unused credentials do not exist:**2. For each user having `password_enabled` set to `TRUE`, ensure `password_last_used_date` is less than `45` days ago.- When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information`, ensure `password_last_changed` is less than 45 days ago.3. For each user having an `access_key_1_active` or `access_key_2_active` set to `TRUE`, ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago.- When a user having an `access_key_x_active` (where x is 1 or 2) set to `TRUE` and the corresponding access_key_x_last_used_date is set to `N/A`, ensure `access_key_x_last_rotated` is less than 45 days ago.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.","RemediationProcedure": "**From Console:**Perform the following to manage unused passwords (IAM user console access): 1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select user whose `Console last sign-in` is greater than 45 days 7. Click `Security credentials` 8. 
In section `Sign-in credentials`, `Console password` click `Manage`9. Under Console Access select `Disable` 10. Click `Apply`Perform the following to deactivate Access Keys:1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select any access keys that are over 45 days old and that have been used and - Click on `Make Inactive` 7. Select any access keys that are over 45 days old and that have not been used and - Click the X to `Delete`","AdditionalInformation": "The root account is excluded in the audit since the root account should not be used for day to day business and would likely be unused for more than 45 days."}],"description": "Ensure credentials unused for 45 days or greater are disabled","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.13": {"name": "1.13","checks": {"iam_user_two_active_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the user's access configuration does not adhere to security best practices and the risk of accidental exposures increases. - Repeat steps no. 3 – 5 for each IAM user in your AWS account.**From Command Line:**1. Run `list-users` command to list all IAM users within your account: ``` aws iam list-users --query \"Users[*].UserName\" ``` The command output should return an array that contains all your IAM user names.2. Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user: ``` aws iam list-access-keys --user-name <user_name> ``` The command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account.3. Check the `Status` property value for each key returned to determine each key's current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation, refer to the remediation below.- Repeat steps no. 2 and 3 for each IAM user in your AWS account.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 
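As a rough CLI companion to the 1.13 audit steps above, the following sketch (illustrative only, not part of this report; it assumes a configured AWS CLI) prints users holding more than one Active access key:

```bash
# Print IAM users that currently have more than one Active access key
# (the condition CIS 1.13 audits for).
for user in $(aws iam list-users --query 'Users[*].UserName' --output text); do
  n=$(aws iam list-access-keys --user-name "$user" \
        --query "length(AccessKeyMetadata[?Status=='Active'])" --output text)
  [ "$n" -gt 1 ] && echo "$user: $n active access keys"
done
```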
2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate them by clicking the `Make Inactive` link. 7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key. 8. Repeat steps no. 3 – 7 for each IAM user in your AWS account.**From Command Line:**1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user.**Note** - the command does not return any output: ``` aws iam update-access-key --access-key-id <access_key_id> --status Inactive --user-name <user_name> ``` 3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User: ``` aws iam list-access-keys --user-name <user_name> ``` - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair's `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration now adheres to this recommendation.4. Repeat steps no. 1 – 3 for each IAM user in your AWS account.","AdditionalInformation": ""}],"description": "Ensure there is only one active access key available for any single IAM user","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.14": {"name": "1.14","checks": {"iam_rotate_access_key_90_days": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click the `setting` icon 4. Select `Console last sign-in` 5. Click `Close` 6. Ensure that `Access key age` is less than 90 days ago. 
**Note:** `None` in the `Access key age` column means the user has not used the access key.**From Command Line:**``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d ``` The `access_key_1_last_rotated` field in this file notes the date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.","RemediationProcedure": "Perform the following to rotate access keys:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click on `Security Credentials` 4. As an Administrator - Click on `Make Inactive` for keys that have not been rotated in `90` Days 5. As an IAM User - Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days 6. Click on `Create Access Key` 7. Update programmatic calls with the new Access Key credentials**From Command Line:**1. While the first access key is still active, create a second access key, which is active by default. Run the following command: ``` aws iam create-access-key ``` At this point, the user has two active access keys.2. Update all applications and tools to use the new access key. 3. Determine whether the first access key is still in use by using this command: ``` aws iam get-access-key-last-used ``` 4. One approach is to wait several days and then check the old access key for any use before proceeding. Even if Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command: ``` aws iam update-access-key ``` 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to re-enable the first access key. Then return to Step 2 and update this application to use the new key.6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: ``` aws iam delete-access-key ```","AdditionalInformation": ""}],"description": "Ensure access keys are rotated every 90 days or less","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.15": {"name": "1.15","checks": {"iam_policy_attached_only_to_group_or_roles": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html","Description": "IAM users are granted access to services, functions, and data through IAM policies. 
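The 1.14 rotation check above can be scripted against the credential report. A hedged sketch, not part of this report; it assumes a configured AWS CLI, GNU date, and that the report columns follow the documented layout (access_key_1_active in column 9, access_key_1_last_rotated in column 10):

```bash
# Flag access keys whose access_key_1_last_rotated timestamp is older than 90
# days. ISO 8601 strings compare correctly as plain strings in awk.
aws iam generate-credential-report >/dev/null  # may need a retry while the report generates
aws iam get-credential-report --query 'Content' --output text | base64 -d |
awk -F, -v cutoff="$(date -u -d '90 days ago' +%Y-%m-%dT%H:%M:%S)" '
  NR > 1 && $9 == "true" && $10 != "N/A" && $10 < cutoff {
    print $1 ": access key 1 last rotated " $10
  }'
```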
There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users:1. Run the following to get a list of IAM users: ```aws iam list-users --query 'Users[*].UserName' --output text``` 2. For each user returned, run the following commands to determine if any policies are attached to them: ```aws iam list-attached-user-policies --user-name <user_name>``` ```aws iam list-user-policies --user-name <user_name>``` 3. If any policies are returned, the user has an inline policy or direct policy attachment; a CLI sketch of this loop follows below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Assigning IAM policy only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.","RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` and then click `Create New Group`. 3. In the `Group Name` box, type the name of the group and then click `Next Step`. 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step`. 5. Click `Create Group` Perform the following to add a user to a given group:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups`3. Select the group to add a user to 4. Click `Add Users To Group`5. Select the users to be added to the group 6. Click `Add Users` Perform the following to remove a direct association between a user and policy:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left navigation pane, click on Users 3. For each user:- Select the user- Click on the `Permissions` tab- Expand `Permissions policies` - Click `X` for each policy; then click Detach or Remove (depending on policy type)","AdditionalInformation": ""}],"description": "Ensure IAM Users Receive Permissions Only Through Groups","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.16": {"name": "1.16","checks": {"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam","Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended, and considered standard security advice, to grant _least privilege_ - that is, granting only the permissions required to perform a task. 
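The 1.15 audit loop referenced above could be scripted roughly as follows (illustrative sketch, not from this report; it assumes a configured AWS CLI):

```bash
# List IAM users that have managed policies attached directly or inline
# policies set - i.e. users not receiving permissions solely through groups.
for user in $(aws iam list-users --query 'Users[*].UserName' --output text); do
  attached=$(aws iam list-attached-user-policies --user-name "$user" \
               --query 'AttachedPolicies[*].PolicyName' --output text)
  inline=$(aws iam list-user-policies --user-name "$user" \
             --query 'PolicyNames' --output text)
  [ -n "$attached$inline" ] && echo "$user: $attached $inline"
done
```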
Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.","DefaultValue": null,"AuditProcedure": "Perform the following to determine what policies are created:**From Command Line:**1. Run the following to get a list of IAM policies: ```aws iam list-policies --only-attached --output text ``` 2. For each policy returned, run the following command to determine if any policy allows full administrative privileges on the account: ```aws iam get-policy-version --policy-arn <policy_arn> --version-id <version_id> ``` 3. In the output, ensure the policy does not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later. Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions. IAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.","RemediationProcedure": "**From Console:**Perform the following to detach the policy that has full administrative privileges:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. 3. Select the policy that needs to be deleted. 4. In the policy action menu, first select `Detach` 5. Select all Users, Groups, Roles that have this policy attached 6. Click `Detach Policy`7. In the policy action menu, select `Detach` **From Command Line:**Perform the following to detach the policy that has full administrative privileges as found in the audit step:1. List all IAM users, groups, and roles that the specified managed policy is attached to: ```aws iam list-entities-for-policy --policy-arn <policy_arn> ``` 2. Detach the policy from all IAM Users: ```aws iam detach-user-policy --user-name <iam_user> --policy-arn <policy_arn> ``` 3. Detach the policy from all IAM Groups: ```aws iam detach-group-policy --group-name <iam_group> --policy-arn <policy_arn> ``` 4. Detach the policy from all IAM Roles: ```aws iam detach-role-policy --role-name <iam_role> --policy-arn <policy_arn> ```","AdditionalInformation": ""}],"description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not attached","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.17": {"name": "1.17","checks": {"iam_support_role_created": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://aws.amazon.com/premiumsupport/pricing/:https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html:https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html:https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html","Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. 
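The 1.16 audit above can be approximated by walking every attached policy's default version. A sketch under stated assumptions (configured AWS CLI plus jq installed; not part of this report):

```bash
# Flag attached managed policies whose default version contains an Allow
# statement over Action "*" and Resource "*".
for arn in $(aws iam list-policies --only-attached --query 'Policies[*].Arn' --output text); do
  ver=$(aws iam get-policy --policy-arn "$arn" --query 'Policy.DefaultVersionId' --output text)
  aws iam get-policy-version --policy-arn "$arn" --version-id "$ver" --output json |
    jq -e '
      .PolicyVersion.Document.Statement
      | (if type == "array" then . else [.] end)
      | map(select(.Effect == "Allow"
                   and ([.Action]   | flatten | index("*"))
                   and ([.Resource] | flatten | index("*"))))
      | length > 0' >/dev/null && echo "full-admin policy: $arn"
done
```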
Create an IAM Role to allow authorized users to manage incidents with AWS Support.","DefaultValue": null,"AuditProcedure": "**From Command Line:**1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value: ``` aws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\" ``` 2. Check if the 'AWSSupportAccess' policy is attached to any role: ``` aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess ``` 3. In the output, ensure `PolicyRoles` does not return empty. Example of an empty value: 'PolicyRoles: [ ]'. If it returns empty, refer to the remediation below.","ImpactStatement": "All AWS Support plans include an unlimited number of account and billing support cases, with no long-term contracts. Support billing calculations are performed on a per-account basis for all plans. Enterprise Support plan customers have the option to include multiple enabled accounts in an aggregated monthly billing calculation. Monthly charges for the Business and Enterprise support plans are based on each month's AWS usage charges, subject to a monthly minimum, billed in advance.","AssessmentStatus": "Automated","RationaleStatement": "By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.","RemediationProcedure": "**From Command Line:**1. Create an IAM role for managing incidents with AWS:- Create a trust relationship policy document that allows <iam_user> to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: ```{\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"<iam_user_arn>\"},\"Action\": \"sts:AssumeRole\"}]} ``` 2. Create the IAM role using the above trust policy: ``` aws iam create-role --role-name <aws_support_iam_role> --assume-role-policy-document file:///tmp/TrustPolicy.json ``` 3. Attach the 'AWSSupportAccess' managed policy to the created IAM role: ``` aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name <aws_support_iam_role> ```","AdditionalInformation": "AWSSupportAccess policy is a global AWS resource. It has the same ARN, `arn:aws:iam::aws:policy/AWSSupportAccess`, for every account."}],"description": "Ensure a support role has been created to manage incidents with AWS Support","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.18": {"name": "1.18","checks": {"ec2_instance_profile_attached": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html","Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.","DefaultValue": null,"AuditProcedure": "Where an instance is associated with a Role: For instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions: 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Open the EC2 Dashboard and choose \"Instances\" 3. 
Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\" 4. If the Role is blank, the instance is not assigned to one. 5. If the Role is filled in, it does not mean the instance might not \\*also\\* have credentials encoded on it for some activities. Where an Instance Contains Embedded Credentials: - On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials. Where an Instance Application Contains Embedded Credentials: - Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as GitHub. Whether an application contains credentials can be determined by eliminating all other sources of credentials: if the application can still access AWS resources, it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it. Additionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.","RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance by adding it to a role you must create a new instance. If the instance has no external dependencies on its current private IP address, or its public addresses are Elastic IPs: 1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known. 2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected. 3. Shutdown both the existing instance and the new instance. 4. Detach disks from both instances. 5. Attach the existing instance disks to the new instance. 6. Boot the new instance and you should have the same machine, but with the associated role.**Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then, when launching from the AMI, manually assign the previous private IP address.**Note:** if your environment has dependencies on a dynamically assigned PUBLIC IP address there is no way to ensure the address is retained and assign an instance role. 
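The embedded-credentials audit described above is necessarily heuristic. One rough illustration (not from this report; the paths and the AKIA key-ID shape are assumptions, and this will miss session tokens or non-standard credential formats):

```bash
# Crude sweep for embedded long-term credentials on an instance: access key IDs
# match the AKIA[0-9A-Z]{16} shape; also check the current environment.
sudo grep -rnE 'AKIA[0-9A-Z]{16}' /etc /opt /home /var/www 2>/dev/null
env | grep -iE '^aws_(access_key_id|secret_access_key)='
```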
Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.","AdditionalInformation": ""}],"description": "Ensure IAM instance roles are used for AWS resource access from instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.19": {"name": "1.19","checks": {"iam_no_expired_server_certificates_stored": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html:https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html","Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.","DefaultValue": null,"AuditProcedure": "**From Console:**Getting the certificate expiration information via the AWS Management Console is not currently supported. To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**Run the list-server-certificates command to list all the IAM-stored server certificates: ``` aws iam list-server-certificates ``` The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc): ``` {\"ServerCertificateMetadataList\": [{\"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\",\"ServerCertificateName\": \"MyServerCertificate\",\"Expiration\": \"2018-07-10T23:59:59Z\",\"Path\": \"/\",\"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\",\"UploadDate\": \"2018-06-10T11:56:08Z\"}] } ``` Verify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them. If this command returns: ``` { \"ServerCertificateMetadataList\": [] } ``` this means that there are no expired certificates; it does NOT mean that no certificates exist.","ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. One has to make configurations at respective services to ensure there is no interruption in application functionality.","AssessmentStatus": "Automated","RationaleStatement": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. 
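The 1.19 expiry check above lends itself to a short script. A sketch assuming a configured AWS CLI and jq (not part of this report; it relies on ISO 8601 timestamps comparing correctly as strings):

```bash
# Print IAM server certificates whose Expiration is already in the past.
now=$(date -u +%Y-%m-%dT%H:%M:%SZ)
aws iam list-server-certificates --output json |
  jq -r --arg now "$now" '.ServerCertificateMetadataList[]
    | select(.Expiration < $now)
    | "\(.ServerCertificateName)\t\(.Expiration)"'
```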
","RemediationProcedure": "**From Console:**Removing expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**To delete an expired certificate, run the following command, replacing  with the name of the certificate to delete:``` aws iam delete-server-certificate --server-certificate-name  ```When the preceding command is successful, it does not return any output.","AdditionalInformation": ""}],"description": "Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.20": {"name": "1.20","checks": {"accessanalyzer_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html","Description": "Enable IAM Access analyzer for IAM policies about all resources in each region.IAM Access Analyzer is a technology introduced at AWS re:Invent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. The results allow you to determine whether unintended access is allowed, making it easier for administrators to monitor least-privilege access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Open the IAM console at `https://console.aws.amazon.com/iam/` 2. Choose `Access analyzer` 3. Click 'Analyzers' 4. Ensure that at least one analyzer is present 5. Ensure that the `STATUS` is set to `Active` 6. Repeat these steps for each active region**From Command Line:**1. Run the following command: ``` aws accessanalyzer list-analyzers | grep status ``` 2. Ensure that for at least one analyzer the `status` is set to `ACTIVE` 3. Repeat the steps above for each active region.If an Access analyzer is not listed for each region or the status is not set to active, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 buckets, IAM roles, KMS (Key Management Service) keys, AWS Lambda functions, and Amazon SQS (Simple Queue Service) queues.","RemediationProcedure": "**From Console:**Perform the following to enable IAM Access analyzer for IAM policies:1. Open the IAM console at `https://console.aws.amazon.com/iam/.` 2. Choose `Access analyzer`. 3. Choose `Create analyzer`. 4. 
On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer. 5. Enter a name for the analyzer. `Optional, as a name will be generated for you automatically`. 6. Add any tags that you want to apply to the analyzer. `Optional`.7. Choose `Create Analyzer`. 8. Repeat these steps for each active region**From Command Line:**Run the following command: ``` aws accessanalyzer create-analyzer --analyzer-name  --type  ``` Repeat the command above for each active region.**Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.","AdditionalInformation": ""}],"description": "Ensure that IAM Access analyzer is enabled for all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.21": {"name": "1.21","checks": {"iam_check_saml_providers_sts": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "","Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.","DefaultValue": null,"AuditProcedure": "For multi-account AWS environments with an external identity provider... 1. Determine the master account for identity federation or IAM user management 2. Login to that account through the AWS Management Console 3. Click `Services`4. Click `IAM`5. Click `Identity providers` 6. Verify the configurationThen determine all accounts that should not have local users present. For each account...1. Log into the AWS Management Console 2. Switch role into each identified account 3. Click `Services` 4. Click `IAM` 5. Click `Users` 6. Confirm that no IAM users representing individuals are presentFor multi-account AWS environments implementing AWS Organizations without an external identity provider... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services`5. Click `IAM`6. Click `Users` 7. Confirm that no IAM users representing individuals are present","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Centralizing IAM user management to a single identity store reduces complexity and thus the likelihood of access management errors.","RemediationProcedure": "The remediation procedure will vary based on the individual organization's implementation of identity federation and/or AWS Organizations with the acceptance criteria that no non-service IAM users, and non-root accounts, are present outside the account providing centralized IAM user management.","AdditionalInformation": ""}],"description": "Ensure IAM users are managed centrally via identity federation or AWS Organizations for multi-account environments","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.10": {"name": "3.10","checks": {"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. 
By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/` 2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine. 3. Review `General details` 4. Confirm that `Multi-region trail` is set to `Yes` 5. Scroll down to `Data events` 6. Confirm that it reads: Data events: S3 Bucket Name: All current and future S3 buckets Read: Enabled Write: Enabled 7. Repeat steps 2 to 6 to verify the Multi-region trail and Data events logging of S3 buckets for each CloudTrail. If the CloudTrails do not have multi-region and data events configured for S3, refer to the remediation below.**From Command Line:**1. Run `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions: ``` aws cloudtrail list-trails ``` 2. The command output will be a list of all the trail names to include. \"TrailARN\": \"arn:aws:cloudtrail:::trail/\", \"Name\": \"\", \"HomeRegion\": \"\" 3. Next run the `get-trail` command to determine Multi-region status. ``` aws cloudtrail get-trail --name  --region  ``` 4. The command output should include: \"IsMultiRegionTrail\": true, 5. Next run `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 6. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. \"Type\": \"AWS::S3::Object\",\"Values\": [\"arn:aws:s3\" 7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 8. Repeat steps 1 to 5 for auditing each CloudTrail to determine if Data events for S3 are covered. If Multi-region is not set to true and Data events do not show S3 defined as shown, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled. 6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets.
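The change can be confirmed afterwards by re-running the audit command (illustrative; the `<region>` and `<trail_name>` placeholders are assumptions to fill in): ``` aws cloudtrail get-event-selectors --region <region> --trail-name <trail_name> --query EventSelectors[*].DataResources[] ``` The output should now include \"Type\": \"AWS::S3::Object\".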
**From Command Line:**1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at once, then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for write events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.11": {"name": "3.11","checks": {"cloudtrail_s3_dataevents_read_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set. 5. If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. 6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets.**From Command Line:** 1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: ``` aws cloudtrail describe-trails --region  --output table --query trailList[*].Name ``` 2. The command output will be a table of the requested trail names. 3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. 5. If the `get-event-selectors` command returns an empty array, the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. 7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.
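A quick check of the read/write scope is also possible (an illustrative sketch; the placeholders are assumptions to fill in): ``` aws cloudtrail get-event-selectors --region <region> --trail-name <trail_name> --query 'EventSelectors[].ReadWriteType' ``` A result containing `ReadOnly` or `All` indicates that read data events are captured.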
","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled. 6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets.**From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at once, then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for read events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.10": {"name": "4.10","checks": {"cloudwatch_log_metric_filter_security_group_changes": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '']\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to security group will help ensure that resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for security groups changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"\" --filter-name \"\" --metric-transformations metricName= \"\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name \"\" ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn \"\" --protocol  --notification-endpoint \"\" ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"\" --metric-name \"\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"\" ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for security group changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.11": {"name": "4.11","checks": {"cloudwatch_changes_to_network_acls_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to NACLs will help ensure that AWS resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL)","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.12": {"name": "4.12","checks": {"cloudwatch_changes_to_network_gateways_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. 
It is recommended that a metric filter and alarm be established for changes to network gateways.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to network gateways will help ensure that all ingress/egress traffic traverses the VPC border via a controlled path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to network gateways","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.13": {"name": "4.13","checks": {"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. It is recommended that a metric filter and alarm be established for changes to route tables.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. 
Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for route table changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for route table changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.14": {"name": "4.14","checks": {"cloudwatch_changes_to_vpcs_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. 
It is possible to have more than one VPC within an account; in addition, it is also possible to create a peering connection between two VPCs, enabling network traffic to route between them. It is recommended that a metric filter and alarm be established for changes made to VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to VPCs will help ensure that VPC traffic flow is not unintentionally impacted.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `` taken from audit step 1. 
``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for VPC changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.15": {"name": "4.15","checks": {"cloudwatch_log_metric_filter_aws_organizations_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.","DefaultValue": null,"AuditProcedure": "1. 
Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: - Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails:``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true - From value associated with CloudWatchLogsLogGroupArn note **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:::log-group:NewGroup:*,  would be NewGroup- Ensure Identified Multi region CloudTrail is active: ``` aws cloudtrail get-trail-status --name  ``` Ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events: ``` aws cloudtrail get-event-selectors --trail-name  ``` - Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`.2. Get a list of all associated metric filters for this : ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\" ``` 4. Note the `` value associated with the filterPattern found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4: ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the AlarmActions value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic: ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. Example of valid \"SubscriptionArn\":``` \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring AWS Organizations changes can help you prevent any unwanted, accidental or intentional modifications that may lead to unauthorized access or other security breaches. This monitoring technique helps you to ensure that any unexpected changes performed within your AWS Organizations can be investigated and any unwanted changes can be rolled back.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. 
Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `` taken from audit step 1: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }' ``` **Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify: ``` aws sns create-topic --name  ``` **Note:** you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2: ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2: ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": ""}],"description": "Ensure a log metric filter and alarm exists for AWS Organizations changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.16": {"name": "4.16","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-get-started.html:https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-enable.html#securityhub-enable-api:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/enable-security-hub.html","Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.","DefaultValue": null,"AuditProcedure": "The process to evaluate AWS Security Hub configuration per region **From Console:**1. Sign in to the AWS Management Console and open the AWS Security Hub console at https://console.aws.amazon.com/securityhub/. 2. On the top right of the console, select the target Region. 3. 
If presented with the Security Hub > Summary page then Security Hub is set up for the selected region. 4. If presented with Setup Security Hub or Get Started With Security Hub - follow the online instructions. 5. Repeat steps 2 to 4 for each region.","ImpactStatement": "It is recommended AWS Security Hub be enabled in all regions. AWS Security Hub requires AWS Config to be enabled.","AssessmentStatus": "Automated","RationaleStatement": "AWS Security Hub provides you with a comprehensive view of your security state in AWS and helps you check your environment against security industry standards and best practices - enabling you to quickly assess the security posture across your AWS accounts.","RemediationProcedure": "To grant the permissions required to enable Security Hub, attach the Security Hub managed policy AWSSecurityHubFullAccess to an IAM user, group, or role.Enabling Security Hub**From Console:**1. Use the credentials of the IAM identity to sign in to the Security Hub console. 2. When you open the Security Hub console for the first time, choose Enable AWS Security Hub. 3. On the welcome page, the Security standards section lists the security standards that Security Hub supports. 4. Choose Enable Security Hub.**From Command Line:**1. Run the enable-security-hub command. To enable the default standards, include `--enable-default-standards`. ``` aws securityhub enable-security-hub --enable-default-standards ```2. To enable Security Hub without the default standards, include `--no-enable-default-standards`. ``` aws securityhub enable-security-hub --no-enable-default-standards ```","AdditionalInformation": ""}],"description": "Ensure AWS Security Hub is enabled","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.1": {"name": "2.1.1","checks": {"s3_bucket_default_encryption": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/default-bucket-encryption.html:https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-related-resources","Description": "Amazon S3 provides a variety of no- or low-cost encryption options to protect data at rest.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select a Bucket. 3. Click on 'Properties'. 4. Verify that `Default Encryption` is enabled, and displays either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. Run command to list buckets ``` aws s3 ls ``` 2. For each bucket, run``` aws s3api get-bucket-encryption --bucket  ``` 3. Verify that either``` \"SSEAlgorithm\": \"AES256\" ```or``` \"SSEAlgorithm\": \"aws:kms\"```is displayed.","ImpactStatement": "Amazon S3 buckets with default bucket encryption using SSE-KMS cannot be used as destination buckets for Amazon S3 server access logging. Only SSE-S3 default encryption is supported for server access log destination buckets.","AssessmentStatus": "Automated","RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select a Bucket. 3. Click on 'Properties'. 4. 
Click edit on `Default Encryption`. 5. Select either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 6. Click `Save` 7. Repeat for all the buckets in your AWS account lacking encryption.**From Command Line:**Run either``` aws s3api put-bucket-encryption --bucket  --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]}' ```or``` aws s3api put-bucket-encryption --bucket  --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"aws:kms\",\"KMSMasterKeyID\": \"aws/s3\"}}]}' ```**Note:** the KMSMasterKeyID can be set to the master key of your choosing; aws/s3 is an AWS preconfigured default.","AdditionalInformation": "S3 bucket encryption only applies to objects as they are placed in the bucket. Enabling S3 bucket encryption does **not** encrypt objects previously stored within the bucket."}],"description": "Ensure all S3 buckets employ encryption-at-rest","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.2": {"name": "2.1.2","checks": {"s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/:https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-policy.html","Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.","DefaultValue": null,"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access, confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\".**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions', then Click on `Bucket Policy`. 4. Ensure that a policy is listed that matches: ``` '{\"Sid\": ,\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}}' ``` `` and `` will be specific to your account 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets``` aws s3 ls ``` 2. Using the list of buckets run this command on each of them: ``` aws s3api get-bucket-policy --bucket  | grep aws:SecureTransport ``` 3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false` 4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.
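All buckets can be swept for the condition from the CLI (an illustrative bash sketch; the loop and the grep pattern are assumptions): ``` for b in $(aws s3api list-buckets --query 'Buckets[].Name' --output text); do echo \"$b\"; aws s3api get-bucket-policy --bucket \"$b\" --query Policy --output text 2>/dev/null | grep -o 'aws:SecureTransport[^,}]*' || echo 'no deny-HTTP condition found'; done ``` Buckets reported without the condition should be remediated as described below.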
Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions'. 4. Click 'Bucket Policy' 5. Add this to the existing policy filling in the required information ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 6. Save 7. Repeat for all the buckets in your AWS account that contain sensitive data.**From Console** using AWS Policy Generator:1. Repeat steps 1-4 above. 2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor 3. Select Policy Type `S3 Bucket Policy` 4. Add Statements - `Effect` = Deny - `Principal` = * - `AWS Service` = Amazon S3 - `Actions` = * - `Amazon Resource Name` =  5. Generate Policy 6. Copy the text and add it to the Bucket Policy.**From Command Line:**1. Export the bucket policy to a json file. ``` aws s3api get-bucket-policy --bucket  --query Policy --output text > policy.json ```2. Modify the policy.json file by adding in this statement: ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 3. Apply this modified policy back to the S3 bucket: ``` aws s3api put-bucket-policy --bucket  --policy file://policy.json ```","AdditionalInformation": ""}],"description": "Ensure S3 Bucket Policy is set to deny HTTP requests","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.3": {"name": "2.1.3","checks": {"s3_bucket_no_mfa_delete": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.1. Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete:https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html:https://aws.amazon.com/blogs/security/securing-access-to-aws-using-mfa-part-3/:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html","Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.","DefaultValue": null,"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket**From Console:**1. Login to the S3 console at `https://console.aws.amazon.com/s3/`2. Click the `Check` box next to the Bucket name you want to confirm3. In the window under `Properties`4. Confirm that Versioning is `Enabled`5. Confirm that MFA Delete is `Enabled`**From Command Line:**1. Run the `get-bucket-versioning` command ``` aws s3api get-bucket-versioning --bucket my-bucket ```Output example: ```  EnabledEnabled ```If the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Adding MFA delete to an S3 bucket requires additional authentication when you change the version state of your bucket or you delete an object version, adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.","RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket.Note: -You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API.
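For reference, the `put-bucket-versioning` command shown below maps to a single boto3 call. A minimal sketch, reusing the same placeholder bucket name and MFA device values as the command (all hypothetical), and run under root credentials as the note requires:

```
import boto3

# Assumes a session/profile backed by root credentials, as the note requires.
s3 = boto3.client("s3")

s3.put_bucket_versioning(
    Bucket="Bucket_Name",  # placeholder bucket name
    VersioningConfiguration={"Status": "Enabled", "MFADelete": "Enabled"},
    # MFA is "<mfa-device-arn> <current-passcode>"; both values are placeholders.
    MFA="arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode",
)
```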
-You must use your 'root' account to enable MFA Delete on S3 buckets.**From Command line:**1. Run the s3api put-bucket-versioning command``` aws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa \"arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode\" ```","AdditionalInformation": ""}],"description": "Ensure MFA Delete is enabled on S3 buckets","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.4": {"name": "2.1.4","checks": {"macie_is_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://aws.amazon.com/macie/getting-started/:https://docs.aws.amazon.com/workspaces/latest/adminguide/data-protection.html:https://docs.aws.amazon.com/macie/latest/user/data-classification.html","Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.","DefaultValue": null,"AuditProcedure": "Perform the following steps to determine if Macie is running:**From Console:** 1. Login to the Macie console at https://console.aws.amazon.com/macie/ 2. In the left hand pane click on By job under findings. 3. Confirm that you have a Job setup for your S3 BucketsWhen you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below.If you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.","ImpactStatement": "There is a cost associated with using Amazon Macie. There is also typically a cost associated with 3rd Party tools that perform similar processes and protection.","AssessmentStatus": "Manual","RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information.Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.","RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie**From Console:**1. Log on to the Macie console at `https://console.aws.amazon.com/macie/`2. Click `Get started`.3. Click `Enable Macie`.Setup a repository for sensitive data discovery results1. In the Left pane, under Settings, click `Discovery results`.2. Make sure `Create bucket` is selected.3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number.4. Click on `Advanced`.5. Block all public access, make sure `Yes` is selected.6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket.7. Click on `Save`Create a job to discover sensitive data1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account.2. Select the `check box` for each bucket that you want Macie to analyze as part of the job. 3. Click `Create job`, then click `Quick create`. 4. 
For the Name and description step, enter a name and, optionally, a description of the job.5. Then click `Next`.6. For the Review and create step, click `Submit`.Review your findings1. In the left pane, click `Findings`.2. To view the details of a specific finding, choose any field other than the check box for the finding.If you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.","AdditionalInformation": ""}],"description": "Ensure all data in Amazon S3 has been discovered, classified and secured when required.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.5": {"name": "2.1.5","checks": {"s3_bucket_level_public_access_block": "PASS","s3_account_level_public_access_blocks": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.1. Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html","Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.","DefaultValue": null,"AuditProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Ensure that block public access settings are set appropriately for this bucket 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Find the public access setting on that bucket ``` aws s3api get-public-access-block --bucket  ``` Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"BlockPublicAcls\": true,\"IgnorePublicAcls\": true,\"BlockPublicPolicy\": true,\"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.**If utilizing Block Public Access (account settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block public access (account settings)` 3. Ensure that block public access settings are set appropriately for your AWS account.**From Command Line:**To check Public access settings for this account status, run the following command, `aws s3control get-public-access-block --account-id  --region `Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"BlockPublicAcls\": true, \"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.","ImpactStatement": "When you apply Block Public Access settings to an account, the settings apply to all AWS Regions globally. 
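The four account-level flags named in the 2.1.5 audit above can be checked and remediated with boto3's s3control client. A minimal sketch, assuming a placeholder account ID; it passes only when all four flags are true, as the audit text requires:

```
import boto3
from botocore.exceptions import ClientError

account_id = "123456789012"  # placeholder account ID
s3control = boto3.client("s3control")

try:
    config = s3control.get_public_access_block(AccountId=account_id)[
        "PublicAccessBlockConfiguration"
    ]
except ClientError as err:
    # No configuration at all also means public access is not blocked.
    if err.response["Error"]["Code"] != "NoSuchPublicAccessBlockConfiguration":
        raise
    config = {}

flags = ("BlockPublicAcls", "IgnorePublicAcls", "BlockPublicPolicy", "RestrictPublicBuckets")
if not all(config.get(flag) for flag in flags):
    # Remediation from the text above: set all four flags at the account level.
    s3control.put_public_access_block(
        AccountId=account_id,
        PublicAccessBlockConfiguration={flag: True for flag in flags},
    )
```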
The settings might not take effect in all Regions immediately or simultaneously, but they eventually propagate to all Regions.","AssessmentStatus": "Automated","RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). Amazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account.Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.","RemediationProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Click 'Block all public access' 5. Repeat for all the buckets in your AWS account that contain sensitive data.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Set the Block Public Access to true on that bucket ``` aws s3api put-public-access-block --bucket  --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\" ```**If utilizing Block Public Access (account settings)****From Console:**If the output reads `true` for the separate configuration settings then it is set on the account.1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block Public Access (account settings)` 3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account 4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons. 5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes.**From Command Line:**To set Block Public access settings for this account, run the following command: ``` aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id  ```","AdditionalInformation": ""}],"description": "Ensure that S3 Buckets are configured with 'Block public access (bucket settings)'","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"2.2.1": {"name": "2.2.1","checks": {"ec2_ebs_volume_encryption": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.2. Elastic Compute Cloud (EC2)","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html:https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/","Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Verify `Always encrypt new EBS volumes` displays `Enabled`. 4. Review every region in-use.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. Run``` aws --region  ec2 get-ebs-encryption-by-default ``` 2. 
Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Review every region in-use.**Note:** EBS volume encryption is configured per region.","ImpactStatement": "Losing access or removing the KMS key in use by the EBS volumes will result in no longer being able to access the volumes.","AssessmentStatus": "Automated","RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Click `Manage`. 4. Click the `Enable` checkbox. 5. Click `Update EBS encryption` 6. Repeat for every region requiring the change.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. Run``` aws --region  ec2 enable-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Repeat every region requiring the change.**Note:** EBS volume encryption is configured per region.","AdditionalInformation": "Default EBS volume encryption only applies to newly created EBS volumes. Existing EBS volumes are **not** converted automatically."}],"description": "Ensure EBS Volume Encryption is Enabled in all Regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.1": {"name": "2.3.1","checks": {"rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html:https://aws.amazon.com/blogs/database/selecting-the-right-encryption-options-for-amazon-rds-and-amazon-aurora-database-engines/#:~:text=With%20RDS%2Dencrypted%20resources%2C%20data,transparent%20to%20your%20database%20engine.:https://aws.amazon.com/rds/features/security/","Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/ 2. In the navigation pane, under RDS dashboard, click `Databases`. 3. Select the RDS Instance that you want to examine 4. Click `Instance Name` to see details, then click on `Configuration` tab. 5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status. 6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance. 7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region. 8. Change region from the top of the navigation bar and repeat audit for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name.``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. 
Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ``` 3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance. 4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Databases are likely to hold sensitive and critical data, it is highly recommended to implement encryption in order to protect your data from unauthorized access or disclosure. With RDS encryption enabled, the data stored on the instance's underlying storage, the automated backups, read replicas, and snapshots, are all encrypted.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases` 3. Select the Database instance that needs to be encrypted. 4. Click on `Actions` button placed at the top right and select `Take Snapshot`. 5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`. 6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu. 7. On the Make Copy of DB Snapshot page, perform the following:- In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`. - Check `Copy Tags`, New snapshot must have the same tags as the source snapshot. - Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list.8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot. 9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance. 10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field. 11. Review the instance configuration details and click `Restore DB Instance`. 12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name. ``` aws rds create-db-snapshot --region  --db-snapshot-identifier  --db-instance-identifier  ``` 3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. 
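The snapshot/copy/restore remediation being walked through here condenses to three RDS API calls. A minimal boto3 sketch, assuming hypothetical instance and snapshot identifiers and the AWS-managed `aws/rds` key alias; the application must be repointed to the new endpoint afterwards, as the console procedure notes:

```
import boto3

rds = boto3.client("rds")
instance_id = "mydb"       # placeholder RDS instance identifier
kms_key = "alias/aws/rds"  # AWS-managed default key for RDS

# 1. Snapshot the unencrypted instance and wait until it is available.
rds.create_db_snapshot(
    DBSnapshotIdentifier=f"{instance_id}-plain", DBInstanceIdentifier=instance_id
)
rds.get_waiter("db_snapshot_available").wait(DBSnapshotIdentifier=f"{instance_id}-plain")

# 2. Copy the snapshot with encryption enabled (KmsKeyId drives the encryption).
rds.copy_db_snapshot(
    SourceDBSnapshotIdentifier=f"{instance_id}-plain",
    TargetDBSnapshotIdentifier=f"{instance_id}-encrypted",
    KmsKeyId=kms_key,
    CopyTags=True,
)
rds.get_waiter("db_snapshot_available").wait(DBSnapshotIdentifier=f"{instance_id}-encrypted")

# 3. Restore the encrypted snapshot into a new instance.
rds.restore_db_instance_from_db_snapshot(
    DBInstanceIdentifier=f"{instance_id}-encrypted",
    DBSnapshotIdentifier=f"{instance_id}-encrypted",
)
```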
For our RDS encryption activation process, locate the ID of the AWS default KMS key. ``` aws kms list-aliases --region  ``` 4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`. ``` aws rds copy-db-snapshot --region  --source-db-snapshot-identifier  --target-db-snapshot-identifier  --copy-tags --kms-key-id  ``` 5. Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration. ``` aws rds restore-db-instance-from-db-snapshot --region  --db-instance-identifier  --db-snapshot-identifier  ``` 6. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ```","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for RDS Instances","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.3.2": {"name": "2.3.2","checks": {"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Managing.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html:https://aws.amazon.com/rds/faqs/","Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to automatically receive minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that you want to examine. 4. Click on the `Maintenance and backups` panel. 5. Under the `Maintenance` section, search for the Auto Minor Version Upgrade status. - If the current status is set to `Disabled`, it means the feature is not set and the minor engine upgrades released will not be applied to the selected RDS instance**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. 
Run again `describe-db-instances` command using the RDS instance identifier returned earlier to determine the Auto Minor Version Upgrade status for the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 4. The command output should return the feature current status. If the current status is set to `true`, the feature is enabled and the minor engine upgrades will be applied to the selected RDS instance.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS RDS will occasionally deprecate minor engine versions and provide new ones for an upgrade. When the last version number within the release is replaced, the version changed is considered minor. With Auto Minor Version Upgrade feature enabled, the version upgrades will occur automatically during the specified maintenance window so your RDS instances can get the new features, bug fixes, and security patches for their database engines.","RemediationProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that you want to update. 4. Click on the `Modify` button placed on the top right side. 5. On the `Modify DB Instance: ` page, In the `Maintenance` section, select `Auto minor version upgrade` and click on the `Yes` radio button. 6. At the bottom of the page click on `Continue`, check `Apply Immediately` to apply the changes immediately, or select `Apply during the next scheduled maintenance window` to avoid any downtime. 7. Review the changes and click on `Modify DB Instance`. The instance status should change from available to modifying and back to available. Once the feature is enabled, the `Auto Minor Version Upgrade` status should change to `Yes`.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database instance names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run the `modify-db-instance` command to modify the selected RDS instance configuration. This command will apply the changes immediately; remove `--apply-immediately` to apply changes during the next scheduled maintenance window and avoid any downtime: ``` aws rds modify-db-instance --region  --db-instance-identifier  --auto-minor-version-upgrade --apply-immediately ``` 4. The command output should reveal the new configuration metadata for the RDS instance and check `AutoMinorVersionUpgrade` parameter value. 5. Run `describe-db-instances` command to check if the Auto Minor Version Upgrade feature has been successfully enabled: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 6. The command output should return the feature's current status; if set to `true`, the feature is `enabled` and the minor engine upgrades will be applied to the selected RDS instance.","AdditionalInformation": ""}],"description": "Ensure Auto Minor Version Upgrade feature is Enabled for RDS Instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.3": {"name": "2.3.3","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.3. 
Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html:https://aws.amazon.com/rds/faqs/","Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click `Databases`. 3. Select the RDS instance that you want to examine. 4. Click `Instance Name` from the dashboard, under `Connectivity and Security`. 5. In the `Security` section, check if the Publicly Accessible flag status is set to `Yes`; if so, follow the below-mentioned steps to check database subnet access. - In the `networking` section, click the subnet link available under `Subnets` - The link will redirect you to the VPC Subnets page. - Select the subnet listed on the page and click the `Route Table` tab from the dashboard bottom panel. If the route table contains any entries with the destination `CIDR block set to 0.0.0.0/0` and with an `Internet Gateway` attached. - The selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and can be accessible from the Internet. 6. Repeat steps no. 4 and 5 to determine the type (public or private) and subnet for other RDS database instances provisioned in the current region. 7. Change the AWS region from the navigation bar and repeat the audit process for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance `identifier`. 3. Run again `describe-db-instances` command using the `PubliclyAccessible` parameter as query filter to reveal the database instance Publicly Accessible flag status: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].PubliclyAccessible' ``` 4. Check the Publicly Accessible parameter status. If the Publicly Accessible flag is set to `Yes`, then the selected RDS database instance is publicly accessible and insecure; follow the below-mentioned steps to check database subnet access 5. Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC subnet(s) associated with the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].DBSubnetGroup.Subnets[]' ``` - The command output should list the subnets available in the selected database subnet group. 6. 
Run `describe-route-tables` command using the ID of the subnet returned at the previous step to describe the routes of the VPC route table associated with the selected subnet: ``` aws ec2 describe-route-tables --region  --filters \"Name=association.subnet-id,Values=\" --query 'RouteTables[*].Routes[]' ``` - If the command returns the route table associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet. - Or - If the command returns empty results, the route table is implicitly associated with subnet, therefore the audit process continues with the next step 7. Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC ID associated with the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].DBSubnetGroup.VpcId' ``` - The command output should show the VPC ID in the selected database subnet group 8. Now run `describe-route-tables` command using the ID of the VPC returned at the previous step to describe the routes of the VPC main route table implicitly associated with the selected subnet: ``` aws ec2 describe-route-tables --region  --filters \"Name=vpc-id,Values=\" \"Name=association.main,Values=true\" --query 'RouteTables[*].Routes[]' ``` - The command output returns the VPC main route table implicitly associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and does not adhere to AWS security best practices.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Ensure that no public-facing RDS database instances are provisioned in your AWS account and restrict unauthorized access in order to minimize security risks. When the RDS instance allows unrestricted access (0.0.0.0/0), everyone and everything on the Internet can establish a connection to your database and this can increase the opportunity for malicious activities such as brute force attacks, PostgreSQL injections, or DoS/DDoS attacks.","RemediationProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click `Databases`. 3. Select the RDS instance that you want to update. 4. Click `Modify` from the dashboard top menu. 5. On the Modify DB Instance panel, under the `Connectivity` section, click on `Additional connectivity configuration` and update the value for `Publicly Accessible` to Not publicly accessible to restrict public access. Follow the below steps to update subnet configurations: - Select the `Connectivity and security` tab, and click on the VPC attribute value inside the `Networking` section. - Select the `Details` tab from the VPC dashboard bottom panel and click on Route table configuration attribute value. 
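The 2.3.3 audit above pairs the `PubliclyAccessible` flag with a route-table inspection. A condensed boto3 sketch of that logic (the main-route-table fallback described in the text is omitted for brevity, and no identifiers are assumed):

```
import boto3

rds = boto3.client("rds")
ec2 = boto3.client("ec2")

for db in rds.describe_db_instances()["DBInstances"]:
    public_flag = db["PubliclyAccessible"]
    subnet_ids = [s["SubnetIdentifier"] for s in db["DBSubnetGroup"]["Subnets"]]

    # A subnet is public when its route table sends 0.0.0.0/0 to an internet gateway.
    tables = ec2.describe_route_tables(
        Filters=[{"Name": "association.subnet-id", "Values": subnet_ids}]
    )["RouteTables"]
    in_public_subnet = any(
        route.get("DestinationCidrBlock") == "0.0.0.0/0"
        and route.get("GatewayId", "").startswith("igw-")
        for table in tables
        for route in table["Routes"]
    )
    print(db["DBInstanceIdentifier"], public_flag, in_public_subnet)
```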
- On the Route table details page, select the Routes tab from the dashboard bottom panel and click on `Edit routes`. - On the Edit routes page, update the Destination of Target which is set to `igw-xxxxx` and click on `Save` routes. 6. On the Modify DB Instance panel, click on `Continue`, and in the Scheduling of modifications section, perform one of the following actions based on your requirements: - Select Apply during the next scheduled maintenance window to apply the changes automatically during the next scheduled maintenance window. - Select Apply immediately to apply the changes right away. With this option, any pending modifications will be asynchronously applied as soon as possible, regardless of the maintenance window setting for this RDS database instance. Note that any changes available in the pending modifications queue are also applied. If any of the pending modifications require downtime, choosing this option can cause unexpected downtime for the application. 7. Repeat steps 3 to 6 for each RDS instance available in the current region. 8. Change the AWS region from the navigation bar to repeat the process for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database instance identifiers, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run `modify-db-instance` command to modify the selected RDS instance configuration. Then use the following command to disable the `Publicly Accessible` flag for the selected RDS instances. This command uses the `--apply-immediately` flag. If you want to avoid any downtime, the `--no-apply-immediately` flag can be used: ``` aws rds modify-db-instance --region  --db-instance-identifier  --no-publicly-accessible --apply-immediately ``` 4. The command output should reveal the `PubliclyAccessible` configuration under pending values and should get applied at the specified time. 5. Updating the Internet Gateway Destination via AWS CLI is not currently supported. To update information about Internet Gateway use the AWS Console Procedure. 6. Repeat steps 1 to 5 for each RDS instance provisioned in the current region. 7. Change the AWS region by using the --region filter to repeat the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that public access is not given to RDS Instance","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.4.1": {"name": "2.4.1","checks": {"efs_encryption_at_rest_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.4. Elastic File System (EFS)","References": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/index.html#efs","Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).","DefaultValue": null,"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and Navigate to the `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Each item on the list has a visible Encrypted field that displays data at rest encryption status. 4. Validate that this field reads `Encrypted` for all EFS file systems in all AWS regions.**From CLI:** 1. 
Run describe-file-systems command using custom query filters to list the identifiers of all AWS EFS file systems currently available within the selected region: ``` aws efs describe-file-systems --region  --output table --query 'FileSystems[*].FileSystemId' ``` 2. The command output should return a table with the requested file system IDs. 3. Run describe-file-systems command using the ID of the file system that you want to examine as identifier and the necessary query filters: ``` aws efs describe-file-systems --region  --file-system-id  --query 'FileSystems[*].Encrypted' ``` 4. The command output should return the file system encryption status true or false. If the returned value is `false`, the selected AWS EFS file system is not encrypted and if the returned value is `true`, the selected AWS EFS file system is encrypted.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Data should be encrypted at rest to reduce the risk of a data breach via direct access to the storage device.","RemediationProcedure": "**It is important to note that EFS file system data at rest encryption must be turned on when creating the file system.**If an EFS file system has been created without data at rest encryption enabled then you must create another EFS file system with the correct configuration and transfer the data.**Steps to create an EFS file system with data encrypted at rest:****From Console:** 1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Click `Create File System` button from the dashboard top menu to start the file system setup process. 4. On the `Configure file system access` configuration page, perform the following actions. - Choose the right VPC from the VPC dropdown list. - Within Create mount targets section, select the checkboxes for all of the Availability Zones (AZs) within the selected VPC. These will be your mount targets. - Click `Next step` to continue.5. Perform the following on the `Configure optional settings` page. - Create `tags` to describe your new file system. - Choose `performance mode` based on your requirements. - Check `Enable encryption` checkbox and choose `aws/elasticfilesystem` from Select KMS master key dropdown list to enable encryption for the new file system using the default master key provided and managed by AWS KMS. - Click `Next step` to continue.6. Review the file system configuration details on the `review and create` page and then click `Create File System` to create your new AWS EFS file system. 7. Copy the data from the old unencrypted EFS file system onto the newly created encrypted file system. 8. Remove the unencrypted file system as soon as your data migration to the newly created encrypted file system is completed. 9. Change the AWS region from the navigation bar and repeat the entire process for other AWS regions.**From CLI:** 1. Run describe-file-systems command to describe the configuration information available for the selected (unencrypted) file system (see Audit section to identify the right resource): ``` aws efs describe-file-systems --region  --file-system-id  ``` 2. The command output should return the requested configuration information. 3. To provision a new AWS EFS file system, you need to generate a universally unique identifier (UUID) in order to create the token required by the create-file-system command. 
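When scripting this remediation, the creation token can come from Python's standard `uuid` module rather than an external generator. A minimal boto3 sketch creating the encrypted replacement file system (no specific identifiers are assumed):

```
import uuid

import boto3

efs = boto3.client("efs")

# The creation token only has to be unique; uuid4() stands in for the manual
# UUID-generation step described in the text.
fs = efs.create_file_system(
    CreationToken=str(uuid.uuid4()),
    PerformanceMode="generalPurpose",
    Encrypted=True,  # encryption at rest can only be set at creation time
)
print(fs["FileSystemId"], fs["Encrypted"])
```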
To create the required token, you can use a randomly generated UUID from \"https://www.uuidgenerator.net\". 4. Run create-file-system command using the unique token created at the previous step. ``` aws efs create-file-system --region  --creation-token  --performance-mode generalPurpose --encrypted ``` 5. The command output should return the new file system configuration metadata. 6. Run create-mount-target command using the newly created EFS file system ID returned at the previous step as identifier and the ID of the Availability Zone (AZ) that will represent the mount target: ``` aws efs create-mount-target --region  --file-system-id  --subnet-id  ``` 7. The command output should return the new mount target metadata. 8. Now you can mount your file system from an EC2 instance. 9. Copy the data from the old unencrypted EFS file system onto the newly created encrypted file system. 10. Remove the unencrypted file system as soon as your data migration to the newly created encrypted file system is completed. ``` aws efs delete-file-system --region  --file-system-id  ``` 11. Change the AWS region by updating the --region and repeat the entire process for other AWS regions.","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for EFS file systems","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 52,"requirements_failed": 11,"requirements_manual": 0,"total_requirements": 63,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "407c3a08-81aa-4d24-9aca-46a0904f4b1d","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "aws_account_security_onboarding_aws","framework": "AWS-Account-Security-Onboarding","version": "","description": "Checklist when onboarding new AWS Accounts to an existing AWS Organization.","region": "eu-west-1","requirements": {"S3 protection": {"name": "S3 protection","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "S3","SubGroup": null,"SubSection": null}],"description": "Protection for S3","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"RDS protection": {"name": "RDS protection","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Protection for RDS instances","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Block root user": {"name": "Block root user","checks": {"iam_avoid_root_usage": null,"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "SCPs","Service": "IAM","SubGroup": null,"SubSection": "Apply existing SCPs based on OU placement"}],"description": "Block root user","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"Malware Scanning": {"name": "Malware Scanning","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Comprehensive scanning for malicious software","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Threat Detection": {"name": 
"Threat Detection","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Detection of Threats in your AWS environment","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Lambda protection": {"name": "Lambda protection","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "Lambda","SubGroup": null,"SubSection": null}],"description": "Protection using Lambda","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Runtime protection": {"name": "Runtime protection","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Optional","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Brand new and in need of thorough testing.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Predefine IAM Roles": {"name": "Predefine IAM Roles","checks": {"iam_support_role_created": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_role_cross_service_confused_deputy_prevention": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "IAM","SubGroup": null,"SubSection": null}],"description": "Check if exists predefine IAM Roles","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"Block unused regions": {"name": "Block unused regions","checks": {"organizations_scp_check_deny_regions": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "SCPs","Service": "IAM","SubGroup": null,"SubSection": "Apply existing SCPs based on OU placement"}],"description": "Block unsued regions","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"S3 Block Public Access": {"name": "S3 Block Public Access","checks": {"s3_bucket_public_access": null,"s3_bucket_level_public_access_block": "PASS","s3_account_level_public_access_blocks": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "S3","SubGroup": null,"SubSection": null}],"description": "Block public access to S3 buckets","checks_status": {"fail": 0,"pass": 1,"total": 3,"manual": 0}},"Organization invitation": {"name": "Organization invitation","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "Organizations","SubGroup": null,"SubSection": null}],"description": "Check if organization invitation is enabled","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Additional managed rules": {"name": "Additional managed rules","checks": {},"status": "PASS","attributes": [{"Type": "Discuss","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "Supplementary managed rules","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Blanket rate-based rules": {"name": "Blanket rate-based rules","checks": {},"status": "PASS","attributes": [{"Type": "Must","ItemId": null,"Section": 
"WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "Establishing rules based on a standardized, all-encompassing rate.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Restrict instances types": {"name": "Restrict instances types","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "SCPs","Service": "IAM","SubGroup": null,"SubSection": "Apply existing SCPs based on OU placement"}],"description": "Restrict instances types","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Alert on IAM user changes": {"name": "Alert on IAM user changes","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "CloudTrail","SubGroup": null,"SubSection": "CloudTrail"}],"description": "Alert on IAM user changes","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Enabled security services": {"name": "Enabled security services","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","accessanalyzer_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "SecurityServices","SubGroup": null,"SubSection": null}],"description": "Check if security services are enabled","checks_status": {"fail": 0,"pass": 4,"total": 4,"manual": 0}},"Alert on blocked DNS query": {"name": "Alert on blocked DNS query","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "Route53","SubGroup": null,"SubSection": "R53 DNS Resolver"}],"description": "Notify when a DNS query is obstructed.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Alert on each High finding": {"name": "Alert on each High finding","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Alerting","Service": "GuardDuty","SubGroup": null,"SubSection": "GuardDuty"}],"description": "Checks that GuardDuty is enabled and configured to send High findings to CloudWatch Events","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"Disable AMI public sharing": {"name": "Disable AMI public sharing","checks": {"ec2_ami_public": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "EC2","SubGroup": null,"SubSection": null}],"description": "Disable AMI public sharing","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Add custom SCPs if required": {"name": "Add custom SCPs if required","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "SCPs","Service": "IAM","SubGroup": null,"SubSection": null}],"description": "Add custom SCPs if required","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Scoped-down rate-based rules": {"name": "Scoped-down rate-based rules","checks": {"elbv2_waf_acl_attached": "FAIL","cloudfront_distributions_using_waf": null,"apigateway_restapi_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Type": "Discuss","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "Rate-based rules with a narrowed scope","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"Global allow - and block-lists": {"name": "Global allow - 
and block-lists","checks": {"elbv2_waf_acl_attached": "FAIL","cloudfront_distributions_using_waf": null,"apigateway_restapi_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "Establish global allow- and block-lists.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"Service-unique exclusion rules": {"name": "Service-unique exclusion rules","checks": {"elbv2_waf_acl_attached": "FAIL","cloudfront_distributions_using_waf": null,"apigateway_restapi_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "Exclusion rules specific to the service provided.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"Alert on snapshot manipulations": {"name": "Alert on snapshot manipulations","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "CloudTrail","SubGroup": null,"SubSection": "CloudTrail"}],"description": "Alert when a snapshot is manipulated","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"EKS protection (if EKS is used)": {"name": "EKS protection (if EKS is used)","checks": {},"status": "PASS","attributes": [{"Type": "Optional","ItemId": null,"Section": "Enable GuardDuty","Service": "EKS","SubGroup": null,"SubSection": null}],"description": "Elastic Kubernetes Service (EKS) protection, if EKS is used.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Predefined set of managed rules": {"name": "Predefined set of managed rules","checks": {"elbv2_waf_acl_attached": "FAIL","cloudfront_distributions_using_waf": null,"apigateway_restapi_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "A predefined set of managed rules.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"Alerts based on rate-based rules": {"name": "Alerts based on rate-based rules","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "WAF","SubGroup": null,"SubSection": "WAF"}],"description": "Notifications triggered by rate-based rules","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Use strictly AWS VPC DNS resolver": {"name": "Use strictly AWS VPC DNS resolver","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "R53 DNS Resolver Firewall","Service": "Route53","SubGroup": null,"SubSection": null}],"description": "Use only the AWS Virtual Private Cloud (VPC) DNS resolver.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Alert based on DDoSDetected metric": {"name": "Alert based on DDoSDetected metric","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "Shield","SubGroup": null,"SubSection": "Shield"}],"description": "Generate an alert triggered by the detection of a DDoS attack based on the DDoSDetected metric.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Enable and configure AWS 
Inspector": {"name": "Enable and configure AWS Inspector","checks": {"inspector2_is_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Vulnerability Scanning","Service": "EC2","SubGroup": null,"SubSection": "EC2 used as servers"}],"description": "Enable and set up AWS Inspector.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"IDC integration, SSO configuration": {"name": "IDC integration, SSO configuration","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "IAM Identity Center","SubGroup": null,"SubSection": null}],"description": "Check if IDC integration and SSO configuration is enabled","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Send DNS Resolvers queries to SIEM": {"name": "Send DNS Resolvers queries to SIEM","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Logging","Service": "Logging","SubGroup": null,"SubSection": null}],"description": "Send DNS Resolvers queries to SIEM","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Alerts on raised cost anomaly events": {"name": "Alerts on raised cost anomaly events","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "Billing","SubGroup": null,"SubSection": "Cost Anomaly"}],"description": "Alert when cost anomaly events are raised","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Enable as part of Organization trail": {"name": "Enable as part of Organization trail","checks": {"cloudtrail_multi_region_enabled": "PASS","cloudtrail_multi_region_enabled_logging_management_events": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "AWS Cloudtrail","Service": "CloudTrail","SubGroup": null,"SubSection": null}],"description": "Activate as a component of the Organization trail.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"Root user - distribution email + MFA": {"name": "Root user - distribution email + MFA","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "IAM","SubGroup": null,"SubSection": null}],"description": "Check if root user has distribution email and MFA enabled","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"Billing, emergency, security contacts": {"name": "Billing, emergency, security contacts","checks": {"account_maintain_current_contact_details": null,"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Deploy account from predefined IaC template","Service": "Billing","SubGroup": null,"SubSection": null}],"description": "Check if billing, emergency, security contacts are configured","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"Realert on inactivity in a set period": {"name": "Realert on inactivity in a set period","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "SecurityHub","SubGroup": null,"SubSection": "SecurityHub"}],"description": "Activate a re-alert system for detecting inactivity within a specified time frame.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Confirm that events are present in SIEM": {"name": 
"Confirm that events are present in SIEM","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Confirm that events are present in SIEM","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Create analyzers in each active regions": {"name": "Create analyzers in each active regions","checks": {"accessanalyzer_enabled": "PASS","accessanalyzer_enabled_without_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "IAM Access Analyzer","Service": "IAM Access Analyzer","SubGroup": null,"SubSection": null}],"description": "Establish analyzers within every active region.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Export metrics in centralized collector": {"name": "Export metrics in centralized collector","checks": {"wafv2_webacl_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "WAFv2","Service": "CloudWatch","SubGroup": null,"SubSection": null}],"description": "Exporting metrics to a centralized collector for comprehensive data aggregation.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"CFD + ALB + secret rotation architecture": {"name": "CFD + ALB + secret rotation architecture","checks": {"elbv2_waf_acl_attached": "FAIL","cloudfront_distributions_using_waf": null,"apigateway_restapi_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": "Deploy WAF setup for each public web service"}],"description": "Designing an Architecture for Computational Fluid Dynamics (CFD), Application Load Balancing (ALB), and Secret Rotation Integration","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"Critical alert on every root user activity": {"name": "Critical alert on every root user activity","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Alerting","Service": "CloudTrail","SubGroup": null,"SubSection": "CloudTrail"}],"description": "Send critical alert on every root user activity","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Consider enabling for critical buckets only": {"name": "Consider enabling for critical buckets only","checks": {"macie_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Macie","Service": "Macie","SubGroup": null,"SubSection": null}],"description": "Please contemplate activating this feature exclusively for essential or crucial buckets.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"Alert on rise of ConsoleLoginFailures events": {"name": "Alert on rise of ConsoleLoginFailures events","checks": {"cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Alerting","Service": "CloudTrail","SubGroup": null,"SubSection": "CloudTrail"}],"description": "Alert on rise ConsoleLoginFailures events","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Send VPC Flow Logs (only DENYs) to S3 bucket": {"name": "Send VPC Flow Logs (only DENYs) to S3 bucket","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": 
null,"Section": "Logging","Service": "Logging","SubGroup": null,"SubSection": null}],"description": "Send VPC Flow Logs (only DENYs) to S3 bucket","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"Critical alert on cloudtrail settings changes": {"name": "Critical alert on cloudtrail settings changes","checks": {"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Alerting","Service": "CloudTrail","SubGroup": null,"SubSection": "CloudTrail"}],"description": "Send critical alert on cloudtrail settings changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Scan images for vulnerability on upload to ECR": {"name": "Scan images for vulnerability on upload to ECR","checks": {"inspector2_is_enabled": "FAIL","inspector2_active_findings_exist": "FAIL","ecr_registry_scan_images_on_push_enabled": "PASS","ecr_repositories_scan_images_on_push_enabled": "FAIL","ecr_repositories_scan_vulnerabilities_in_latest_image": null},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Vulnerability Scanning","Service": "ECR","SubGroup": null,"SubSection": "ECR used as docker images hub"}],"description": "Check uploaded images for vulnerabilities when adding them to the ECR (Elastic Container Registry).","checks_status": {"fail": 3,"pass": 1,"total": 5,"manual": 0}},"Alert on critical vulnerabilities in AMIs/Images": {"name": "Alert on critical vulnerabilities in AMIs/Images","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "Inspector","SubGroup": null,"SubSection": "Vulnerability Scanning"}],"description": "Notification regarding severe vulnerabilities detected in AMIs/Images.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Ban outbound DNS calls from all VPCs to ports 53": {"name": "Ban outbound DNS calls from all VPCs to ports 53","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "R53 DNS Resolver Firewall","Service": "Route53","SubGroup": null,"SubSection": null}],"description": "Prohibit all Virtual Private Clouds (VPCs) from initiating outbound DNS calls on port 53.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Enable/disable additional standards and controls": {"name": "Enable/disable additional standards and controls","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS SecurityHub","Service": "SecurityHub","SubGroup": null,"SubSection": null}],"description": "Implement SecurityHub Central Configuration across the organization.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"Confirm that logs are present in S3 bucket and SIEM": {"name": "Confirm that logs are present in S3 bucket and SIEM","checks": {"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "AWS Cloudtrail","Service": "CloudTrail","SubGroup": null,"SubSection": null}],"description": "Verify the existence of logs within both the S3 bucket and the SIEM system.","checks_status": {"fail": 1,"pass": 0,"total": 3,"manual": 0}},"Alerts based on (at least) each new CRITICAL finding": {"name": "Alerts based on (at least) each new CRITICAL finding","checks": {},"status": 
"PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "SecurityHub","SubGroup": null,"SubSection": "SecurityHub"}],"description": "Alerts triggered by every new CRITICAL finding, at a minimum.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Apply suppression filters to disable useless findings": {"name": "Apply suppression filters to disable useless findings","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Implementing suppression filters to deactivate non-essential detections.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Enable continuous recording for most of the resources": {"name": "Enable continuous recording for most of the resources","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS Config","Service": "Config","SubGroup": null,"SubSection": null}],"description": "Activate continuous recording for the majority of resources.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Adopt incident response guide and prepared battle card": {"name": "Adopt incident response guide and prepared battle card","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "Shield","SubGroup": null,"SubSection": "Shield"}],"description": "Utilize the incident response manual and have the battle card ready for use.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Confirm that records are present in central aggregator": {"name": "Confirm that records are present in central aggregator","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS Config","Service": "Config","SubGroup": null,"SubSection": null}],"description": "Confirm that records are present in central aggregator","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Configure R53 health checks for all protected resources": {"name": "Configure R53 health checks for all protected resources","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "WAFv2","Service": "Route53","SubGroup": null,"SubSection": null}],"description": "Establishing Amazon Route 53 (R53) health checks to monitor the well-being of all safeguarded resources.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Export scan results as metrics in centralized collector": {"name": "Export scan results as metrics in centralized collector","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Vulnerability Scanning","Service": "ECR","SubGroup": null,"SubSection": "ECR used as docker images hub"}],"description": "Generate metric data from scan results and store it in a centralized collector.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Include in process of incident response based on events": {"name": "Include in process of incident response based on events","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Incorporate within 
the procedural framework of incident response, taking into account the triggering events.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Apply SecurityHub Central Configuration for Organization": {"name": "Apply SecurityHub Central Configuration for Organization","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS SecurityHub","Service": "SecurityHub","SubGroup": null,"SubSection": null}],"description": "Apply SecurityHub Central Configuration for Organization","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"Enable as part of central configuration for Organization": {"name": "Enable as part of central configuration for Organization","checks": {"guardduty_is_enabled": "PASS","guardduty_centrally_managed": "FAIL"},"status": "FAIL","attributes": [{"Type": "Must","ItemId": null,"Section": "Enable GuardDuty","Service": "GuardDuty","SubGroup": null,"SubSection": null}],"description": "Enable GuardDuty as part of the central configuration for the Organization.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"Deploy solution to alert on at least critical new findings": {"name": "Deploy solution to alert on at least critical new findings","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS SecurityHub","Service": "SecurityHub","SubGroup": null,"SubSection": null}],"description": "Implement a solution to trigger alerts for newly identified critical issues at minimum.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"Apply managed domain name lists for Resolver in block mode)": {"name": "Apply managed domain name lists for Resolver in block mode)","checks": {"route53_domains_transferlock_enabled": null,"route53_domains_privacy_protection_enabled": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "R53 DNS Resolver Firewall","Service": "Route53","SubGroup": null,"SubSection": null}],"description": "Utilize managed domain name lists within Resolver to implement block mode.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"Block tampering with security-related settings and services": {"name": "Block tampering with security-related settings and services","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "SCPs","Service": "IAM","SubGroup": null,"SubSection": "Apply existing SCPs based on OU placement"}],"description": "Block tampering with security-related settings and services","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Enable Shield Advanced subscription for public facing account": {"name": "Enable Shield Advanced subscription for public facing account","checks": {"shield_advanced_protection_in_global_accelerators": null,"shield_advanced_protection_in_route53_hosted_zones": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"shield_advanced_protection_in_internet_facing_load_balancers": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Shield Advanced","Service": "Shield Advanced","SubGroup": null,"SubSection": null}],"description": "Activate the Shield Advanced subscription for the publicly accessible account.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"Verify that events are present 
in SecurityHub aggregated view": {"name": "Verify that events are present in SecurityHub aggregated view","checks": {"securityhub_enabled": "PASS","accessanalyzer_enabled": "PASS","accessanalyzer_enabled_without_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "IAM Access Analyzer","Service": "SecurityHub","SubGroup": null,"SubSection": null}],"description": "Confirm the presence of events within the aggregated view of SecurityHub.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"Configure sensitive fields redaction and send WAF logs to SIEM": {"name": "Configure sensitive fields redaction and send WAF logs to SIEM","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "WAFv2","Service": "WAFv2","SubGroup": null,"SubSection": null}],"description": "Configure the redaction of sensitive fields and transmit Web Application Firewall (WAF) logs to the Security Information and Event Management (SIEM) system.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Confirm that findings are being visible in the aggregated view": {"name": "Confirm that findings are being visible in the aggregated view","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS SecurityHub","Service": "SecurityHub","SubGroup": null,"SubSection": null}],"description": "Please verify that the findings are visible when viewed in the aggregated perspective.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"Send S3 access logs for critical buckets to separate S3 bucket": {"name": "Send S3 access logs for critical buckets to separate S3 bucket","checks": {"cloudtrail_s3_dataevents_write_enabled": null,"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "Automated","ItemId": null,"Section": "Logging","Service": "Logging","SubGroup": null,"SubSection": null}],"description": "Send S3 access logs for critical buckets to separate S3 bucket","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"Consider periodic recording for some resources to optimize bill": {"name": "Consider periodic recording for some resources to optimize bill","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Enable AWS Config","Service": "Config","SubGroup": null,"SubSection": null}],"description": "Think about implementing scheduled monitoring for specific resources in order to maximize cost efficiency.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Create DDoS battle card with main info about protected services": {"name": "Create DDoS battle card with main info about protected services","checks": {"shield_advanced_protection_in_global_accelerators": null,"shield_advanced_protection_in_route53_hosted_zones": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"shield_advanced_protection_in_internet_facing_load_balancers": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Shield Advanced","Service": "Shield Advanced","SubGroup": null,"SubSection": null}],"description": "Prepare a Detailed Distributed Denial of Service (DDoS) Battle Card Encompassing Key Information Regarding Safeguarded Services.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"Alerts based 
on high amount of blocked requests by managed rules": {"name": "Alerts based on high amount of blocked requests by managed rules","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "WAF","SubGroup": null,"SubSection": "WAF"}],"description": "Notifications triggered by a significant number of blocked requests as a result of managed rules.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Alerts based on aggregated findings with severity Medium and below": {"name": "Alerts based on aggregated findings with severity Medium and below","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Alerting","Service": "GuardDuty","SubGroup": null,"SubSection": "GuardDuty"}],"description": "Alert based on aggregated findings with severity Medium and below","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Create Cost Anomaly Detection monitors to alert spending anomalies": {"name": "Create Cost Anomaly Detection monitors to alert spending anomalies","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Budget Alarms","Service": "CloudWatch","SubGroup": null,"SubSection": "QA"}],"description": "Establish monitoring systems for cost anomaly detection to promptly notify about unusual spending patterns.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Enable Shield Advanced automatic application layer DDoS mitigation": {"name": "Enable Shield Advanced automatic application layer DDoS mitigation","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "WAFv2","Service": "Shield Advanced","SubGroup": null,"SubSection": null}],"description": "Activate automatic application layer Distributed Denial of Service (DDoS) mitigation within Shield Advanced.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Apply custom threat list for GuardDuty to alert on access to DoH servers": {"name": "Apply custom threat list for GuardDuty to alert on access to DoH servers","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "R53 DNS Resolver Firewall","Service": "Route53","SubGroup": null,"SubSection": null}],"description": "Implement a customized threat list within GuardDuty to generate alerts when there is access to Domain Name System over HTTPS (DoH) servers.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Configure Budgets Actions to stop services in cases of big unexpected spendings": {"name": "Configure Budgets Actions to stop services in cases of big unexpected spendings","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Budget Alarms","Service": "SNS","SubGroup": null,"SubSection": "QA"}],"description": "Set up Budgets Actions to halt services when significant unexpected expenses occur.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"Ensure that there are no critical (and considered critical) findings present in account": {"name": "Ensure that there are no critical (and considered critical) findings present in account","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Enable AWS SecurityHub","Service": "SecurityHub","SubGroup": null,"SubSection": null}],"description": "Make certain that there are no critical findings, whether deemed critical or not, within the account.","checks_status": {"fail": 0,"pass": 1,"total": 
1,"manual": 0}},"Deploy solution to periodically rescan currently used images and report found vulnerabilities": {"name": "Deploy solution to periodically rescan currently used images and report found vulnerabilities","checks": {"ecr_repositories_scan_vulnerabilities_in_latest_image": null},"status": "PASS","attributes": [{"Type": "Automated","ItemId": null,"Section": "Vulnerability Scanning","Service": "ECR","SubGroup": null,"SubSection": "ECR used as docker images hub"}],"description": "Implement a solution to conduct regular scans on currently employed images and notify about any identified vulnerabilities.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"Establish ready-to-be-enabled pipelines to deliver ALB and CFD to SIEM to toggle in case of emergency and investigations": {"name": "Establish ready-to-be-enabled pipelines to deliver ALB and CFD to SIEM to toggle in case of emergency and investigations","checks": {},"status": "PASS","attributes": [{"Type": "Manual","ItemId": null,"Section": "Logging","Service": "Logging","SubGroup": null,"SubSection": null}],"description": "Establish ready-to-be-enabled pipelines to deliver ALB and CFD to SIEM to toggle in case of emergency and investigations","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}}},"requirements_passed": 26,"requirements_failed": 23,"requirements_manual": 34,"total_requirements": 83,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "623480b7-012a-4aab-b553-16d3b8898136","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "hipaa_aws","framework": "HIPAA","version": "","description": "The Health Insurance Portability and Accountability Act of 1996 (HIPAA) is legislation that helps US workers to retain health insurance coverage when they change or lose jobs. 
The legislation also seeks to encourage electronic health records to improve the efficiency and quality of the US healthcare system through improved information sharing.","region": "eu-west-1","requirements": {"164_312_b": {"name": "164.312(b) Audit controls","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_b","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement hardware, software, and/or procedural mechanisms that record and examine activity in information systems that contain or use electronic protected health information.","checks_status": {"fail": 8,"pass": 4,"total": 16,"manual": 0}},"164_312_d": {"name": "164.312(d) Person or entity authentication","checks": {"iam_root_mfa_enabled": null,"iam_password_policy_reuse_24": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_312_d","Section": "164.312 Technical Safeguards","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Implement procedures to verify that a person or entity seeking access to electronic protected health information is the one claimed.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"164_308_a_8": {"name": "164.308(a)(8) Evaluation","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_8","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Perform a periodic technical and nontechnical evaluation, based initially upon the standards implemented under this rule and subsequently, in response to environmental or operational changes affecting the security of electronic protected health information, that establishes the extent to which an entity's security policies and procedures meet the requirements of this subpart.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"164_312_a_1": {"name": "164.312(a)(1) Access control","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"awslambda_function_not_publicly_accessible": "PASS","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_a_1","Section": 
"164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement technical policies and procedures for electronic information systems that maintain electronic protected health information to allow access only to those persons or software programs that have been granted access rights as specified in 164.308(a)(4).","checks_status": {"fail": 1,"pass": 5,"total": 16,"manual": 0}},"164_312_c_1": {"name": "164.312(c)(1) Integrity","checks": {"ec2_ebs_volume_encryption": "PASS","s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_c_1","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement policies and procedures to protect electronic protected health information from improper alteration or destruction.","checks_status": {"fail": 4,"pass": 2,"total": 6,"manual": 0}},"164_312_c_2": {"name": "164.312(c)(2) Mechanism to authenticate electronic protected health information","checks": {"vpc_flow_logs_enabled": "FAIL","ec2_ebs_volume_encryption": "PASS","s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_c_2","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement electronic mechanisms to corroborate that electronic protected health information has not been altered or destroyed in an unauthorized manner.","checks_status": {"fail": 5,"pass": 2,"total": 7,"manual": 0}},"164_312_e_1": {"name": "164.312(e)(1) Transmission security","checks": {"elb_ssl_listeners": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","cloudfront_distributions_https_enabled": null,"awslambda_function_not_publicly_accessible": "PASS","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_e_1","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement technical security measures to guard against unauthorized access to electronic protected health information that is being transmitted over an electronic communications network.","checks_status": {"fail": 3,"pass": 3,"total": 9,"manual": 0}},"164_308_a_3_i": {"name": "164.308(a)(3)(i) Workforce security","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": 
null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_3_i","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement policies and procedures to ensure that all members of its workforce have appropriate access to electronic protected health information, as provided under paragraph (a)(4) of this section, and to prevent those workforce members who do not have access under paragraph (a)(4) of this section from obtaining access to electronic protected health information.","checks_status": {"fail": 1,"pass": 5,"total": 15,"manual": 0}},"164_308_a_4_i": {"name": "164.308(a)(4)(i) Information access management","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_4_i","Section": "164.308 Administrative Safeguards","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Implement policies and procedures for authorizing access to electronic protected health information that are consistent with the applicable requirements of subpart E of this part.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"164_308_a_6_i": {"name": "164.308(a)(6)(i) Security incident procedures","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_6_i","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement policies and procedures to address security incidents.","checks_status": {"fail": 0,"pass": 2,"total": 8,"manual": 0}},"164_308_a_7_i": {"name": "164.308(a)(7)(i) Contingency plan","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_7_i","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establish (and implement as needed) policies and procedures for responding to an emergency or other occurrence (for example, fire, vandalism, system failure, and natural disaster) that damages systems that contain electronic protected health information.","checks_status": {"fail": 3,"pass": 1,"total": 10,"manual": 0}},"164_312_a_2_i": {"name": "164.312(a)(2)(i) Unique user identification","checks": {"iam_no_root_access_key": null,"s3_bucket_public_access": null,"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_312_a_2_i","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Assign a unique name and/or number for identifying and tracking user identity.","checks_status": 
{"fail": 0,"pass": 0,"total": 4,"manual": 0}},"164_312_e_2_i": {"name": "164.312(e)(2)(i) Integrity controls","checks": {"elb_ssl_listeners": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_e_2_i","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement security measures to ensure that electronically transmitted electronic protected health information is not improperly modified without detection until disposed of.","checks_status": {"fail": 4,"pass": 3,"total": 10,"manual": 0}},"164_308_a_6_ii": {"name": "164.308(a)(6)(ii) Response and reporting","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudwatch_log_metric_filter_root_usage": null,"s3_bucket_server_access_logging_enabled": "FAIL","cloudwatch_log_metric_filter_authentication_failures": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_6_ii","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Identify and respond to suspected or known security incidents; mitigate, to the extent practicable, harmful effects of security incidents that are known to the covered entity or business associate; and document security incidents and their outcomes.","checks_status": {"fail": 6,"pass": 4,"total": 15,"manual": 0}},"164_312_a_2_ii": {"name": "164.312(a)(2)(ii) Emergency access procedure","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_a_2_ii","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establish (and implement as needed) procedures for obtaining necessary electronic protected health information during an emergency.","checks_status": {"fail": 2,"pass": 1,"total": 9,"manual": 0}},"164_312_a_2_iv": {"name": "164.312(a)(2)(iv) Encryption and decryption","checks": {"kms_cmk_rotation_enabled": null,"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"eks_cluster_kms_cmk_encryption_in_secrets_enabled": 
null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_a_2_iv","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement a mechanism to encrypt and decrypt electronic protected health information.","checks_status": {"fail": 6,"pass": 3,"total": 19,"manual": 0}},"164_312_e_2_ii": {"name": "164.312(e)(2)(ii) Encryption","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"eks_cluster_kms_cmk_encryption_in_secrets_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_312_e_2_ii","Section": "164.312 Technical Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement a mechanism to encrypt electronic protected health information whenever deemed appropriate.","checks_status": {"fail": 7,"pass": 3,"total": 19,"manual": 0}},"164_308_a_1_ii_a": {"name": "164.308(a)(1)(ii)(A) Risk analysis","checks": {"guardduty_is_enabled": "PASS","config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_1_ii_a","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Conduct an accurate and thorough assessment of the potential risks and vulnerabilities to the confidentiality, integrity, and availability of electronic protected health information held by the covered entity or business associate.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"164_308_a_1_ii_b": {"name": "164.308(a)(1)(ii)(B) Risk Management","checks": {"elb_ssl_listeners": "FAIL","rds_instance_multi_az": "FAIL","ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"ec2_ebs_volume_encryption": "PASS","elbv2_deletion_protection": "FAIL","ec2_ebs_default_encryption": "PASS","rds_instance_backup_enabled": "PASS","rds_snapshots_public_access": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"s3_bucket_default_encryption": "PASS","awslambda_function_url_public": null,"efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"redshift_cluster_public_access": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","awslambda_function_not_publicly_accessible": "PASS","cloudwatch_log_group_kms_encryption_enabled": "FAIL","iam_inline_policy_no_administrative_privileges": 
null,"sagemaker_notebook_instance_encryption_enabled": null,"iam_aws_attached_policy_no_administrative_privileges": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"iam_customer_attached_policy_no_administrative_privileges": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_1_ii_b","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement security measures sufficient to reduce risks and vulnerabilities to a reasonable and appropriate level to comply with 164.306(a): Ensure the confidentiality, integrity, and availability of all electronic protected health information the covered entity or business associate creates, receives, maintains, or transmits.","checks_status": {"fail": 14,"pass": 9,"total": 39,"manual": 0}},"164_308_a_1_ii_d": {"name": "164.308(a)(1)(ii)(D) Information system activity review","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_1_ii_d","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement procedures to regularly review records of information system activity, such as audit logs, access reports, and security incident tracking reports.","checks_status": {"fail": 7,"pass": 4,"total": 15,"manual": 0}},"164_308_a_3_ii_a": {"name": "164.308(a)(3)(ii)(A) Authorization and/or supervision","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_root_mfa_enabled": null,"elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","iam_root_hardware_mfa_enabled": null,"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","iam_user_mfa_enabled_console_access": null,"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_3_ii_a","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement procedures for the authorization and/or supervision of workforce members who work with electronic protected health information or in locations where it might be accessed.","checks_status": {"fail": 4,"pass": 4,"total": 16,"manual": 0}},"164_308_a_3_ii_b": {"name": "164.308(a)(3)(ii)(B) Workforce clearance procedure","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": 
[{"Type": null,"ItemId": "164_308_a_3_ii_b","Section": "164.308 Administrative Safeguards","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Implement procedures to determine that the access of a workforce member to electronic protected health information is appropriate.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"164_308_a_3_ii_c": {"name": "164.308(a)(3)(ii)(C) Termination procedures","checks": {"iam_rotate_access_key_90_days": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_3_ii_c","Section": "164.308 Administrative Safeguards","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Implement procedures for terminating access to electronic protected health information when the employment of, or other arrangement with, a workforce member ends or as required by determinations made as specified in paragraph (a)(3)(ii)(b).","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"164_308_a_4_ii_a": {"name": "164.308(a)(4)(ii)(A) Isolating health care clearinghouse functions","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","rds_instance_backup_enabled": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"acm_certificates_expiration_check": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","redshift_cluster_automated_snapshot": null,"cloudfront_distributions_https_enabled": null,"rds_instance_integration_cloudwatch_logs": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"eks_cluster_kms_cmk_encryption_in_secrets_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_4_ii_a","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "If a health care clearinghouse is part of a larger organization, the clearinghouse must implement policies and procedures that protect the electronic protected health information of the clearinghouse from unauthorized access by the larger organization.","checks_status": {"fail": 7,"pass": 5,"total": 25,"manual": 0}},"164_308_a_4_ii_b": {"name": "164.308(a)(4)(ii)(B) Access authorization","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_4_ii_b","Section": "164.308 Administrative Safeguards","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Implement policies and procedures for granting access to electronic protected health information, As one illustrative example, through access to a workstation, transaction, program, process, or other mechanism.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"164_308_a_4_ii_c": {"name": "164.308(a)(4)(ii)(B) Access authorization","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_password_policy_reuse_24": null,"iam_rotate_access_key_90_days": 
null,"iam_user_console_access_unused": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_4_ii_c","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement policies and procedures that, based upon the covered entity's or the business associate's access authorization policies, establish, document, review, and modify a user's right of access to a workstation, transaction, program, or process.","checks_status": {"fail": 1,"pass": 0,"total": 9,"manual": 0}},"164_308_a_5_ii_b": {"name": "164.308(a)(5)(ii)(B) Protection from malicious software","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_5_ii_b","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Procedures for guarding against, detecting, and reporting malicious software.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"164_308_a_5_ii_c": {"name": "164.308(a)(5)(ii)(C) Log-in monitoring","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_5_ii_c","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Procedures for monitoring log-in attempts and reporting discrepancies.","checks_status": {"fail": 0,"pass": 2,"total": 3,"manual": 0}},"164_308_a_5_ii_d": {"name": "164.308(a)(5)(ii)(D) Password management","checks": {"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "164_308_a_5_ii_d","Section": "164.308 Administrative Safeguards","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Procedures for creating, changing, and safeguarding passwords.","checks_status": {"fail": 0,"pass": 0,"total": 9,"manual": 0}},"164_308_a_7_ii_a": {"name": "164.308(a)(7)(ii)(A) Data backup plan","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_7_ii_a","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establish and implement procedures to create and maintain retrievable exact copies of electronic protected health information.","checks_status": {"fail": 3,"pass": 1,"total": 10,"manual": 0}},"164_308_a_7_ii_b": {"name": "164.308(a)(7)(ii)(B) Disaster recovery plan","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": 
null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_7_ii_b","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establish (and implement as needed) procedures to restore any loss of data.","checks_status": {"fail": 3,"pass": 1,"total": 10,"manual": 0}},"164_308_a_7_ii_c": {"name": "164.308(a)(7)(ii)(C) Emergency mode operation plan","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "164_308_a_7_ii_c","Section": "164.308 Administrative Safeguards","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establish (and implement as needed) procedures to enable continuation of critical business processes for protection of the security of electronic protected health information while operating in emergency mode.","checks_status": {"fail": 3,"pass": 1,"total": 10,"manual": 0}}},"requirements_passed": 11,"requirements_failed": 21,"requirements_manual": 0,"total_requirements": 32,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "6a808cc7-3501-4085-98f9-e4a9fa251f4c","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "mitre_attack_aws","framework": "MITRE-ATTACK","version": "","description": "MITRE ATT&CKยฎ is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations. The ATT&CK knowledge base is used as a foundation for the development of specific threat models and methodologies in the private sector, in government, and in the cybersecurity product and service community.","region": "eu-west-1","requirements": {"T1040": {"name": "Network Sniffing","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_hardware_mfa_enabled": null,"rds_instance_transport_encrypted": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL","config_recorder_all_regions_enabled": null,"iam_user_mfa_enabled_console_access": null,"cloudfront_distributions_https_enabled": null,"iam_policy_allows_privilege_escalation": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS CloudWatch uses TLS/SSL connections to communicate with other AWS resources which protects against network sniffing attacks. As a result, this mapping is given a score of Significant.","Category": "Protect","AWSService": "AWS CloudWatch"},{"Value": "Significant","Comment": "AWS RDS and AWS RDS Proxy support TLS/SSL connections to database instances which protects against network sniffing attacks. 
As a result, this mapping is given a score of Significant.","Category": "Protect","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "The VPC service's support for the AWS Virtual Private Network (VPN) can be used to encrypt traffic traversing over untrusted networks which can prevent information from being gathered via network sniffing.","Category": "Protect","AWSService": "Amazon Virtual Private Cloud"},{"Value": "Partial","Comment": "The following AWS IoT Device Defender audit checks and corresponding mitigation actions can identify and resolve configuration problems that should be fixed in order to ensure SSL/TLS encryption is enabled and secure to protect network traffic to/from IoT devices: 'CA certificate expiring' ('CA_CERTIFICATE_EXPIRING_CHECK' in the CLI and API), 'CA certificate key quality' ('CA_CERTIFICATE_KEY_QUALITY_CHECK' in the CLI and API), and 'CA certificate revoked but device certificates still active' ('REVOKED_CA_CERTIFICATE_STILL_ACTIVE_CHECK' in the CLI and API) can identify problems with certificate authority (CA) certificates being used for signing and support the 'UPDATE_CA_CERTIFICATE' mitigation action which can resolve them. 'Device certificate expiring' ('DEVICE_CERTIFICATE_EXPIRING_CHECK' in the CLI and API), 'Device certificate key quality' ('DEVICE_CERTIFICATE_KEY_QUALITY_CHECK' in the CLI and API), 'Device certificate shared' ('DEVICE_CERTIFICATE_SHARED_CHECK' in the CLI and API), and 'Revoked device certificate still active' ('REVOKED_DEVICE_CERTIFICATE_STILL_ACTIVE_CHECK' in the CLI and API) can identify problems with IoT devices' certificates and support the 'UPDATE_DEVICE_CERTIFICATE' and 'ADD_THINGS_TO_THING_GROUP' mitigation actions which can resolve them. Coverage factor is partial for these checks and mitigations, since they are specific to IoT device communication and can only mitigate behavior for adversaries who are unable to decrypt the relevant traffic, resulting in an overall score of Partial.","Category": "Protect","AWSService": "AWS IoT Device Defender"},{"Value": "Partial","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure SSL/TLS encryption is enabled to protect network traffic: 'acm-certificate-expiration-check' for nearly expired certificates in AWS Certificate Manager (ACM); 'alb-http-to-https-redirection-check' for Application Load Balancer (ALB) HTTP listeners; 'api-gw-ssl-enabled' for API Gateway REST API stages; 'cloudfront-custom-ssl-certificate', 'cloudfront-sni-enabled', and 'cloudfront-viewer-policy-https', for Amazon CloudFront distributions; 'elb-acm-certificate-required', 'elb-custom-security-policy-ssl-check', 'elb-predefined-security-policy-ssl-check', and 'elb-tls-https-listeners-only' for Elastic Load Balancing (ELB) Classic Load Balancer listeners; 'redshift-require-tls-ssl' for Amazon Redshift cluster connections to SQL clients; 's3-bucket-ssl-requests-only' for requests for S3 bucket contents; and 'elasticsearch-node-to-node-encryption-check' for Amazon ElasticSearch Service node-to-node communications. The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure that private traffic is routed securely and only within VPCs rather than on the public Internet: 'api-gw-endpoint-type-check' for Amazon API Gateway APIs, 'elasticsearch-in-vpc-only' for Amazon ElasticSearch Service domains, and 'redshift-enhanced-vpc-routing-enabled' for Amazon Redshift cluster traffic. 
All of these are run on configuration changes except 'alb-http-to-https-redirection-check' and 'elasticsearch-in-vpc-only', which are run periodically. Coverage factor is partial for these rules, since they are specific to a subset of the available AWS services and can only mitigate behavior for adversaries who are unable to decrypt the relevant traffic and/or do not have access to traffic within the relevant VPCs, resulting in an overall score of Partial.","Category": "Protect","AWSService": "AWS Config"}],"description": "Adversaries may sniff network traffic to capture information about an environment, including authentication material passed over the network. Network sniffing refers to using the network interface on a system to monitor or capture information sent over a wired or wireless connection. An adversary may place a network interface into promiscuous mode to passively access data in transit over the network, or use span ports to capture a larger amount of data.","checks_status": {"fail": 5,"pass": 1,"total": 17,"manual": 0}},"T1046": {"name": "Network Service Discovery","checks": {"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","elbv2_waf_acl_attached": "FAIL","networkfirewall_in_all_vpc": "FAIL","inspector2_active_findings_exist": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Value": "Partial","Comment": "The following AWS IoT Device Defender device-side detection metrics can detect indicators that an adversary may be leveraging compromised AWS IoT devices to search their networks for other hosts and their running services, possibly to subsequently carry out lateral movement techniques: 'Destination IPs' ('aws:destination-ip-addresses') outside of expected IP address ranges may suggest that a device is communicating with unexpected devices. 'Bytes in' ('aws:all-bytes-in'), 'Bytes out' ('aws:all-bytes-out'), 'Packets in' ('aws:all-packets-in'), and 'Packets out' ('aws:all-packets-out') values outside of expected norms may indicate that the device is sending and/or receiving non-standard traffic, which may include traffic used to discover other hosts/services.
'Listening TCP ports' ('aws:listening-tcp-ports'), 'Listening TCP port count' ('aws:num-listening-tcp-ports'), 'Established TCP connections count' ('aws:num-established-tcp-connections'), 'Listening UDP ports' ('aws:listening-udp-ports'), and 'Listening UDP port count' ('aws:num-listening-udp-ports') values outside of expected norms may indicate that devices are communicating via unexpected ports/protocols that may suggest scanning is taking place. Coverage factor is partial, since these metrics are limited to IoT device communication and detection is only based on network traffic, resulting in an overall score of Partial.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Partial","Comment": "AWS Network Firewall has the ability to pass, drop, or alert on traffic based on the network protocol as well as perform deep packet inspection on the payload. This functionality can be used to restrict access to the endpoints within the virtual private cloud and protect against network service scanning. This mapping is given a score of Partial because it only protects against network service scanning attacks that originate from outside the firewall and not from within the network protected by the firewall.","Category": "Protect","AWSService": "AWS Network Firewall"},{"Value": "Partial","Comment": "AWS WAF protects against bots that run scans against web applications such as Nessus (vulnerability assessments) and Nmap (IP address and port scans) among others. AWS WAF does this by blocking malicious traffic that indicates bad bots such as those listed above (e.g., via User-Agent values). AWS WAF uses the following rule sets to provide this protection. AWSManagedRulesCommonRuleSet AWSManagedRulesBotControlRuleSet. This is scored as Partial because the rule sets, while they block malicious traffic in near real-time, only protect web applications against scans performed by bots.","Category": "Protect","AWSService": "AWS Web Application Firewall"},{"Value": "Partial","Comment": "The following GuardDuty finding types reflect flagged events where there is an attempt to get a list of services running on a remote host. Recon:EC2/PortProbeEMRUnprotectedPort Recon:EC2/PortProbeUnprotectedPort Recon:EC2/Portscan Impact:EC2/PortSweep","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Partial","Comment": "The Amazon Inspector Network Reachability assessment package can assess whether or not cloud/network components are vulnerable (e.g., publicly accessible from the Internet). Amazon Inspector does not directly protect cloud/network components; rather, it reports on vulnerabilities that it identifies which can then be used to securely configure the cloud/network components. Due to this, the score is capped at Partial.","Category": "Protect","AWSService": "Amazon Inspector"},{"Value": "Significant","Comment": "VPC security groups and network access control lists (NACLs) can filter both internal and external network traffic and therefore can mitigate unauthorized network service scanning.","Category": "Protect","AWSService": "Amazon Virtual Private Cloud"}],"description": "Adversaries may attempt to get a listing of services running on remote hosts and local network infrastructure devices, including those that may be vulnerable to remote software exploitation.
Common methods to acquire this information include port and/or vulnerability scans using tools that are brought onto a system.","checks_status": {"fail": 7,"pass": 16,"total": 23,"manual": 0}},"T1048": {"name": "Exfiltration Over Alternative Protocol","checks": {"guardduty_is_enabled": "PASS","networkfirewall_in_all_vpc": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Value": "Partial","Comment": "AWS Network Firewall has the ability to pass, drop, or alert on traffic based on the network protocol as well as perform deep packet inspection on the payload. This functionality can be used to block adversaries from accessing resources from which to exfiltrate data as well as prevent resources from communicating with known-bad IP addresses and domains that might be used to receive exfiltrated data. This mapping is given a score of Partial because the known-bad IP addresses and domains would need to be known in advance and AWS Network Firewall wouldn't have deep packet inspection visibility into encrypted non-C2 protocols.","Category": "Protect","AWSService": "AWS Network Firewall"},{"Value": "Partial","Comment": "The following GuardDuty finding type flags events where adversaries may steal data by exfiltrating it over a different protocol than that of the existing command-and-control channel. Trojan:EC2/DNSDataExfiltration Behavior:EC2/TrafficVolumeUnusual.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Partial","Comment": "This control provides partial coverage for this technique and all of its sub-techniques, resulting in an overall score of Partial.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Partial","Comment": "VPC security groups and network access control lists (NACLs) can limit access to external hosts and can therefore provide mitigation of this technique. For environments where Internet access is required, these controls can be used to block known malicious addresses. 
Because this latter protection is limited to known malicious endpoints, it provides Partial coverage resulting in an overall Partial score.","Category": "Protect","AWSService": "Amazon Virtual Private Cloud"}],"description": "Adversaries may steal data by exfiltrating it over a different protocol than that of the existing command and control channel. The data may also be sent to an alternate network location from the main command and control server.","checks_status": {"fail": 4,"pass": 16,"total": 20,"manual": 0}},"T1049": {"name": "System Network Connections Discovery","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may attempt to get a listing of network connections to or from the compromised system they are currently accessing or from remote systems by querying for information over the network.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1059": {"name": "Command and Scripting Interpreter","checks": {"elbv2_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Value": "Partial","Comment": "The AWS WAF protects web applications from injection attacks that leverage command and scripting interpreters. AWS WAF provides this protection via the following rule sets that block malicious traffic across a variety of operating systems and applications. AWSManagedRulesCommonRuleSet AWSManagedRulesSQLiRuleSet AWSManagedRulesUnixRuleSet AWSManagedRulesWindowsRuleSet AWSManagedRulesPHPRuleSet AWSManagedRulesWordPressRuleSet. This is given a score of Partial (instead of Minimal) because while it only protects against a subset of SubTechniques (3 out of 8), it does provide protections for command and scripting interpreters that do not have SubTechniques (SQL, PHP, etc.). Furthermore, it blocks the malicious content in near real-time.","Category": "Protect","AWSService": "AWS Web Application Firewall"}],"description": "Adversaries may abuse command and script interpreters to execute commands, scripts, or binaries. These interfaces and languages provide ways of interacting with computer systems and are a common feature across many different platforms. Most systems come with some built-in command-line interface and scripting capabilities, for example, macOS and Linux distributions include some flavor of Unix Shell while Windows installations include the Windows Command Shell and PowerShell.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"T1069": {"name": "Permission Groups Discovery","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may attempt to discover group and permission settings. This information can help adversaries determine which user accounts and groups are available, the membership of users in particular groups, and which users and groups have elevated permissions.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1074": {"name": "Data Staged","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may stage collected data in a central location or directory prior to Exfiltration. Data may be kept in separate files or combined into one file through techniques such as Archive Collected Data.
Interactive command shells may be used, and common functionality within cmd and bash may be used to copy data into a staging location.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1078": {"name": "Valid Accounts","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_avoid_root_usage": null,"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_user_console_access_unused": null,"iam_user_two_active_access_key": null,"iam_policy_no_full_access_to_kms": null,"iam_administrator_access_with_mfa": null,"config_recorder_all_regions_enabled": null,"iam_user_mfa_enabled_console_access": null,"iam_user_no_setup_initial_access_key": null,"organizations_scp_check_deny_regions": null,"iam_password_policy_minimum_length_14": null,"iam_policy_allows_privilege_escalation": null,"organizations_delegated_administrators": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_no_expired_server_certificates_stored": null,"organizations_account_part_of_organizations": null,"iam_role_cross_account_readonlyaccess_policy": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_role_cross_service_confused_deputy_prevention": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"iam_customer_unattached_policy_no_administrative_privileges": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Value": "Partial","Comment": "GuardDuty implements a finding that flags occurrences of unattended behavior from an IAM User in the Account.
PenTest:IAMUser/KaliLinux, PenTest:IAMUser/ParrotLinux, PenTest:IAMUser/PentooLinux, Policy:IAMUser/RootCredentialUsage, PrivilegeEscalation:IAMUser/AdministrativePermissions, UnauthorizedAccess:IAMUser/ConsoleLogin, UnauthorizedAccess:IAMUser/ConsoleLoginSuccess.B, UnauthorizedAccess:IAMUser/MaliciousIPCaller, UnauthorizedAccess:IAMUser/MaliciousIPCaller.Custom, UnauthorizedAccess:IAMUser/TorIPCaller, Policy:S3/AccountBlockPublicAccessDisabled, Policy:S3/BucketAnonymousAccessGranted, Policy:S3/BucketBlockPublicAccessDisabled, Policy:S3/BucketPublicAccessGranted, CredentialAccess:IAMUser/AnomalousBehavior, DefenseEvasion:IAMUser/AnomalousBehavior, Discovery:IAMUser/AnomalousBehavior, Exfiltration:IAMUser/AnomalousBehavior, Impact:IAMUser/AnomalousBehavior, Persistence:IAMUser/AnomalousBehavior, Recon:IAMUser/MaliciousIPCaller, Recon:IAMUser/MaliciousIPCaller.Custom, UnauthorizedAccess:IAMUser/InstanceCredentialExfiltration.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Minimal","Comment": "This control provides significant coverage for one of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "This control provides detection capability for one of this technique's SubTechniques and some of its procedure examples resulting in an overall Partial protection score.","Category": "Detect","AWSService": "AWS IAM"},{"Value": "Partial","Comment": "This control provides protection capability for one of this technique's SubTechniques and some of its procedure examples resulting in an overall Partial protection score.","Category": "Protect","AWSService": "AWS IAM"},{"Value": "Partial","Comment": "This control provides protection capability for one of this technique's SubTechniques and some of its procedure examples resulting in an overall Partial protection score.","Category": "Protect","AWSService": "AWS Single Sign-On"},{"Value": "Minimal","Comment": "This control provides partial detection capability for one of this technique's SubTechniques and a few of its procedure examples resulting in an overall Minimal protection score.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Minimal","Comment": "This control provides partial protection for one of this technique's SubTechniques and a few of its procedure examples resulting in an overall Minimal protection score.","Category": "Protect","AWSService": "AWS IoT Device Defender"},{"Value": "Partial","Comment": "This control may protect against malicious use of cloud accounts but may not mitigate exploitation of local, domain, or default accounts present within deployed resources.","Category": "Protect","AWSService": "AWS Organizations"},{"Value": "Minimal","Comment": "AWS Security Hub detects suspicious activity by AWS accounts which could indicate valid accounts being leveraged by an adversary. AWS Security Hub provides these detections with the following managed insights. AWS principals with suspicious access key activity Credentials that may have leaked AWS resources with unauthorized access attempts IAM users with suspicious activity. AWS Security Hub also performs checks from the AWS Foundations CIS Benchmark and PCI-DSS security standard that, if implemented, would help towards detecting the misuse of valid accounts. AWS Security Hub provides these detections with the following checks. 
3.1 Ensure a log metric filter and alarm exist for unauthorized API calls 3.2 Ensure a log metric filter and alarm exist for Management Console sign-in without MFA 3.3 Ensure a log metric filter and alarm exist for usage of root account 3.4 Ensure a log metric filter and alarm exist for IAM policy changes 3.6 Ensure a log metric filter and alarm exist for AWS Management Console authentication failures [PCI.CW.1] A log metric filter and alarm should exist for usage of the root user. By monitoring the root account, activity where accounts make unauthorized API calls, and changes to IAM permissions among other things, it may be possible to detect valid accounts that are being misused and are potentially compromised. This is scored as Minimal because it only supports a subset of the SubTechniques (1 of 4).","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Minimal","Comment": "This control provides partial protection for one of this technique's SubTechniques and a few of its procedure examples resulting in an overall Minimal protection score.","Category": "Protect","AWSService": "Amazon Cognito"}],"description": "Adversaries may obtain and abuse credentials of existing accounts as a means of gaining Initial Access, Persistence, Privilege Escalation, or Defense Evasion. Compromised credentials may be used to bypass access controls placed on various resources on systems within the network and may even be used for persistent access to remote systems and externally available services, such as VPNs, Outlook Web Access, network devices, and remote desktop.[1] Compromised credentials may also grant an adversary increased privilege to specific systems or access to restricted areas of the network. Adversaries may choose not to use malware or tools in conjunction with the legitimate access those credentials provide to make it harder to detect their presence.","checks_status": {"fail": 0,"pass": 2,"total": 36,"manual": 0}},"T1082": {"name": "System Information Discovery","checks": {},"status": "PASS","attributes": [],"description": "An adversary may attempt to get detailed information about the operating system and hardware, including version, patches, hotfixes, service packs, and architecture. Adversaries may use the information from System Information Discovery during automated discovery to shape follow-on behaviors, including whether or not the adversary fully infects the target and/or attempts specific actions.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1087": {"name": "Account Discovery","checks": {"organizations_account_part_of_organizations": null},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "This control may protect against cloud account discovery but does not mitigate against other forms of account discovery.","Category": "Protect","AWSService": "AWS Organizations"}],"description": "Adversaries may attempt to get a listing of valid accounts, usernames, or email addresses on a system or within a compromised environment. 
This information can help adversaries determine which accounts exist, which can aid in follow-on behavior such as brute-forcing, spear-phishing attacks, or account takeovers (e.g., Valid Accounts).","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"T1098": {"name": "Account Manipulation","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_avoid_root_usage": null,"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_hardware_mfa_enabled": null,"iam_policy_no_full_access_to_kms": null,"iam_administrator_access_with_mfa": null,"config_recorder_all_regions_enabled": null,"iam_user_mfa_enabled_console_access": null,"iam_policy_allows_privilege_escalation": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "This control provides significant coverage for one of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Minimal","Comment": "This control may generate logs for creation and manipulation of accounts but the relevant security information would be handled by another security control.","Category": "Detect","AWSService": "AWS IAM"},{"Value": "Minimal","Comment": "AWS Security Hub performs a check from the AWS Foundations CIS Benchmark that, if implemented, would help towards detecting the manipulation of accounts. AWS Security Hub provides this detection with the following check. 3.4 Ensure a log metric filter and alarm exist for IAM policy changes. This is scored as Minimal because it only supports a subset of the SubTechniques (1 of 4).","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Partial","Comment": "GuardDuty has a finding type that flags events where an adversary may have compromised an AWS IAM User. Finding Type: Persistence:IAMUser/AnomalousBehavior.","Category": "Detect","AWSService": "Amazon GuardDuty"}],"description": "Adversaries may manipulate accounts to maintain access to victim systems. Account manipulation may consist of any action that preserves adversary access to a compromised account, such as modifying credentials or permission groups.
These actions could also include account activity designed to subvert security policies, such as performing iterative password updates to bypass password duration policies and preserve the life of compromised credentials.","checks_status": {"fail": 0,"pass": 2,"total": 16,"manual": 0}},"T1110": {"name": "Brute Force","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_root_mfa_enabled": null,"inspector2_is_enabled": "FAIL","iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_user_console_access_unused": null,"inspector2_active_findings_exist": "FAIL","config_recorder_all_regions_enabled": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "This control provides significant coverage for all of this technique's sub-techniques, resulting in an overall score of Significant.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Significant","Comment": "This control provides significant coverage for all of this technique's sub-techniques, resulting in an overall score of Significant.","Category": "Protect","AWSService": "AWS IAM"},{"Value": "Partial","Comment": "This control may not provide any mitigation against password cracking.","Category": "Protect","AWSService": "AWS Single Sign-On"},{"Value": "Significant","Comment": "Amazon Cognito's MFA capability provides significant protection against password compromises, requiring the adversary to complete an additional authentication method before their access is permitted.","Category": "Protect","AWSService": "Amazon Cognito"},{"Value": "Minimal","Comment": "AWS Security Hub performs a check from the AWS Foundations CIS Benchmark that, if implemented, would help towards detecting the brute forcing of accounts. AWS Security Hub provides this detection with the following checks. 3.6 Ensure a log metric filter and alarm exist for AWS Management Console authentication failures. This is scored as Minimal because it only applies to the AWS Management Console and not other access mechanisms (e.g., CLI, SDK, etc.) and it only supports a subset of the sub-techniques (3 of 4). Furthermore, it does not detect brute-forcing methods for other components such as EC2 instances.","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Minimal","Comment": "Finding types such as UnauthorizedAccess:EC2/RDPBruteForce, UnauthorizedAccess:EC2/SSHBruteForce, Impact:EC2/WinRMBruteForce, and Stealth:IAMUser/PasswordPolicyChange can detect when an EC2 instance may be involved in a brute force attack aimed at obtaining passwords. Due to the detection being limited to a specific set of application protocols, its coverage is Minimal resulting in a Minimal score.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Minimal","Comment": "The Amazon Inspector Best Practices assessment package can detect security control settings related to authentication and password policies on Linux endpoints. 
Specific security controls it can assess include 'Disable password authentication over SSH', 'Configure password maximum age', 'Configure password minimum length', and 'Configure password complexity', all of which impact the ability to brute force a password. This information can be used to identify insecure configurations and harden the endpoints. Amazon Inspector does not directly protect against brute force attacks. Given Amazon Inspector can only assess these security controls on Linux platforms (although it also supports Windows), the coverage score is Minimal leading to an overall Minimal score.","Category": "Protect","AWSService": "Amazon Inspector"}],"description": "Adversaries may use brute force techniques to gain access to accounts when passwords are unknown or when password hashes are obtained. Without knowledge of the password for an account or set of accounts, an adversary may systematically guess the password using a repetitive or iterative mechanism. Brute forcing passwords can take place via interaction with a service that will check the validity of those credentials or offline against previously acquired credential data, such as password hashes.","checks_status": {"fail": 2,"pass": 2,"total": 19,"manual": 0}},"T1119": {"name": "Automated Collection","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","ec2_ebs_snapshots_encrypted": "FAIL","s3_bucket_default_encryption": "PASS","rds_instance_storage_encrypted": "FAIL","config_recorder_all_regions_enabled": null},"status": "FAIL","attributes": [{"Value": "Minimal","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure that storage volumes are encrypted, which may mitigate adversary attempts to automate collection within cloud environments: 'ec2-ebs-encryption-by-default' which is run periodically and 'encrypted-volumes' which is run on configuration changes. Coverage factor is minimal for these rules, since they are specific to EBS volumes and will only prevent certain forms of collection since adversaries with access to mounted volumes may be able to decrypt their contents, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS Config"}],"description": "Once established within a system or network, an adversary may use automated techniques for collecting internal data. Methods for performing this technique could include use of a Command and Scripting Interpreter to search for and copy information fitting set criteria such as file type, location, or name at specific time intervals. In cloud-based environments, adversaries may also use cloud APIs, command line interfaces, or extract, transform, and load (ETL) services to automatically collect data. This functionality could also be built into remote access tools.","checks_status": {"fail": 2,"pass": 3,"total": 6,"manual": 0}},"T1136": {"name": "Create Account","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "This control provides partial coverage for one of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS Config"}],"description": "Adversaries may create an account to maintain access to victim systems.
With a sufficient level of access, creating such accounts may be used to establish secondary credentialed access that do not require persistent remote access tools to be deployed on the system.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"T1190": {"name": "Exploit Public-Facing Application","checks": {"drs_job_exist": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","rds_instance_backup_enabled": "PASS","inspector2_active_findings_exist": "FAIL","config_recorder_all_regions_enabled": null,"awslambda_function_not_publicly_accessible": "PASS","rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS CloudEndure Disaster Recovery enables the replication and recovery of servers into AWS Cloud. In the event that a public-facing application or server is compromised, AWS CloudEndure can be used to provision an instance of the server from a previous point in time within minutes. As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS CloudEndure Disaster Recovery"},{"Value": "Partial","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure that applications intended for internal use cannot be accessed externally for exploitation: 'api-gw-endpoint-type-check' can ensure that Amazon API Gateway APIs are private and can only be accessed from within VPCs, 'elasticsearch-in-vpc-only' can ensure that Amazon ElasticSearch Service (Amazon ES) domains are in the same VPC and the domain endpoint is not public, 'lambda-function-public-access-prohibited' can verify that AWS Lambda functions are not publicly available, and 'ec2-instance-no-public-ip' can verify whether EC2 instances have public IP addresses. The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure that insecure applications are not installed and installed packages are kept updated, reducing the likelihood of adversary exploitation: the 'ec2-managedinstance-applications-blacklisted' managed rule verifies that a pre-defined list of applications are not installed on specified managed instances. It can be used to identify the presence of vulnerable applications (prompting removal before they can be exploited) and/or to identify the presence of allowed packages below a minimum version (prompting updates before they can be exploited). The 'ec2-managedinstance-platform-check' managed rule verifies that managed instances are running desired platform types, including using a desired version (as opposed to an out-of-date one). Both can reduce instances' attack surface for adversary exploitation. 'rds-automatic-minor-version-upgrade-enabled' can verify that Amazon RDS is being patched, and 'elastic-beanstalk-managed-updates-enabled' can verify that Elastic Beanstalk is being patched. Coverage factor is partial for these rules, since they are specific to a subset of the available AWS services that can be used to host public-facing applications and will only protect against certain forms of identifiable exploitation, resulting in an overall score of Partial.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "AWS RDS supports the automatic patching of minor versions of database instances. 
This can result in security flaws in the database instances being fixed before they can be exploited. This mapping is given a score of Partial because it does not protect against misconfigured database instances which may be susceptible to exploitation.","Category": "Protect","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "AWS RDS supports the replication and recovery of database instances. In the event that a database instance is compromised, AWS RDS can be used to restore the database instance to a previous point in time. As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS RDS"},{"Value": "Minimal","Comment": "There is a GuardDuty finding type that captures when vulnerable publicly facing resources are leveraged to capture data not intended to be viewable (e.g., IAM credentials associated with the resource). UnauthorizedAccess:EC2/MetadataDNSRebind - This finding type only detects MetadataDNSRebind and is more focused on the EC2 instance and not the application running on the instance itself resulting in Minimal coverage.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Partial","Comment": "AWS Security Hub reports on EC2 instances that are missing security patches for vulnerabilities which could enable an adversary to exploit vulnerabilities through the attack lifecycle. AWS Security Hub provides this detection with the following managed insight. EC2 instances that have missing security patches for important vulnerabilities. This is scored as Partial because the checks associated with Security Hub would only report on missing patches for known vulnerabilities. It does not cover zero-day vulnerabilities.","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Significant","Comment": "The AWS WAF protects public-facing applications against a range of vulnerabilities including those listed in the OWASP Top 10. AWS WAF provides this protection via the following rule sets that block malicious traffic across a variety of operating systems and applications. AWSManagedRulesCommonRuleSet AWSManagedRulesKnownBadInputRuleSet AWSManagedRulesSQLiRuleSet AWSManagedRulesLinuxRuleSet AWSManagedRulesUnixRuleSet AWSManagedRulesWindowsRuleSet AWSManagedRulesPHPRuleSet AWSManagedRulesWordPressRuleSet. This is given a score of Significant because it protects against vulnerabilities across multiple operating systems (Windows, Linux, POSIX) and technologies (JavaScript, SQL, PHP, WordPress). Furthermore, it blocks the malicious content in near real-time.","Category": "Protect","AWSService": "AWS Web Application Firewall"},{"Value": "Partial","Comment": "Amazon Inspector can detect known vulnerabilities on various Windows and Linux endpoints. Furthermore, the Amazon Inspector Best Practices assessment package can assess security controls for 'Enable Address Space Layout Randomization (ASLR)' and 'Enable Data Execution Prevention (DEP)' that make it more difficult for an attacker to exploit vulnerabilities in software. This information can be used to patch, isolate, and remove vulnerable software and endpoints. Amazon Inspector does not directly protect against exploitation and it is not effective against zero-day attacks, vulnerabilities with no available patch, and software that may not be analyzed by the scanner.
As a result, the score is capped at Partial.","Category": "Protect","AWSService": "Amazon Inspector"}],"description": "Adversaries may attempt to exploit a weakness in an Internet-facing host or system to initially access a network. The weakness in the system can be a software bug, a temporary glitch, or a misconfiguration.","checks_status": {"fail": 5,"pass": 5,"total": 11,"manual": 0}},"T1199": {"name": "Trusted Relationship","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Value": "Partial","Comment": "VPC network access control lists (NACLs) can isolate portions of the network that do not require network-wide access, limiting some attackers that leverage trusted relationships such as remote access for vendor maintenance. Coverage partial, Temporal Immediate.","Category": "Protect","AWSService": "Amazon Virtual Private Cloud"}],"description": "Adversaries may breach or otherwise leverage organizations who have access to intended victims. Access through trusted third party relationship abuses an existing connection that may not be protected or receives less scrutiny than standard mechanisms of gaining access to a network.","checks_status": {"fail": 3,"pass": 15,"total": 18,"manual": 0}},"T1201": {"name": "Password Policy Discovery","checks": {"iam_policy_allows_privilege_escalation": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Significant","Comment": "Ensure least privilege in IAM since password policies can be discovered in cloud environments using available APIs such as GetAccountPasswordPolicy in AWS.","Category": "Protect","AWSService": "AWS IAM"}],"description": "Adversaries may attempt to access detailed information about the password policy used within an enterprise network or cloud environment. Password policies are a way to enforce complex passwords that are difficult to guess or crack through Brute Force. 
This information may help the adversary to create a list of common passwords and launch dictionary and/or brute force attacks that adhere to the policy (e.g. if the minimum password length should be 8, then not trying passwords such as 'pass123'; not checking for more than 3-4 passwords per account if the lockout is set to 6 so as to not lock out accounts).","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"T1204": {"name": "User Execution","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "This control provides significant coverage for one of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Detect","AWSService": "AWS Config"}],"description": "An adversary may rely upon specific actions by a user in order to gain execution. Users may be subjected to social engineering to get them to execute malicious code by, for example, opening a malicious document file or link. These user actions will typically be observed as follow-on behavior from forms of Phishing.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"T1213": {"name": "Data from Information Repositories","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may leverage information repositories to mine valuable information. Information repositories are tools that allow for storage of information, typically to facilitate collaboration or information sharing between users, and can store a wide variety of data that may aid adversaries in further objectives, or direct access to the target information. Adversaries may also abuse external sharing features to share sensitive documents with recipients outside of the organization.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1485": {"name": "Data Destruction","checks": {"drs_job_exist": "FAIL","backup_plans_exist": "PASS","s3_bucket_object_lock": "FAIL","efs_have_backup_enabled": "FAIL","s3_bucket_no_mfa_delete": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","config_recorder_all_regions_enabled": null,"s3_bucket_policy_public_write_access": "PASS","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS CloudEndure Disaster Recovery enables the replication and recovery of servers into AWS Cloud. In the event that data on servers is destroyed, AWS CloudEndure can be used to provision an instance of the server from a previous point in time within minutes. As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS CloudEndure Disaster Recovery"},{"Value": "Partial","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to prevent malicious write access to data within Amazon Simple Storage Service (S3) storage, which may include data destruction: 's3-bucket-blacklisted-actions-prohibited' checks whether bucket policies prohibit disallowed actions (including S3:DeleteObject) for principals from other AWS accounts, 's3-bucket-default-lock-enabled' checks whether a bucket that should be locked in write-once-read-many (WORM) mode is configured to prevent modification, and 's3-bucket-public-write-prohibited' checks whether a bucket is configured to allow public access and modification.
All of these controls are run on configuration changes. The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure backups and redundancy are in place which can mitigate the effects of data destruction: 'aurora-mysql-backtracking-enabled' for data in Aurora MySQL; 'db-instance-backup-enabled' and 'rds-in-backup-plan' for Amazon Relational Database Service (RDS) data; 'dynamodb-in-backup-plan' and 'dynamodb-pitr-enabled' for Amazon DynamoDB table contents; 'ebs-in-backup-plan' for Elastic Block Store (EBS) volumes; 'efs-in-backup-plan' for Amazon Elastic File System (EFS) file systems; 'elasticache-redis-cluster-automatic-backup-check' for Amazon ElastiCache Redis cluster data; 'redshift-backup-enabled' and 'redshift-cluster-maintenancesettings-check' for Redshift; 's3-bucket-replication-enabled' and 's3-bucket-versioning-enabled' for S3 storage; and 'cloudfront-origin-failover-enabled' for CloudFront. The following AWS Config managed rules provide specific detections for configuration problems that should be fixed in order to prevent malicious deletion of specific data: 'elb-deletion-protection-enabled' for Elastic Block Store (EBS) volumes, and 'rds-cluster-deletion-protection-enabled' and 'rds-instance-deletion-protection-enabled' for RDS data. Coverage factor is partial for these rules, since they are specific to a subset of the available AWS services and will only protect certain types of data against destruction, resulting in an overall score of Partial.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "AWS RDS generates events for database instances and includes the following events that may indicate that an adversary has destroyed the database instance. RDS-EVENT-0003: The DB instance has been deleted RDS-EVENT-0041: A DB snapshot has been deleted. This mapping is given a score of Partial because it can't differentiate between an authorized and unauthorized deletion.","Category": "Detect","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "AWS RDS provides deletion protection which prevents any user from deleting a database instance. If applied, the setting may mitigate attempts to delete a database instance. As a result, this mapping is given a score of Significant.","Category": "Protect","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "AWS RDS supports the replication and recovery of database instances. In the event that a database instance is deleted, AWS RDS can be used to restore the database instance to a previous point in time. As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "AWS S3 may protect against data destruction through application of several best practices. Multi-factor authentication can be enabled for delete operations and for changing the versioning state of a bucket. Versioning can be enabled to revert objects to a previous state after malicious destruction or corruption. S3 Object Lock can help prevent objects from being deleted or overwritten for a fixed amount of time or indefinitely. 
In addition, S3 Cross Region Replication can be used to replicate S3 buckets to another AWS region for added protection.","Category": "Protect","AWSService": "AWS S3"},{"Value": "Minimal","Comment": "AWS Security Hub performs a check from the AWS Foundations CIS Benchmark that, if implemented, would help towards detecting the scheduled destruction of Customer Master Keys (CMKs) which are critical for being able to decrypt data. AWS Security Hub provides this detection with the following check. Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created CMKs. This is scored as Minimal because CMKs only represent one type of data that could be destroyed by an adversary.","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Partial","Comment": "The following GuardDuty finding type flags events where adversaries may destroy data and files on specific systems or in large numbers on a network to interrupt availability to systems, services, and network resources. Impact:S3/MaliciousIPCaller, Impact:IAMUser/AnomalousBehavior Stealth:S3/ServerAccessLoggingDisabled UnauthorizedAccess:S3/MaliciousIPCaller.Custom UnauthorizedAccess:S3/TorIPCaller PenTest:S3/PentooLinux PenTest:S3/ParrotLinux PenTest:S3/KaliLinux.","Category": "Detect","AWSService": "Amazon GuardDuty"}],"description": "Adversaries may destroy data and files on specific systems or in large numbers on a network to interrupt availability to systems, services, and network resources. Data destruction is likely to render stored data irrecoverable by forensic techniques through overwriting files or data on local and remote drives.[1][2][3][4][5][6] Common operating system file deletion commands such as del and rm often only remove pointers to files without wiping the contents of the files themselves, making the files recoverable by proper forensic methodology. This behavior is distinct from Disk Content Wipe and Disk Structure Wipe because individual files are destroyed rather than sections of a storage disk or the disk's logical structure.","checks_status": {"fail": 6,"pass": 3,"total": 12,"manual": 0}},"T1486": {"name": "Data Encrypted for Impact","checks": {"drs_job_exist": "FAIL","backup_plans_exist": "PASS","s3_bucket_object_lock": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"config_recorder_all_regions_enabled": null,"s3_bucket_policy_public_write_access": "PASS"},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS CloudEndure Disaster Recovery enables the replication and recovery of servers into AWS Cloud. In the event that data on servers is destroyed, AWS CloudEndure can be used to provision an instance of the server from a previous point in time within minutes.
As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS CloudEndure Disaster Recovery"},{"Value": "Partial","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to prevent malicious changes to data encryption within Amazon Simple Storage Service (S3) storage: 's3-bucket-blacklisted-actions-prohibited' checks whether bucket policies prohibit disallowed actions (including encryption configuration changes) for principals from other AWS accounts, 's3-bucket-default-lock-enabled' checks whether a bucket that should be locked in write-once-read-many (WORM) mode is configured to prevent modification, and 's3-bucket-public-write-prohibited' checks whether a bucket is configured to allow public access and modification. All of these controls are run on configuration changes. The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure backups and redundancy are in place which can mitigate the effects of malicious changes to data encryption: 'aurora-mysql-backtracking-enabled' for data in Aurora MySQL; 'db-instance-backup-enabled' and 'rds-in-backup-plan' for Amazon Relational Database Service (RDS) data; 'dynamodb-in-backup-plan' and 'dynamodb-pitr-enabled' for Amazon DynamoDB table contents; 'ebs-in-backup-plan' for Elastic Block Store (EBS) volumes; 'efs-in-backup-plan' for Amazon Elastic File System (EFS) file systems; 'elasticache-redis-cluster-automatic-backup-check' for Amazon ElastiCache Redis cluster data; 'redshift-backup-enabled' and 'redshift-cluster-maintenancesettings-check' for Redshift; 's3-bucket-replication-enabled' and 's3-bucket-versioning-enabled' for S3 storage; and 'cloudfront-origin-failover-enabled' for CloudFront. Coverage factor is partial for these rules, since they are specific to a subset of the available AWS services and will only protect certain types of data against malicious encryption changes, resulting in an overall score of Partial.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Significant","Comment": "AWS RDS supports the replication and recovery of database instances. In the event that a database instance is encrypted by an adversary (e.g., ransomware), AWS RDS can be used to restore the database instance to a previous point in time. As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS RDS"},{"Value": "Partial","Comment": "The following GuardDuty finding type flags events where adversaries may encrypt data on target systems or on large numbers of systems in a network to interrupt availability to system and network resources. Impact:S3/MaliciousIPCaller Stealth:S3/ServerAccessLoggingDisabled UnauthorizedAccess:S3/MaliciousIPCaller.Custom UnauthorizedAccess:S3/TorIPCaller PenTest:S3/PentooLinux PenTest:S3/ParrotLinux PenTest:S3/KaliLinux","Category": "Detect","AWSService": "Amazon GuardDuty"}],"description": "Adversaries may encrypt data on target systems or on large numbers of systems in a network to interrupt availability to system and network resources. They can attempt to render stored data inaccessible by encrypting files or data on local and remote drives and withholding access to a decryption key. 
This may be done in order to extract monetary compensation from a victim in exchange for decryption or a decryption key (ransomware) or to render data permanently inaccessible in cases where the key is not saved or transmitted.","checks_status": {"fail": 4,"pass": 3,"total": 9,"manual": 0}},"T1490": {"name": "Inhibit System Recovery","checks": {"drs_job_exist": "FAIL","rds_instance_backup_enabled": "PASS"},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS CloudEndure Disaster Recovery enables the replication and recovery of servers into AWS Cloud. In the event that servers are defaced, AWS CloudEndure can be used to provision an instance of the server from a previous point in time within minutes. This mapping is given a score of Significant because it supports all of the sub-techniques (2 of 2).","Category": "Respond","AWSService": "AWS CloudEndure Disaster Recovery"},{"Value": "Partial","Comment": "AWS RDS generates events for database instances and includes the following event that may indicate that an adversary has attempted to inhibit system recovery. RDS-EVENT-0028: Automatic backups for this DB instance have been disabled. This mapping is given a score of Partial because it can't differentiate between an authorized and unauthorized disabling of automatic backups.","Category": "Detect","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "AWS RDS supports the replication and recovery of database instances. In the event that a database instance is compromised and modified to disrupt recovery, AWS RDS can be used to restore the database instance to a previous point in time. As a result, this mapping is given a score of Significant.","Category": "Respond","AWSService": "AWS RDS"}],"description": "Adversaries may delete or remove built-in data and turn off services designed to aid in the recovery of a corrupted system to prevent recovery.[1][2] This may deny access to available backups and recovery options.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"T1491": {"name": "Defacement","checks": {"drs_job_exist": "FAIL","config_recorder_all_regions_enabled": null},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS CloudEndure Disaster Recovery enables the replication and recovery of servers into AWS Cloud. In the event that servers are defaced, AWS CloudEndure can be used to provision an instance of the server from a previous point in time within minutes. This mapping is given a score of Significant because it supports all of the sub-techniques (2 of 2).","Category": "Respond","AWSService": "AWS CloudEndure Disaster Recovery"},{"Value": "Significant","Comment": "This control provides significant coverage for all of this technique's sub-techniques, resulting in an overall score of Significant.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "GuardDuty provides multiple finding types that flag malicious activity against resources. These findings focus on API calls that look suspicious and although they do not flag events such as Defacement specifically, it can be inferred that these findings can result in mitigating this technique's negative impact. With this assumption the score is capped at Partial.","Category": "Detect","AWSService": "Amazon GuardDuty"}],"description": "Adversaries may modify visual content available internally or externally to an enterprise network, thus affecting the integrity of the original content. 
Reasons for Defacement include delivering messaging, intimidation, or claiming (possibly false) credit for an intrusion. Disturbing or offensive images may be used as a part of Defacement in order to cause user discomfort, or to pressure compliance with accompanying messages.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"T1496": {"name": "Resource Hijacking","checks": {"guardduty_is_enabled": "PASS","config_recorder_all_regions_enabled": null,"cloudwatch_log_metric_filter_root_usage": null,"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Value": "Partial","Comment": "AWS CloudWatch provides various metrics including CPU utilization, connections, disk space, memory, bytes sent/received, and the number of running containers among others. The following metrics (not an exhaustive list) could be used to detect if the usage of a resource has increased such as when an adversary hijacks a resource to perform intensive tasks. Linux/Mac OS ------------- cpu_time_active cpu_time_guest cpu_usage_active cpu_usage_guest disk_free disk_total disk_used ethtool_bw_in_allowance_exceeded ethtool_bw_out_allowance_exceeded ethtool_conntrack_allowance_exceeded mem_active mem_available_percent mem_free net_bytes_recv net_bytes_sent net_packets_sent net_packets_recv netstat_tcp_established netstat_tcp_listen processes_running processes_total swap_free swap_used. Containers ---------- CpuUtilized MemoryUtilized NetworkRxBytes NetworkTxBytes node_cpu_usage_total node_cpu_utilization node_filesystem_utilization node_memory_utilization. This mapping is given a score of Partial because it is not possible to differentiate between an authorized and unauthorized increase in resource utilization.","Category": "Detect","AWSService": "AWS CloudWatch"},{"Value": "Partial","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure alarms exist for spikes in resource utilization, which help to identify malicious use of resources within a cloud environment: 'cloudwatch-alarm-action-check', 'cloudwatch-alarm-resource-check', 'cloudwatch-alarm-settings-check', 'desired-instance-tenancy', 'desired-instance-type', 'dynamodb-autoscaling-enabled', 'dynamodb-throughput-limit-check', 'ec2-instance-detailed-monitoring-enabled', and 'rds-enhanced-monitoring-enabled'. 
Coverage factor is partial for these rules, since they are specific to a subset of the available AWS services and will only detect resource hijacking that results in a change in utilization that is significant enough to trigger alarms, resulting in an overall score of Partial.","Category": "Detect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "The following AWS IoT Device Defender device-side detection metrics can detect indicators that an adversary may be leveraging compromised AWS IoT devices' resources to perform resource-intensive operations like mining cryptocurrency or performing denial of service attacks on other environments: 'Destination IPs' ('aws:destination-ip-addresses') outside of expected IP address ranges may suggest that a device is communicating with unexpected parties. 'Bytes in' ('aws:all-bytes-in'), 'Bytes out' ('aws:all-bytes-out'), 'Packets in' ('aws:all-packets-in'), and 'Packets out' ('aws:all-packets-out') values outside of expected norms may indicate that the device is sending and/or receiving non-standard traffic, which may include traffic related to resource hijacking activities. 'Listening TCP ports' ('aws:listening-tcp-ports'), 'Listening TCP port count' ('aws:num-listening-tcp-ports'), 'Established TCP connections count' ('aws:num-established-tcp-connections'), 'Listening UDP ports' ('aws:listening-udp-ports'), and 'Listening UDP port count' ('aws:num-listening-udp-ports') values outside of expected norms may indicate that devices are communicating via unexpected ports/protocols which may include traffic related to resource hijacking activities. Coverage factor is partial, since these metrics are limited to IoT device hijacking, resulting in an overall score of Partial.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Partial","Comment": "The following GuardDuty finding types flag events where adversaries may leverage the resources of co-opted systems in order to solve resource intensive problems which may impact system and/or hosted service availability. 
CryptoCurrency:EC2/BitcoinTool.B CryptoCurrency:EC2/BitcoinTool.B!DNS Impact:EC2/BitcoinDomainRequest.Reputation UnauthorizedAccess:EC2/TorRelay","Category": "Detect","AWSService": "Amazon GuardDuty"}],"description": "Adversaries may leverage the resources of co-opted systems in order to solve resource intensive problems, which may impact system and/or hosted service availability.","checks_status": {"fail": 1,"pass": 1,"total": 18,"manual": 0}},"T1498": {"name": "Network Denial of Service","checks": {"guardduty_is_enabled": "PASS","networkfirewall_in_all_vpc": "FAIL","config_recorder_all_regions_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","shield_advanced_protection_in_global_accelerators": null,"shield_advanced_protection_in_route53_hosted_zones": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","shield_advanced_protection_in_internet_facing_load_balancers": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Value": "Minimal","Comment": "This control provides minimal coverage for this technique's sub-techniques as well as its procedures, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Minimal","Comment": "AWS Network Firewall has the ability to pass, drop, or alert on traffic based on the network protocol as well as perform deep packet inspection on the payload. This functionality can be used to block the sources of smaller-scale network denial of service attacks. While AWS Network Firewall supports both sub-techniques (2 of 2), this mapping is given a score of Minimal because oftentimes it is necessary to block the traffic at an Internet Service Provider or Content Provider Network level.","Category": "Protect","AWSService": "AWS Network Firewall"},{"Value": "Significant","Comment": "AWS Shield is a service that protects against Distributed Denial of Service attacks. There are two tiers for this service: Standard and Advanced. 
AWS Shield Standard defends against most common, frequently occurring network and transport layer (Layer 3 and 4) DDoS attacks that target your website or applications. AWS Shield Advanced adds on to Standard by providing additional detection and mitigation against large and sophisticated DDoS attacks. There is near real-time visibility into attacks. AWS Shield Advanced also comes with 24x7 access to the AWS DDoS Response Team (DRT).","Category": "Respond","AWSService": "AWS Shield"},{"Value": "Partial","Comment": "The following finding types in GuardDuty flag events where adversaries may perform Network Denial of Service (DoS) attacks to degrade or block the availability of targeted resources to users. Backdoor:EC2/DenialOfService.UdpOnTcpPorts Backdoor:EC2/DenialOfService.UnusualProtocol Backdoor:EC2/DenialOfService.Udp Backdoor:EC2/DenialOfService.Tcp Backdoor:EC2/DenialOfService.Dns","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Minimal","Comment": "VPC security groups and network access control lists (NACLs) can be used to restrict access to endpoints but will prove effective at mitigating only low-end DoS attacks, resulting in a Minimal score.","Category": "Protect","AWSService": "Amazon Virtual Private Cloud"}],"description": "Adversaries may perform Network Denial of Service (DoS) attacks to degrade or block the availability of targeted resources to users. Network DoS can be performed by exhausting the network bandwidth services rely on. Example resources include specific websites, email services, DNS, and web-based applications. Adversaries have been observed conducting network DoS attacks for political purposes[1] and to support other malicious activities, including distraction[2], hacktivism, and extortion.","checks_status": {"fail": 4,"pass": 16,"total": 27,"manual": 0}},"T1499": {"name": "Endpoint Denial of Service","checks": {"networkfirewall_in_all_vpc": "FAIL","config_recorder_all_regions_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","shield_advanced_protection_in_global_accelerators": null,"shield_advanced_protection_in_route53_hosted_zones": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","shield_advanced_protection_in_internet_facing_load_balancers": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": 
"PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "AWS Shield is a service that protects against Distributed Denial of Service attacks. There are two tiers for this service: Standard and Advanced. AWS Shield Standard defends against most common, frequently occurring network and transport layer (Layer 3 and 4) DDoS attacks that target your website or applications. AWS Shield Advanced adds on to Standard by providing additional detection and mitigation against large and sophisticated DDoS attacks. There is near real-time visibility into attacks. AWS Shield Advanced also comes with 24x7 access to the AWS DDoS Response Team (DRT).","Category": "Respond","AWSService": "AWS Shield"},{"Value": "Minimal","Comment": "This control provides minimal coverage for this technique's sub-techniques as well as its procedures, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "AWS Network Firewall has the ability to pass, drop, or alert on traffic based on the network protocol as well as perform deep packet inspection on the payload. This functionality can be used to block adversaries from carrying out denial of service attacks by implementing restrictions on which IP addresses and domains can access the resources (e.g., allow lists) as well as which protocol traffic is permitted. That is, the AWS Network Firewall could block the source of the denial of service attack. This mapping is given a score of Partial because it only supports a subset of the sub-techniques (3 of 4) and because the source of the attack would have to be known before rules could be put in place to protect against it.","Category": "Protect","AWSService": "AWS Network Firewall"},{"Value": "Minimal","Comment": "VPC security groups and network access control lists (NACLs) provide minimal protection for a majority of this control's sub-techniques and procedure examples, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "Amazon Virtual Private Cloud"}],"description": "Adversaries may perform Endpoint Denial of Service (DoS) attacks to degrade or block the availability of services to users. Endpoint DoS can be performed by exhausting the system resources those services are hosted on or exploiting the system to cause a persistent crash condition. Example services include websites, email services, DNS, and web-based applications. Adversaries have been observed conducting DoS attacks for political purposes[1] and to support other malicious activities, including distraction[2], hacktivism, and extortion.","checks_status": {"fail": 4,"pass": 15,"total": 26,"manual": 0}},"T1518": {"name": "Software Discovery","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may attempt to get a listing of software and software versions that are installed on a system or in a cloud environment. 
Adversaries may use the information from Software Discovery during automated discovery to shape follow-on behaviors, including whether or not the adversary fully infects the target and/or attempts specific actions.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1525": {"name": "Implant Internal Image","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "The following AWS Config managed rules can identify running instances that are not using AMIs within a specified allow list: 'approved-amis-by-id' and 'approved-amis-by-tag', both of which are run on configuration changes. This does not provide detection of the image implanting itself, but does provide detection for any subsequent use of images that are implanted and not present within the allow list, resulting in a score of Minimal.","Category": "Detect","AWSService": "AWS Config"}],"description": "Adversaries may implant cloud or container images with malicious code to establish persistence after gaining access to an environment. Amazon Web Services (AWS) Amazon Machine Images (AMIs), Google Cloud Platform (GCP) Images, and Azure Images as well as popular container runtimes such as Docker can be implanted or backdoored. Unlike Upload Malware, this technique focuses on adversaries implanting an image in a registry within a victim's environment. Depending on how the infrastructure is provisioned, this could provide persistent access if the infrastructure provisioning tool is instructed to always use the latest image.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"T1526": {"name": "Cloud Service Discovery","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Value": "Partial","Comment": "GuardDuty has the following finding types to flag events where there is an attempt to discover information about resources on the account. Recon:IAMUser/MaliciousIPCaller Recon:IAMUser/MaliciousIPCaller.Custom Recon:IAMUser/TorIPCaller","Category": "Detect","AWSService": "Amazon GuardDuty"}],"description": "An adversary may attempt to enumerate the cloud services running on a system after gaining access. These methods can differ from platform-as-a-service (PaaS), to infrastructure-as-a-service (IaaS), or software-as-a-service (SaaS). Many services exist throughout the various cloud providers and can include Continuous Integration and Continuous Delivery (CI/CD), Lambda Functions, Azure AD, etc. 
They may also include security services, such as AWS GuardDuty and Microsoft Defender for Cloud, and logging services, such as AWS CloudTrail and Google Cloud Audit Logs.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"T1530": {"name": "Data from Cloud Storage","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","s3_bucket_public_access": null,"networkfirewall_in_all_vpc": "FAIL","efs_not_publicly_accessible": "FAIL","rds_snapshots_public_access": "PASS","s3_bucket_default_encryption": "PASS","rds_instance_no_public_access": "PASS","efs_encryption_at_rest_enabled": "FAIL","emr_cluster_publicly_accesible": null,"rds_instance_storage_encrypted": "FAIL","redshift_cluster_public_access": null,"rds_instance_transport_encrypted": "FAIL","config_recorder_all_regions_enabled": null,"s3_bucket_level_public_access_block": "PASS","s3_account_level_public_access_blocks": null,"sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"sagemaker_training_jobs_intercontainer_encryption_enabled": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Value": "Significant","Comment": "The following AWS Config managed rules can identify configuration problems that should be fixed in order to prevent malicious access of data within Amazon Simple Storage Service (S3) storage: 's3-account-level-public-access-blocks', 's3-bucket-level-public-access-prohibited', 's3-bucket-public-read-prohibited', 's3-bucket-policy-not-more-permissive', 'cloudfront-origin-access-identity-enabled', and 'cloudfront-default-root-object-configured' identify objects that are publicly available or subject to overly permissive access policies; 's3-bucket-blacklisted-actions-prohibited' checks whether bucket policies prohibit disallowed actions for principals from other AWS accounts; and 's3-bucket-policy-grantee-check' checks whether bucket policies appropriately control which AWS principals, federated users, service principals, IP addresses, and VPCs have access. All of these controls are run on configuration changes. The following AWS Config managed rules can identify configuration problems that should be fixed in order to prevent malicious access of data from other AWS services: 'dms-replication-not-public' for AWS Database Migration Service; 'emr-master-no-public-ip' for Amazon Elastic MapReduce (EMR); 'rds-cluster-iam-authentication-enabled', 'rds-instance-iam-authentication-enabled', 'rds-instance-public-access-check' and 'rds-snapshots-public-prohibited' for Amazon Relational Database Service; 'redshift-cluster-public-access-check' for Amazon Redshift; and 'sagemaker-notebook-no-direct-internet-access' for SageMaker. 
The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure that cloud storage data are encrypted to prevent malicious access: 'dax-encryption-enabled', 'dynamodb-table-encrypted-kms', and 'dynamodb-table-encryption-enabled' for Amazon DynamoDB table contents; 'efs-encrypted-check' for Amazon Elastic File System (EFS) file systems; 'elasticsearch-encrypted-at-rest' for Elasticsearch Service (ES) domains; 'rds-snapshot-encrypted' and 'rds-storage-encrypted' for Amazon Relational Database Service; 's3-bucket-server-side-encryption-enabled' and 's3-default-encryption-kms' for S3 storage; 'sns-encrypted-kms' for Amazon Simple Notification Service (SNS); 'redshift-cluster-configuration-check' and 'redshift-cluster-kms-enabled' for Redshift clusters; 'sagemaker-endpoint-configuration-kms-key-configured' and 'sagemaker-notebook-instance-kms-key-configured' for SageMaker. These rules provide a wide range of coverage for many AWS services, especially those most significant to procedures for this technique, resulting in an overall score of Significant.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Partial","Comment": "The following AWS IoT Device Defender cloud-side detection metrics can detect indicators that an adversary may be leveraging compromised AWS IoT devices and the Message Queuing Telemetry Transport (MQTT) protocol for unauthorized data transfer from cloud-side data sources: 'Source IP' ('aws:source-ip-address') values outside of expected IP address ranges may suggest that a device has been stolen. 'Messages sent' ('aws:num-messages-sent'), 'Messages received' ('aws:num-messages-received'), and 'Message size' ('aws:message-byte-size') values outside of expected norms may indicate that devices are sending and/or receiving non-standard traffic, which may include data retrieved from cloud storage. The following AWS IoT Device Defender device-side detection metrics can detect indicators that an adversary may be leveraging compromised AWS IoT devices and the Message Queuing Telemetry Transport (MQTT) protocol for unauthorized data transfer from cloud-side data sources: 'Bytes in' ('aws:all-bytes-in'), 'Bytes out' ('aws:all-bytes-out'), 'Packets in' ('aws:all-packets-in'), and 'Packets out' ('aws:all-packets-out') values outside of expected norms may indicate that devices are sending and/or receiving non-standard traffic, which may include data retrieved from cloud storage. Coverage factor is partial, since these metrics are limited to IoT device-based collection, resulting in an overall score of Partial.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Partial","Comment": "AWS Network Firewall has the ability to pass, drop, or alert on traffic based on the network protocol as well as perform deep packet inspection on the payload. This functionality can be used to block adversaries from accessing resources such as cloud storage objects by implementing restrictions on which IP addresses and domains can access the resources (e.g., allow lists). However, since cloud storage objects are located outside the virtual private cloud that AWS Network Firewall protects, the mapping is only given a score of Partial.","Category": "Protect","AWSService": "AWS Network Firewall"},{"Value": "Significant","Comment": "AWS RDS supports the encryption of the underlying storage for database instances, backups, read replicas, and snapshots using the AES-256 encryption algorithm. 
This can prevent an adversary from gaining access to a database instance in the event they get access to the underlying system where the database instance is hosted or to S3 where the backups are stored. Furthermore, with AWS RDS, there is a setting that specifies whether or not a database instance is publicly accessible. When public accessibility is turned off, the database instance will not be available outside the VPC in which it was created. As a result, this mapping is given a score of Significant.","Category": "Protect","AWSService": "AWS RDS"},{"Value": "Significant","Comment": "S3 provides full control of access via Identity and Access Management (IAM) policies and with its access control lists (ACLs). The S3 Block Public Access feature allows for policies limiting public access to Amazon S3 resources that are enforced regardless of how the resources are created or associated IAM policies. Server-side encryption can be enabled for data at rest and allows for use of S3-managed keys, AWS Key Management Service managed keys, or customer-provided keys.","Category": "Protect","AWSService": "AWS S3"},{"Value": "Partial","Comment": "AWS Security Hub detects improperly secured data from S3 buckets such as public read and write access that may result in an adversary getting access to data in cloud storage. AWS Security Hub provides this detection with the following managed insight. S3 buckets with public write or read permissions. AWS Security Hub also performs checks from the AWS Foundations CIS Benchmark that, if implemented, would help towards detecting improperly secured S3 buckets which could result in them being discovered. AWS Security Hub provides this detection with the following check. 3.8 Ensure a log metric filter and alarm exist for S3 bucket policy changes. This is scored as Partial because it only detects when S3 buckets have public read or write access and doesn't detect improperly secured data in other storage types (e.g., DBs, NFS, etc.).","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Partial","Comment": "The following GuardDuty finding types flag events where adversaries may have accessed data objects from improperly secured cloud storage. UnauthorizedAccess:S3/MaliciousIPCaller.Custom UnauthorizedAccess:S3/TorIPCaller Impact:S3/MaliciousIPCaller Exfiltration:S3/MaliciousIPCaller Exfiltration:S3/ObjectRead.Unusual PenTest:S3/KaliLinux PenTest:S3/ParrotLinux PenTest:S3/PentooLinux.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Minimal","Comment": "The following Macie findings can detect the collection of data from S3 buckets: Policy:IAMUser/S3BlockPublicAccessDisabled Policy:IAMUser/S3BucketEncryptionDisabled Policy:IAMUser/S3BucketPublic Policy:IAMUser/S3BucketReplicatedExternally Policy:IAMUser/S3BucketSharedExternally. This type of detection is limited to only the S3 storage type and not other storage types available on the platform (such as file or block storage) and therefore has Minimal coverage resulting in a Minimal score.","Category": "Detect","AWSService": "Amazon Macie"},{"Value": "Minimal","Comment": "The following Macie findings can protect against collection of sensitive data from S3 buckets: SensitiveData:S3Object/Credentials SensitiveData:S3Object/CustomIdentifier SensitiveData:S3Object/Financial SensitiveData:S3Object/Multiple SensitiveData:S3Object/Personal. 
The ability to discover this type of sensitive data stored in a bucket may lead to hardening steps or removing the data altogether which would prevent an adversary from being able to collect the data. This type of protection is limited to only the S3 storage type and not other storage types available on the platform (such as file or block storage) and therefore has Minimal coverage resulting in a Minimal score.","Category": "Protect","AWSService": "Amazon Macie"}],"description": "Adversaries may collect sensitive data from cloud storage solutions. Providers typically offer security guides to help end users configure systems, though misconfigurations are a common problem.[5][6][7] There have been numerous incidents where cloud storage has been improperly secured, typically by unintentionally allowing public access to unauthenticated users, overly-broad access by all users, or even access for any anonymous person outside the control of the Identity Access Management system without even needing basic user permissions.","checks_status": {"fail": 6,"pass": 6,"total": 22,"manual": 0}},"T1535": {"name": "Unused/Unsupported Cloud Regions","checks": {"organizations_scp_check_deny_regions": null},"status": "PASS","attributes": [],"description": "Adversaries may create cloud instances in unused geographic service regions in order to evade detection. Access is usually obtained through compromising accounts used to manage cloud infrastructure.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"T1537": {"name": "Transfer Data to Cloud Account","checks": {"macie_is_enabled": "PASS"},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "The following Macie findings can detect attempts to replicate data objects from a monitored bucket to an Amazon Web Services account that isn't part of your organization: Policy:IAMUser/S3BucketReplicatedExternally Policy:IAMUser/S3BucketSharedExternally. This type of detection is limited to only the S3 storage type and not other storage types available on the platform (such as file or block storage) and therefore has Minimal coverage resulting in a Minimal score.","Category": "Detect","AWSService": "Amazon Macie"}],"description": "Adversaries may exfiltrate data by transferring the data, including backups of cloud environments, to another cloud account they control on the same service to avoid typical file transfers/downloads and network-based exfiltration detection.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"T1538": {"name": "Cloud Service Dashboard","checks": {"iam_user_mfa_enabled_console_access": null,"organizations_account_part_of_organizations": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Partial","Comment": "This control may protect against cloud service dashboard abuse by segmenting accounts into separate organizational units and restricting dashboard access by least privilege.","Category": "Protect","AWSService": "AWS Organizations"},{"Value": "Significant","Comment": "The 'mfa-enabled-for-iam-console-access' managed rule checks whether multi-factor authentication is enabled for all AWS IAM users that use a console password, protecting against misuse of those accounts' dashboard access. 
It is run periodically, and provides significant coverage, resulting in an overall score of Significant.","Category": "Protect","AWSService": "AWS Config"}],"description": "An adversary may use a cloud service dashboard GUI with stolen credentials to gain useful information from an operational cloud environment, such as specific services, resources, and features. For example, the GCP Command Center can be used to view all assets, findings of potential security risks, and to run additional queries, such as finding public IP addresses and open ports.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"T1546": {"name": "Event Triggered Execution","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may establish persistence and/or elevate privileges using system mechanisms that trigger execution based on specific events. Various operating systems have means to monitor and subscribe to events such as logons or other user activity such as running specific applications/binaries. Cloud environments may also support various functions and services that monitor and can be invoked in response to specific cloud events.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1550": {"name": "Use Alternate Authentication Material","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_user_console_access_unused": null,"iam_user_two_active_access_key": null,"iam_policy_no_full_access_to_kms": null,"iam_administrator_access_with_mfa": null,"iam_user_mfa_enabled_console_access": null,"iam_user_no_setup_initial_access_key": null,"iam_policy_allows_privilege_escalation": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Minimal","Comment": "This control provides significant coverage for one of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Protect","AWSService": "AWS IAM"}],"description": "Adversaries may use alternate authentication material, such as password hashes, Kerberos tickets, and application access tokens, in order to move laterally within an environment and bypass normal system access controls.","checks_status": {"fail": 0,"pass": 0,"total": 17,"manual": 0}},"T1552": {"name": "Unsecured Credentials","checks": {"macie_is_enabled": "PASS","guardduty_is_enabled": "PASS","ssm_document_secrets": "PASS","ec2_instance_imdsv2_enabled": "PASS","ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","awslambda_function_no_secrets_in_code": "PASS","cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudformation_stack_outputs_find_secrets": "PASS","secretsmanager_automatic_rotation_enabled": "FAIL","awslambda_function_no_secrets_in_variables": "PASS","ecs_task_definitions_no_environment_secrets": "PASS","autoscaling_find_secrets_ec2_launch_configuration": "PASS","eks_cluster_kms_cmk_encryption_in_secrets_enabled": null},"status": "FAIL","attributes": [{"Value": "Minimal","Comment": "This control's protection is specific to a minority of this technique's sub-techniques and procedure examples resulting in a Minimal Coverage score and consequently an overall score of Minimal.","Category": 
"Protect","AWSService": "AWS CloudHSM"},{"Value": "Significant","Comment": "The following AWS Config managed rules can identify insecure plaintext credentials within specific parts of a cloud environment: 'codebuild-project-envvar-awscred-check' for credentials (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) stored within environment variables, 'codebuild-project-source-repo-url-check' for personal access tokens and/or credentials within source repository URLs. The following AWS Config managed rules can identify configuration problems that should be fixed in order to ensure that the contents of secrets in AWS Secrets Manager (including credentials) are properly secured to avoid adversary access: 'secretsmanager-rotation-enabled-check', 'secretsmanager-scheduled-rotation-success-check', 'secretsmanager-secret-periodic-rotation', and 'secretsmanager-using-cmk'. This control provides partial coverage for a minority of this technique's sub-techniques, in addition to the parent coverage above, resulting in an overall score of Partial.","Category": "Protect","AWSService": "AWS Config"},{"Value": "Minimal","Comment": "This control provides partial coverage for a minority of this technique's sub-techniques, resulting in an overall score of Minimal.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Minimal","Comment": "This control's protection is specific to a minority of this technique's sub-techniques and procedure examples resulting in a Minimal Coverage score and consequently an overall score of Minimal.","Category": "Protect","AWSService": "AWS Key Management Service"},{"Value": "Partial","Comment": "This control is relevant for credentials stored in applications or configuration files but not credentials entered directly by a user.","Category": "Protect","AWSService": "AWS Secrets Manager"},{"Value": "Minimal","Comment": "This control provides minimal to partial coverage for a minority of this technique's sub-techniques, and without specific coverage for its procedures, resulting in an overall score of Minimal.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Minimal","Comment": "Macie only provides detection for the Credentials in Files sub-technique of this technique and only for the S3 storage type resulting in Minimal coverage and an overall Minimal score.","Category": "Protect","AWSService": "Amazon Macie"}],"description": "Adversaries may search compromised systems to find and obtain insecurely stored credentials. These credentials can be stored and/or misplaced in many locations on a system, including plaintext files (e.g. Bash History), operating system or application-specific repositories (e.g. Credentials in Registry), or other specialized files/artifacts (e.g. Private Keys).","checks_status": {"fail": 2,"pass": 11,"total": 14,"manual": 0}},"T1556": {"name": "Modify Authentication Process","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Value": "Partial","Comment": "This control provides coverage for one of this technique's SubTechniques, resulting in an overall score of Partial. Enforce MFA in IAM Users.","Category": "Protect","AWSService": "AWS IAM"}],"description": "Adversaries may modify authentication mechanisms and processes to access user credentials or enable otherwise unwarranted access to accounts. 
The authentication process is handled by mechanisms, such as the Local Security Authentication Server (LSASS) process and the Security Accounts Manager (SAM) on Windows, pluggable authentication modules (PAM) on Unix-based systems, and authorization plugins on MacOS systems, responsible for gathering, storing, and validating credentials. By modifying an authentication process, an adversary may be able to authenticate to a service or system without using Valid Accounts.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"T1562": {"name": "Impair Defenses","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","inspector2_active_findings_exist": "FAIL","config_recorder_all_regions_enabled": null},"status": "FAIL","attributes": [{"Value": "Minimal","Comment": "This control provides significant coverage for a minority of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Detect","AWSService": "AWS Config"},{"Value": "Minimal","Comment": "This control provides partial coverage for a minority of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Detect","AWSService": "AWS IoT Device Defender"},{"Value": "Minimal","Comment": "This control provides partial coverage for a minority of this technique's SubTechniques, resulting in an overall score of Minimal.","Category": "Respond","AWSService": "AWS IoT Device Defender"}],"description": "Adversaries may maliciously modify components of a victim environment in order to hinder or disable defensive mechanisms. This not only involves impairing preventative defenses, such as firewalls and anti-virus, but also detection capabilities that defenders can use to audit activity and identify malicious behavior. This may also span both native defenses as well as supplemental capabilities installed by users and administrators.","checks_status": {"fail": 2,"pass": 2,"total": 5,"manual": 0}},"T1578": {"name": "Modify Cloud Compute Infrastructure","checks": {"iam_policy_no_full_access_to_kms": null,"iam_policy_allows_privilege_escalation": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Significant","Comment": "Limit permissions for creating, deleting, and otherwise altering compute components in accordance with least privilege.","Category": "Protect","AWSService": "AWS IAM"}],"description": "An adversary may attempt to modify a cloud account's compute service infrastructure to evade defenses. 
A modification to the compute service infrastructure can include the creation, deletion, or modification of one or more components such as compute instances, virtual machines, and snapshots.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"T1580": {"name": "Cloud Infrastructure Discovery","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_policy_no_full_access_to_kms": null,"iam_policy_allows_privilege_escalation": null,"iam_policy_no_full_access_to_cloudtrail": null,"organizations_account_part_of_organizations": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Partial","Comment": "This control may protect against cloud infrastructure discovery by segmenting accounts into separate organizational units and restricting infrastructure access by least privilege.","Category": "Protect","AWSService": "AWS Organizations"},{"Value": "Partial","Comment": "AWS Security Hub detects improperly secured data from S3 buckets such as public read and write access as well as accessible EC2 instances that may result in an adversary learning about cloud infrastructure used by the organization. AWS Security Hub provides these detections with the following managed insights. S3 buckets with public write or read permissions EC2 instances that have ports accessible from the Internet EC2 instances that are open to the Internet. AWS Security Hub also performs checks from the AWS Foundations CIS Benchmark that, if implemented, would help towards detecting improperly secured S3 buckets which could result in them being discovered. AWS Security Hub provides this detection with the following check. 3.8 Ensure a log metric filter and alarm exist for S3 bucket policy changes. This is scored as Partial because S3 and EC2 only represent a subset of available cloud infrastructure components.","Category": "Detect","AWSService": "AWS Security Hub"},{"Value": "Partial","Comment": "The following GuardDuty finding types flag events that are linked to Discovery techniques and can be used to capture events where a malicious user may be searching through the account looking for available resources. The finding types are also used to flag certain signatures of running services to detect malicious user activities from commonly used pentest operating systems. Discovery:IAMUser/AnomalousBehavior Discovery:S3/MaliciousIPCaller Discovery:S3/MaliciousIPCaller.Custom Discovery:S3/TorIPCaller PenTest:IAMUser/KaliLinux PenTest:IAMUser/ParrotLinux PenTest:IAMUser/PentooLinux PenTest:S3/KaliLinux PenTest:S3/ParrotLinux PenTest:S3/PentooLinux.","Category": "Detect","AWSService": "Amazon GuardDuty"},{"Value": "Significant","Comment": "Limit IAM permissions to discover cloud infrastructure in accordance with least privilege. Organizations should limit the number of users within the organization with an IAM role that has administrative privileges, strive to reduce all permanent privileged role assignments, and conduct periodic entitlement reviews on IAM users, roles and policies.","Category": "Protect","AWSService": "AWS IAM"}],"description": "An adversary may attempt to discover infrastructure and resources that are available within an infrastructure-as-a-service (IaaS) environment. 
This includes compute service resources such as instances, virtual machines, and snapshots as well as resources of other services including the storage and database services.","checks_status": {"fail": 0,"pass": 2,"total": 10,"manual": 0}},"T1606": {"name": "Forge Web Credentials","checks": {"iam_policy_allows_privilege_escalation": null,"iam_no_custom_policy_permissive_role_assumption": null},"status": "PASS","attributes": [{"Value": "Partial","Comment": "Limit IAM permissions from calling the sts:GetFederationToken API unless explicitly required, in accordance with least privilege.","Category": "Protect","AWSService": "AWS IAM"}],"description": "Adversaries may forge credential materials that can be used to gain access to web applications or Internet services. Web applications and services (hosted in cloud SaaS environments or on-premise servers) often use session cookies, tokens, or other materials to authenticate and authorize user access.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"T1614": {"name": "System Location Discovery","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may gather information in an attempt to calculate the geographical location of a victim host. Adversaries may use the information from System Location Discovery during automated discovery to shape follow-on behaviors, including whether or not the adversary fully infects the target and/or attempts specific actions.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1619": {"name": "Cloud Storage Object Discovery","checks": {"iam_policy_allows_privilege_escalation": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Significant","Comment": "Restrict granting of permissions related to listing objects in AWS S3 Buckets to necessary accounts.","Category": "Protect","AWSService": "AWS IAM"}],"description": "Adversaries may enumerate objects in cloud storage infrastructure. Adversaries may use this information during automated discovery to shape follow-on behaviors, including requesting all or specific objects from cloud storage. Similar to File and Directory Discovery on a local host, after identifying available storage services (i.e. 
Cloud Infrastructure Discovery) adversaries may access the contents/objects stored in cloud infrastructure.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"T1621": {"name": "Multi-Factor Authentication Request Generation","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may attempt to bypass multi-factor authentication (MFA) mechanisms and gain access to accounts by generating MFA requests sent to users.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"T1648": {"name": "Serverless Execution","checks": {"iam_policy_no_full_access_to_kms": null,"iam_policy_allows_privilege_escalation": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Value": "Significant","Comment": "Remove permissions to create, modify, or run serverless resources from users that do not explicitly require them.","Category": "Protect","AWSService": "AWS IAM"}],"description": "Adversaries may abuse serverless computing, integration, and automation services to execute arbitrary code in cloud environments. Many cloud providers offer a variety of serverless resources, including compute engines, application integration services, and web servers.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"T1651": {"name": "Cloud Administration Command","checks": {},"status": "PASS","attributes": [],"description": "Adversaries may abuse cloud management services to execute commands within virtual machines or hybrid-joined devices. Resources such as AWS Systems Manager, Azure RunCommand, and Runbooks allow users to remotely run scripts in virtual machines by leveraging installed virtual machine agents. 
Similarly, in Azure AD environments, Microsoft Endpoint Manager allows Global or Intune Administrators to run scripts as SYSTEM on on-premises devices joined to the Azure AD.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}}},"requirements_passed": 18,"requirements_failed": 18,"requirements_manual": 10,"total_requirements": 46,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "6e52644d-3557-4704-9cf6-e33e4c1a316b","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "ffiec_aws","framework": "FFIEC","version": "","description": "In light of the increasing volume and sophistication of cyber threats, the Federal Financial Institutions Examination Council (FFIEC) developed the Cybersecurity Assessment Tool (Assessment), on behalf of its members, to help institutions identify their risks and determine their cybersecurity maturity.","region": "eu-west-1","requirements": {"d1-g-it-b-1": {"name": "D1.G.IT.B.1","checks": {"ec2_elastic_ip_unassigned": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d1-g-it-b-1","Section": "Cyber Risk Management and Oversight (Domain 1)","Service": "aws","SubGroup": null,"SubSection": "Governance (G)"}],"description": "An inventory of organizational assets (e.g., hardware, software, data, and systems hosted externally) is maintained.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"d4-c-co-b-2": {"name": "D4.C.Co.B.2","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d4-c-co-b-2","Section": "External Dependency Management (Domain 4)","Service": "aws","SubGroup": null,"SubSection": "Connections (C)"}],"description": "The institution ensures that third-party connections are authorized.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"d1-rm-ra-b-2": {"name": "D1.RM.RA.B.2","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "d1-rm-ra-b-2","Section": "Cyber Risk Management and Oversight (Domain 1)","Service": "aws","SubGroup": null,"SubSection": "Risk Management (RM)"}],"description": "The risk assessment identifies Internet- based systems and high-risk transactions that warrant additional authentication controls.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"d1-rm-rm-b-1": {"name": "D1.RM.Rm.B.1","checks": {"rds_instance_multi_az": "FAIL","rds_instance_backup_enabled": "PASS","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d1-rm-rm-b-1","Section": "Cyber Risk Management and Oversight (Domain 1)","Service": "aws","SubGroup": null,"SubSection": "Risk Management (RM)"}],"description": "An information security and business continuity risk management function(s) exists within the institution.","checks_status": {"fail": 1,"pass": 1,"total": 4,"manual": 0}},"d2-is-is-b-1": {"name": "D2.IS.Is.B.1","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d2-is-is-b-1","Section": "Threat Intelligence and Collaboration (Domain 2)","Service": "aws","SubGroup": null,"SubSection": "Information Sharing 
(IS)"}],"description": "Information security threats are gathered and shared with applicable internal employees.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"d2-ma-ma-b-1": {"name": "D2.MA.Ma.B.1","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d2-ma-ma-b-1","Section": "Threat Intelligence and Collaboration (Domain 2)","Service": "aws","SubGroup": null,"SubSection": "Monitoring and Analyzing (MA)"}],"description": "Information security threats are gathered and shared with applicable internal employees.","checks_status": {"fail": 7,"pass": 2,"total": 14,"manual": 0}},"d2-ma-ma-b-2": {"name": "D2.MA.Ma.B.2","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d2-ma-ma-b-2","Section": "Threat Intelligence and Collaboration (Domain 2)","Service": "aws","SubGroup": null,"SubSection": "Monitoring and Analyzing (MA)"}],"description": "Computer event logs are used for investigations once an event has occurred.","checks_status": {"fail": 5,"pass": 2,"total": 12,"manual": 0}},"d2-ti-ti-b-1": {"name": "D2.TI.Ti.B.1","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "d2-ti-ti-b-1","Section": "Threat Intelligence and Collaboration (Domain 2)","Service": "aws","SubGroup": null,"SubSection": "Threat Intelligence (TI)"}],"description": "The institution belongs or subscribes to a threat and vulnerability information-sharing source(s) that provides information on threats (e.g., FS-ISAC, US- CERT).","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"d2-ti-ti-b-2": {"name": "D2.TI.Ti.B.2","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d2-ti-ti-b-2","Section": "Threat Intelligence and Collaboration (Domain 2)","Service": "aws","SubGroup": null,"SubSection": "Threat Intelligence (TI)"}],"description": "Threat information is used to monitor threats and vulnerabilities.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"d2-ti-ti-b-3": {"name": "D2.TI.Ti.B.3","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "d2-ti-ti-b-3","Section": "Threat Intelligence and Collaboration (Domain 2)","Service": "aws","SubGroup": null,"SubSection": "Threat Intelligence (TI)"}],"description": "Threat information is used to 
enhance internal risk management and controls.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"d3-cc-pm-b-1": {"name": "D3.CC.PM.B.1","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-cc-pm-b-1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Corrective Controls (CC)"}],"description": "A patch management program is implemented and ensures that software and firmware patches are applied in a timely manner.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"d3-cc-pm-b-3": {"name": "D3.CC.PM.B.3","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-cc-pm-b-3","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Corrective Controls (CC)"}],"description": "Patch management reports are reviewed and reflect missing security patches.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"d3-dc-an-b-1": {"name": "D3.DC.An.B.1","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-an-b-1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "The institution is able to detect anomalous activities through monitoring across the environment.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"d3-dc-an-b-2": {"name": "D3.DC.An.B.2","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-dc-an-b-2","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Customer transactions generating anomalous activity alerts are monitored and reviewed.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"d3-dc-an-b-3": {"name": "D3.DC.An.B.3","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-an-b-3","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Logs of physical and/or logical access are reviewed following events.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"d3-dc-an-b-4": {"name": "D3.DC.An.B.4","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": 
null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-an-b-4","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Access to critical systems by third parties is monitored for unauthorized or unusual activity.","checks_status": {"fail": 6,"pass": 2,"total": 13,"manual": 0}},"d3-dc-an-b-5": {"name": "D3.DC.An.B.5","checks": {"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-an-b-5","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Elevated privileges are monitored.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"d3-dc-ev-b-1": {"name": "D3.DC.Ev.B.1","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-ev-b-1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "A normal network activity baseline is established.","checks_status": {"fail": 4,"pass": 2,"total": 10,"manual": 0}},"d3-dc-ev-b-2": {"name": "D3.DC.Ev.B.2","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-dc-ev-b-2","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Mechanisms (e.g., antivirus alerts, log event alerts) are in place to alert management to potential attacks.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"d3-dc-ev-b-3": {"name": "D3.DC.Ev.B.3","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-ev-b-3","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Processes are in place to monitor for the presence of unauthorized users, devices, connections, and software.","checks_status": {"fail": 1,"pass": 3,"total": 4,"manual": 0}},"d3-dc-th-b-1": {"name": "D3.DC.Th.B.1","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-dc-th-b-1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Detective Controls (DC)"}],"description": "Independent testing (including penetration testing and vulnerability scanning) is conducted according to the risk assessment for external-facing systems and the internal network.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"d3-pc-am-b-1": {"name": "D3.PC.Am.B.1","checks": {"iam_no_root_access_key": null,"ec2_instance_profile_attached": "PASS","iam_policy_attached_only_to_group_or_roles": 
null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Employee access is granted to systems and confidential data based on job responsibilities and the principles of least privilege.","checks_status": {"fail": 0,"pass": 1,"total": 6,"manual": 0}},"d3-pc-am-b-2": {"name": "D3.PC.Am.B.2","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-2","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Employee access to systems and confidential data provides for separation of duties.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"d3-pc-am-b-3": {"name": "D3.PC.Am.B.3","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_root_hardware_mfa_enabled": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-3","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Elevated privileges (e.g., administrator privileges) are limited and tightly controlled (e.g., assigned to individuals, not shared, and require stronger password controls","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"d3-pc-am-b-6": {"name": "D3.PC.Am.B.6","checks": {"iam_root_mfa_enabled": null,"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-6","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Identification and authentication are required and managed for access to systems, applications, and hardware.","checks_status": {"fail": 0,"pass": 0,"total": 16,"manual": 0}},"d3-pc-am-b-7": {"name": "D3.PC.Am.B.7","checks": {"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-7","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Access controls include password complexity and limits to password attempts and 
reuse.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"d3-pc-am-b-8": {"name": "D3.PC.Am.B.8","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-8","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "All default passwords and unnecessary default accounts are changed before system implementation.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"d3-pc-im-b-1": {"name": "D3.PC.Im.B.1","checks": {"ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","apigateway_restapi_waf_acl_attached": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-im-b-1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Network perimeter defense tools (e.g., border router and firewall) are used.","checks_status": {"fail": 5,"pass": 7,"total": 20,"manual": 0}},"d3-pc-im-b-2": {"name": "D3.PC.Im.B.2","checks": {"elbv2_waf_acl_attached": "FAIL","apigateway_restapi_waf_acl_attached": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-im-b-2","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Systems that are accessed from the Internet or by external parties are protected by firewalls or other similar devices.","checks_status": {"fail": 4,"pass": 1,"total": 6,"manual": 0}},"d3-pc-im-b-3": {"name": "D3.PC.Im.B.3","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-im-b-3","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "All ports are monitored.","checks_status": {"fail": 4,"pass": 2,"total": 6,"manual": 0}},"d3-pc-im-b-5": {"name": "D3.PC.Im.B.5","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-im-b-5","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Systems configurations (for servers, desktops, routers, etc.) 
follow industry standards and are enforced","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"d3-pc-im-b-6": {"name": "D3.PC.Im.B.6","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-im-b-6","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Ports, functions, protocols and services are prohibited if no longer needed for business purposes.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"d3-pc-im-b-7": {"name": "D3.PC.Im.B.7","checks": {"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-im-b-7","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Access to make changes to systems configurations (including virtual machines and hypervisors) is controlled and monitored.","checks_status": {"fail": 1,"pass": 1,"total": 6,"manual": 0}},"d3-pc-se-b-1": {"name": "D3.PC.Se.B.1","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-se-b1","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Developers working for the institution follow secure program coding practices, as part of a system development life cycle (SDLC), that meet industry standards.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"d5-dr-de-b-1": {"name": "D5.DR.De.B.1","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d5-dr-de-b-1","Section": "Cyber Incident Management and Resilience (Domain 5)","Service": "aws","SubGroup": null,"SubSection": "Detection, Response, & Mitigation (DR)"}],"description": "Alert parameters are set for detecting information security incidents that prompt mitigating actions.","checks_status": {"fail": 0,"pass": 2,"total": 6,"manual": 0}},"d5-dr-de-b-2": {"name": "D5.DR.De.B.2","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "d5-dr-de-b-2","Section": "Cyber Incident Management and Resilience (Domain 5)","Service": "aws","SubGroup": null,"SubSection": "Detection, Response, & Mitigation (DR)"}],"description": "System performance reports contain information that can be used as a risk indicator to detect information security incidents.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"d5-dr-de-b-3": {"name": "D5.DR.De.B.3","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": 
"FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d5-dr-de-b-3","Section": "Cyber Incident Management and Resilience (Domain 5)","Service": "aws","SubGroup": null,"SubSection": "Detection, Response, & Mitigation (DR)"}],"description": "Tools and processes are in place to detect, alert, and trigger the incident response program.","checks_status": {"fail": 5,"pass": 3,"total": 16,"manual": 0}},"d5-er-es-b-4": {"name": "D5.ER.Es.B.4","checks": {"guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d5-er-es-b-4","Section": "Cyber Incident Management and Resilience (Domain 5)","Service": "aws","SubGroup": null,"SubSection": "Escalation and Reporting (ER)"}],"description": "Incidents are classified, logged and tracked.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"d5-ir-pl-b-6": {"name": "D5.IR.Pl.B.6","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d5-ir-pl-b-6","Section": "Cyber Incident Management and Resilience (Domain 5)","Service": "aws","SubGroup": null,"SubSection": "Incident Resilience Planning & Strategy (IR)"}],"description": "The institution plans to use business continuity, disaster recovery, and data backup programs to recover operations following an incident.","checks_status": {"fail": 5,"pass": 1,"total": 8,"manual": 0}},"d3-pc-am-b-10": {"name": "D3.PC.Am.B.10","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-10","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Production and non-production environments are segregated to prevent unauthorized access or changes to information assets. 
(*N/A if no production environment exists at the institution or the institution's third party.)","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"d3-pc-am-b-12": {"name": "D3.PC.Am.B.12","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-12","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "All passwords are encrypted in storage and in transit.","checks_status": {"fail": 4,"pass": 3,"total": 10,"manual": 0}},"d3-pc-am-b-13": {"name": "D3.PC.Am.B.13","checks": {"elb_ssl_listeners": "FAIL","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-13","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Confidential data is encrypted when transmitted across public or untrusted networks (e.g., Internet).","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"d3-pc-am-b-15": {"name": "D3.PC.Am.B.15","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"s3_bucket_secure_transport_policy": "FAIL","iam_user_mfa_enabled_console_access": null,"apigateway_restapi_client_certificate_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-15","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Remote access to critical systems by employees, contractors, and third parties uses encrypted connections and multifactor authentication.","checks_status": {"fail": 2,"pass": 0,"total": 5,"manual": 0}},"d3-pc-am-b-16": {"name": "D3.PC.Am.B.16","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "d3-pc-am-b-16","Section": "Cybersecurity Controls (Domain 3)","Service": "aws","SubGroup": null,"SubSection": "Preventative Controls (PC)"}],"description": "Administrative, physical, or technical controls are in place to prevent users without administrative responsibilities from installing unauthorized software.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}}},"requirements_passed": 13,"requirements_failed": 29,"requirements_manual": 2,"total_requirements": 44,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "7058bd2a-3241-4e0e-9773-9a0136d861bc","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "cis_2.0_aws","framework": "CIS","version": "2.0","description": "The CIS Amazon Web Services Foundations Benchmark provides prescriptive guidance for configuring security options for a subset of Amazon Web Services with an emphasis on 
foundational, testable, and architecture agnostic settings.","region": "eu-west-1","requirements": {"1.1": {"name": "1.1","checks": {"account_maintain_current_contact_details": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info","Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.","DefaultValue": null,"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing )1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, review and verify the current details. 4. Under `Contact Information`, review and verify the current details.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.","RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ).1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`. 4. Next to the field that you need to update, choose `Edit`. 5. After you have entered your changes, choose `Save changes`. 6. After you have made your changes, choose `Done`. 7. To edit your contact information, under `Contact Information`, choose `Edit`. 8. 
For the fields that you want to change, type your updated information, and then choose `Update`.","AdditionalInformation": ""}],"description": "Maintain current contact details","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.2": {"name": "1.2","checks": {"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if security contact information is present:**From Console:**1. Click on your account name at the top right corner of the console 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. Ensure contact information is specified in the `Security` section","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.","RemediationProcedure": "Perform the following to establish security contact information:**From Console:**1. Click on your account name at the top right corner of the console. 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. Enter contact information in the `Security` section**Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.","AdditionalInformation": ""}],"description": "Ensure security contact information is registered","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.3": {"name": "1.3","checks": {"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS account as the 'root' user 2. On the top right you will see the __ 3. Click on the __ 4. From the drop-down menu Click `My Account`5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions. 6. Click `Save questions` .","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "When creating a new AWS account, a default super user is automatically created. This account is referred to as the 'root user' or 'root' account. It is recommended that the use of this account be limited and highly controlled. During events in which the 'root' password is no longer accessible or the MFA token associated with 'root' is lost/destroyed it is possible, through authentication using secret questions and associated answers, to recover 'root' user login access.","RemediationProcedure": "**From Console:**1. Login to the AWS Account as the 'root' user 2. Click on the __ from the top right of the console 3. From the drop-down menu Click _My Account_ 4. Scroll down to the `Configure Security Questions` section 5. Click on `Edit`6. 
Click on each `Question` - From the drop-down select an appropriate question- Click on the `Answer` section- Enter an appropriate answer - Follow process for all 3 questions 7. Click `Update` when complete 8. Save Questions and Answers and place in a secure physical location","AdditionalInformation": ""}],"description": "Ensure security questions are registered in the AWS account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.4": {"name": "1.4","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html:https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/","Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `<root_account>` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` .**From Command Line:**Run the following command: ```aws iam get-account-summary | grep \"AccountAccessKeysPresent\"``` If no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. If the output shows a \"1\" then 'root' keys exist, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role based accounts that are least privileged.","RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys**From Console:**1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. Click on __ at the top right and select `My Security Credentials` from the drop down list 3. On the pop out screen Click on `Continue to Security Credentials`4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_ 5. Under the `Status` column if there are any Keys which are Active- Click on `Make Inactive` - (Temporarily disable Key - may be needed again)- Click `Delete` - (Deleted keys cannot be recovered)","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request to AWS support enables 'root' access only through access-keys (CLI, API methods) for us-gov cloud region."}],"description": "Ensure no 'root' user account access key exists","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.5": {"name": "1.5","checks": {"iam_root_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `<root_account>` user, ensure the `mfa_active` field is set to `TRUE` .**From Command Line:**1. Run the following command: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` 2. Ensure the AccountMFAEnabled property is set to 1","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to establish MFA for the 'root' user account:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` . 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. 
For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.When you are finished, the virtual MFA device starts generating one-time passwords.In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions does not have console access. This recommendation is not applicable for us-gov cloud regions."}],"description": "Ensure MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.6": {"name": "1.6","checks": {"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup:1. Run the following command to determine if the 'root' account has MFA setup: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ```The `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled. If `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation.2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: ```aws iam list-virtual-mfa-devices``` If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: `\"SerialNumber\": \"arn:aws:iam::__:mfa/root-account-mfa-device\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides.**Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.","RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account:1. 
Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` . 5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device. 6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. 7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number. 8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.Remediation for this recommendation is not available through AWS CLI.","AdditionalInformation": "IAM User account 'root' for us-gov cloud regions does not have console access. This control is not applicable for us-gov cloud regions."}],"description": "Ensure hardware MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.7": {"name": "1.7","checks": {"iam_avoid_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html:https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html","Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/` 2. In the left pane, click `Credential Report` 3. Click on `Download Report` 4. Open or Save the file locally 5. Locate the `<root_account>` under the user column 6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used.**From Command Line:**Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '<root_account>' ```Review `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used.**Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "The 'root user' has unrestricted access to and control over all account resources. 
Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.","RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user:1. Change the 'root' user password. 2. Deactivate or delete any access keys associate with the 'root' user.**Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.","AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable.Monitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user."}],"description": "Eliminate use of the 'root' user for administrative and daily tasks","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.8": {"name": "1.8","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Minimum password length\" is set to 14 or greater.**From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Setting a password complexity policy increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Set \"Minimum password length\" to `14` or greater. 5. 
Click \"Apply password policy\"**From Command Line:** ```aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy requires minimum length of 14 or greater","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.9": {"name": "1.9","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Prevent password reuse\" is checked 5. Ensure \"Number of passwords to remember\" is set to 24**From Command Line:** ``` aws iam get-account-password-policy``` Ensure the output of the above command includes \"PasswordReusePrevention\": 24","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Preventing password reuse increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Check \"Prevent password reuse\" 5. Set \"Number of passwords to remember\" is set to `24` **From Command Line:** ```aws iam update-account-password-policy --password-reuse-prevention 24 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy prevents password reuse","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.1": {"name": "3.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events","Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. 
CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane- You will be presented with a list of trails across all regions 3. Ensure at least one Trail has `All` specified in the `Region` column 4. Click on a trail via the link in the _Name_ column 5. Ensure `Logging` is set to `ON`6. Ensure `Apply trail to all regions` is set to `Yes` 7. In section `Management Events` ensure `Read/Write Events` set to `ALL`**From Command Line:** ```aws cloudtrail describe-trails ``` Ensure `IsMultiRegionTrail` is set to `true```` aws cloudtrail get-trail-status --name <trailname> ``` Ensure `IsLogging` is set to `true` ``` aws cloudtrail get-event-selectors --trail-name <trailname> ``` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`","ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features:1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html","AssessmentStatus": "Automated","RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. Additionally, - ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected- ensuring that a multi-regions trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on AWS global services- for a multi-regions trail, ensuring that management events configured for all type of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account","RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on _Trails_ on the left navigation pane 3. Click `Get Started Now` , if presented- Click `Add new trail` - Enter a trail name in the `Trail name` box- Set the `Apply trail to all regions` option to `Yes` - Specify an S3 bucket name in the `S3 bucket` box- Click `Create`4. If 1 or more trails already exist, select the target trail to enable for global logging 5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`. 6. 
Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`.**From Command Line:** ``` aws cloudtrail create-trail --name <trail_name> --bucket-name <s3_bucket_for_cloudtrail> --is-multi-region-trail aws cloudtrail update-trail --name <trail_name> --is-multi-region-trail ```Note: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.","AdditionalInformation": ""}],"description": "Ensure CloudTrail is enabled in all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.2": {"name": "3.2","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html","Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.","DefaultValue": null,"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. For Every Trail: - Click on a trail via the link in the _Name_ column - Under the `General details` section, ensure `Log file validation` is set to `Enabled` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `LogFileValidationEnabled` is set to `true` for each trail","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.","RemediationProcedure": "Perform the following to enable log file validation on a given trail:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. Click on target trail 4. Within the `General details` section click `edit` 5. Under the `Advanced settings` section 6. Check the enable box under `Log file validation`7. Click `Save changes` **From Command Line:** ``` aws cloudtrail update-trail --name <trail_name> --enable-log-file-validation ``` Note that periodic validation of logs using these digests can be performed by running the following command: ``` aws cloudtrail validate-logs --trail-arn <trail_arn> --start-time <start_time> --end-time <end_time> ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail log file validation is enabled","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.3": {"name": "3.3","checks": {"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html","Description": "CloudTrail logs a record of every API call made in your AWS account. These logs file are stored in an S3 bucket. 
It is recommended that the bucket policy or access control list (ACL) applied to the S3 bucket that CloudTrail logs to prevent public access to the CloudTrail logs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the `API activity history` pane on the left, click `Trails`3. In the `Trails` pane, note the bucket names in the `S3 bucket` column 4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 5. For each bucket noted in step 3, right-click on the bucket and click `Properties`6. In the `Properties` pane, click the `Permissions` tab. 7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 8. Ensure no rows exists that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.`9. If the `Edit bucket policy` button is present, click it to review the bucket policy. 10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ```aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure the `AllUsers` principal is not granted privileges to that `<bucket_name>` : ```aws s3api get-bucket-acl --bucket <bucket_name> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]' ``` 3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that `<bucket_name>`: ```aws s3api get-bucket-acl --bucket <bucket_name> --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/Authenticated Users` ]' ``` 4. Get the S3 Bucket Policy ```aws s3api get-bucket-policy --bucket <bucket_name> ``` 5. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}**Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.","RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy:1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 2. Right-click on the bucket and click Properties 3. In the `Properties` pane, click the `Permissions` tab. 4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 5. Select the row that grants permission to `Everyone` or `Any Authenticated User`6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row). 7. Click `Save` to save the ACL. 8. If the `Edit bucket policy` button is present, click it. 9. 
Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.","AdditionalInformation": ""}],"description": "Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.4": {"name": "3.4","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/how-cloudtrail-works.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-aws-service-specific-topics.html","Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs.Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed:**From Console:**1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Under `Trails` , click on the CloudTrail you wish to evaluate 3. Under the `CloudWatch Logs` section. 4. Ensure a `CloudWatch Logs` log group is configured and listed. 5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp.**From Command Line:**1. Run the following command to get a listing of existing trails: ```aws cloudtrail describe-trails ``` 2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property. 3. Using the noted value of the `Name` property, run the following command: ```aws cloudtrail get-trail-status --name <trail_name> ``` 4. Ensure the `LatestcloudwatchLogdDeliveryTime` property is set to a recent (~one day old) timestamp.If the `CloudWatch Logs` log group is not setup and the delivery time is not recent refer to the remediation below.","ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. 
https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides the opportunity to establish alarms and notifications for anomalous or sensitive account activity.","RemediationProcedure": "Perform the following to establish the prescribed state:**From Console:**1. Log in to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Select the `Trail` that needs to be updated. 3. Scroll down to `CloudWatch Logs` 4. Click `Edit` 5. Under `CloudWatch Logs` click the box `Enabled` 6. Under `Log Group` pick new or select an existing log group 7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group. 8. Under `IAM Role` pick new or select an existing. 9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role. 10. Click `Save changes`.**From Command Line:** ``` aws cloudtrail update-trail --name  --cloudwatch-logs-log-group-arn  --cloudwatch-logs-role-arn  ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail trails are integrated with CloudWatch Logs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.5": {"name": "3.5","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html","Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), and any configuration changes between resources. It is recommended AWS Config be enabled in all regions.","DefaultValue": null,"AuditProcedure": "Process to evaluate AWS Config configuration per region**From Console:**1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/). 2. On the top right of the console select the target Region. 3. If presented with Setup AWS Config - follow the remediation procedure: 4. On the Resource inventory page, click on edit (the gear icon). The Set Up AWS Config page appears. 5. Ensure one or both check-boxes under \"All Resources\" are checked.- Include global resources related to IAM resources - which needs to be enabled in 1 region only 6. Ensure the correct S3 bucket has been defined. 7. Ensure the correct SNS topic has been defined. 8. Repeat steps 2 to 7 for each region.**From Command Line:**1. Run this command to show all AWS Config recorders and their properties: ``` aws configservice describe-configuration-recorders ``` 2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true`Note: There is one more parameter \"ResourceTypes\" in recordingGroup object. 
This parameter does not need to be checked separately: whenever \"allSupported\" is set to true, AWS enforces the resource types list to be empty (\"ResourceTypes\":[])Sample Output:``` {\"ConfigurationRecorders\": [{\"recordingGroup\": {\"allSupported\": true,\"resourceTypes\": [],\"includeGlobalResourceTypes\": true},\"roleARN\": \"arn:aws:iam:::role/service-role/\",\"name\": \"default\"}] } ```3. Run this command to show the status for all AWS Config recorders: ``` aws configservice describe-configuration-recorder-status ``` 4. In the output, find recorders with `name` key matching the recorders that met the criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`","ImpactStatement": "It is recommended AWS Config be enabled in all regions.","AssessmentStatus": "Automated","RationaleStatement": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.","RemediationProcedure": "To implement AWS Config configuration:**From Console:**1. Select the region you want to focus on in the top right of the console 2. Click `Services`3. Click `Config`4. Define which resources you want to record in the selected region 5. Choose to include global resources (IAM resources) 6. Specify an S3 bucket in the same account or in another managed AWS account 7. Create an SNS Topic from the same AWS account or another managed AWS account**From Command Line:**1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). 2. Run this command to set up the configuration recorder ``` aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole ``` 3. Run this command to start the configuration recorder: ``` aws configservice start-configuration-recorder --configuration-recorder-name  ```","AdditionalInformation": ""}],"description": "Ensure AWS Config is enabled in all regions","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},
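The two command-line audit steps above combine into a single pass; a minimal sketch, assuming CLI read access to AWS Config, with JMESPath filters mirroring the criteria in steps 2 and 4:

```
# Sketch: recorders that record all supported resource types plus global (IAM) resources...
aws configservice describe-configuration-recorders \
  --query 'ConfigurationRecorders[?recordingGroup.allSupported && recordingGroup.includeGlobalResourceTypes]'
# ...and confirm at least one recorder is actively recording with a healthy last status.
aws configservice describe-configuration-recorder-status \
  --query 'ConfigurationRecordersStatus[?recording && lastStatus==`SUCCESS`]'
```

Empty output from either command means the region being checked does not meet the recommendation.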
"3.6": {"name": "3.6","checks": {"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html","Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the CloudTrail S3 bucket has access logging enabled:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the API activity history pane on the left, click Trails 3. In the Trails pane, note the bucket names in the S3 bucket column 4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 5. Under `All Buckets` click on a target S3 bucket 6. Click on `Properties` in the top right of the console 7. Under `Bucket:` _ `` _ click on `Logging`8. Ensure `Enabled` is checked.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ```aws cloudtrail describe-trails --query 'trailList[*].S3BucketName'``` 2. Ensure Bucket Logging is enabled: ``` aws s3api get-bucket-logging --bucket  ``` Ensure the command does not return an empty output.Sample Output for a bucket with logging enabled:``` {\"LoggingEnabled\": {\"TargetPrefix\": \"\",\"TargetBucket\": \"\"} } ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within any target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.","RemediationProcedure": "Perform the following to enable S3 bucket logging:**From Console:**1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 2. Under `All Buckets` click on the target S3 bucket 3. Click on `Properties` in the top right of the console 4. Under `Bucket:`  click on `Logging`5. Configure bucket logging- Click on the `Enabled` checkbox- Select a Target Bucket from the list- Enter a Target Prefix 6. Click `Save`.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --region  --query trailList[*].S3BucketName ``` 2. Copy and add the target bucket name at ``, the prefix for the log file at ``, and optionally add an email address in the following template and save it as ``: ``` {\"LoggingEnabled\": {\"TargetBucket\": \"\",\"TargetPrefix\": \"\",\"TargetGrants\": [{\"Grantee\": {\"Type\": \"AmazonCustomerByEmail\",\"EmailAddress\": \"\"},\"Permission\": \"FULL_CONTROL\"}]}} ``` 3. Run the `put-bucket-logging` command with the bucket name and `` as input, for more information refer to [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html): ``` aws s3api put-bucket-logging --bucket  --bucket-logging-status file:// ```","AdditionalInformation": ""}],"description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},
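The check and fix above reduce to two calls; a hedged sketch in which `my-cloudtrail-bucket`, `my-access-log-bucket`, and the prefix are hypothetical names rather than values from the benchmark:

```
# Sketch: verify, then enable, server access logging on the CloudTrail bucket.
aws s3api get-bucket-logging --bucket my-cloudtrail-bucket   # empty output == logging disabled
aws s3api put-bucket-logging --bucket my-cloudtrail-bucket \
  --bucket-logging-status '{"LoggingEnabled":{"TargetBucket":"my-access-log-bucket","TargetPrefix":"cloudtrail-access/"}}'
```

Passing the logging status inline avoids the intermediate template file; the `TargetGrants` element shown in the remediation template is optional.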
"3.7": {"name": "3.7","checks": {"cloudtrail_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html","Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server-side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Select a Trail 4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KMS Key Id` field.**From Command Line:**1. Run the following command: ```aws cloudtrail describe-trails``` 2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.","ImpactStatement": "Customer created keys incur an additional cost. See https://aws.amazon.com/kms/pricing/ for more information.","AssessmentStatus": "Automated","RationaleStatement": "Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.","RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Click on a Trail 4. Under the `S3` section click on the edit button (pencil icon) 5. Click `Advanced`6. Select an existing CMK from the `KMS key Id` drop-down menu- Note: Ensure the CMK is located in the same region as the S3 bucket- Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy 7. Click `Save`8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files. 9. Click `Yes` **From Command Line:** ``` aws cloudtrail update-trail --name  --kms-key-id  aws kms put-key-policy --key-id  --policy  ```","AdditionalInformation": "Three statements need to be added to the CMK policy:1\\. Enable CloudTrail to describe CMK properties ``` 
{\"Sid\": \"Allow CloudTrail access\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:DescribeKey\",\"Resource\": \"*\" } ``` 2\\. Granting encrypt permissions ``` 
{\"Sid\": \"Allow CloudTrail to encrypt logs\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:GenerateDataKey*\",\"Resource\": \"*\",\"Condition\": {\"StringLike\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": [\"arn:aws:cloudtrail:*:aws-account-id:trail/*\"]}} } ``` 3\\. Granting decrypt permissions ``` 
{\"Sid\": \"Enable CloudTrail log decrypt permissions\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"arn:aws:iam::aws-account-id:user/username\"},\"Action\": \"kms:Decrypt\",\"Resource\": \"*\",\"Condition\": {\"Null\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\"}} } ```"}],"description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.8": {"name": "3.8","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://aws.amazon.com/kms/pricing/:https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final","Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the `General configuration` panel open the tab `Key rotation` 5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated 6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\"**From Command Line:**1. Run the following command to get a list of all keys and their associated `KeyIds````aws kms list-keys ``` 2. For each key, note the KeyId and run the following command ``` describe-key --key-id  ``` 3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command ```aws kms get-key-rotation-status --key-id  ``` 4. Ensure `KeyRotationEnabled` is set to `true` 5. Repeat steps 2 - 4 for all remaining CMKs","ImpactStatement": "Creation, management, and storage of CMKs may require additional time from and administrator.","AssessmentStatus": "Automated","RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed. Keys should be rotated every year, or upon event that would result in the compromise of that key.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` . 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the \"General configuration\" panel open the tab \"Key rotation\" 5. Check the \"Automatically rotate this KMS key every year.\" checkbox**From Command Line:**1. 
"3.9": {"name": "3.9","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html","Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. Ensure a Flow Log exists that has `Active` in the `Status` column.**From Command Line:**1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region: ``` aws ec2 describe-vpcs --region  --query Vpcs[].VpcId ``` 2. The command output returns the `VpcId` available in the selected region. 3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled: ``` aws ec2 describe-flow-logs --filter \"Name=resource-id,Values=\" ``` 4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`. 5. Repeat step 3 for other VPCs available in the same region. 6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.","ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic and provide insight during security workflows.","RemediationProcedure": "Perform the following to enable VPC Flow Logs:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. If no Flow Log exists, click `Create Flow Log`7. For Filter, select `Reject` 8. Enter a `Role` and `Destination Log Group`9. Click `Create Flow Log`10. Click on `CloudWatch Logs Group` **Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment.**From Command Line:**1. Create a policy document and name it `role_policy_document.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Sid\": \"test\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\"Action\": \"sts:AssumeRole\"}] } ``` 2. Create another policy document and name it `iam_policy.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Action\":[\"logs:CreateLogGroup\",\"logs:CreateLogStream\",\"logs:DescribeLogGroups\",\"logs:DescribeLogStreams\",\"logs:PutLogEvents\",\"logs:GetLogEvents\",\"logs:FilterLogEvents\"],\"Resource\": \"*\"}] } ``` 3. Run the following command to create an IAM role: ``` aws iam create-role --role-name  --assume-role-policy-document file://role_policy_document.json``` 4. Run the following command to create an IAM policy: ``` aws iam create-policy --policy-name  --policy-document file://iam_policy.json ``` 5. Run `attach-role-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned): ``` aws iam attach-role-policy --policy-arn arn:aws:iam:::policy/ --role-name  ``` 6. Run `describe-vpcs` to get the VpcId available in the selected region: ``` aws ec2 describe-vpcs --region  ``` 7. The command output should return the VPC Id available in the selected region. 8. Run `create-flow-logs` to create a flow log for the VPC: ``` aws ec2 create-flow-logs --resource-type VPC --resource-ids  --traffic-type REJECT --log-group-name  --deliver-logs-permission-arn  ``` 9. Repeat step 8 for other VPCs available in the selected region. 10. Change the region by updating --region and repeat the remediation procedure for other VPCs.","AdditionalInformation": ""}],"description": "Ensure VPC flow logging is enabled in all VPCs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"4.1": {"name": "4.1","checks": {"cloudwatch_log_metric_filter_unauthorized_api_calls": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://aws.amazon.com/sns/:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
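The per-region audit loop implied by steps 1–6 might look like the following; a sketch with a hypothetical region, assuming EC2 read permissions:

```
# Sketch: list VPCs in one region and report those with no flow log at all.
REGION=us-east-1   # hypothetical; repeat per region as in audit step 6
for vpc in $(aws ec2 describe-vpcs --region "${REGION}" --query 'Vpcs[*].VpcId' --output text); do
  logs=$(aws ec2 describe-flow-logs --region "${REGION}" \
    --filter "Name=resource-id,Values=${vpc}" --query 'FlowLogs[*].FlowLogId' --output text)
  [ -z "${logs}" ] && echo "${vpc}: no flow logs"
done
```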
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with \"Name\":` note ``- From value associated with \"CloudWatchLogsLogGroupArn\" note Example: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:::log-group:NewGroup:*,  would be NewGroup- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>`Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this `` that you captured in step 1:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\", ```4. Note the \"filterName\" `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\" ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions.If an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts.In some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.","AssessmentStatus": "Automated","RationaleStatement": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. 
Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. **Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for unauthorized API calls","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.2": {"name": "4.2","checks": {"cloudwatch_log_metric_filter_sign_in_without_mfa": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/viewing_metrics_with_cloudwatch.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all `CloudTrails`:``` aws cloudtrail describe-trails ```- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region `CloudTrail` is active``` aws cloudtrail get-trail-status --name  ```Ensure in the output that `IsLogging` is set to `TRUE`- Ensure identified Multi-region 'Cloudtrail' captures all Management Events``` aws cloudtrail get-event-selectors --trail-name  ```Ensure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. 
Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\" ```Or (To reduce false positives in case Single Sign-On (SSO) is used in the organization):``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `` taken from audit step 1.Use Command: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }' ```Or (To reduce false positives in case Single Sign-On (SSO) is used in the organization):``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored -Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\")}` reduces false alarms raised when a user logs in via an SSO account."}],"description": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},
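End to end, the four remediation steps shared by 4.1 and 4.2 above chain together as follows; a minimal sketch using the 4.1 unauthorized-API-calls pattern, where `trail-log-group`, `cis-alarms`, and the email endpoint are hypothetical stand-ins for the benchmark's stripped placeholders:

```
# Sketch: metric filter -> SNS topic -> subscription -> alarm (remediation steps 1-4).
aws logs put-metric-filter --log-group-name trail-log-group \
  --filter-name unauthorized_api_calls_filter \
  --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 \
  --filter-pattern '{ ($.errorCode = "*UnauthorizedOperation") || ($.errorCode = "AccessDenied*") || ($.sourceIPAddress!="delivery.logs.amazonaws.com") || ($.eventName!="HeadBucket") }'
topic_arn=$(aws sns create-topic --name cis-alarms --query TopicArn --output text)
aws sns subscribe --topic-arn "${topic_arn}" --protocol email --notification-endpoint security@example.com
aws cloudwatch put-metric-alarm --alarm-name unauthorized_api_calls_alarm \
  --metric-name unauthorized_api_calls_metric --namespace CISBenchmark \
  --statistic Sum --period 300 --threshold 1 \
  --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 \
  --alarm-actions "${topic_arn}"
```

Only the filter name, metric name, and filter pattern change between the 4.x controls; the topic and subscription can be created once and reused, as the notes above point out.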
"4.3": {"name": "4.3","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for 'root' account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail**- ensures that activities from all regions (used as well as unused) are monitored- ensures that activities on all supported global services are monitored- ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for usage of 'root' account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},
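The audit side of these controls (steps 2–7) can likewise be walked with three calls; the log group name, metric name, and topic ARN below are hypothetical:

```
# Sketch: confirm a filter pattern exists, find its alarm, then check SNS subscribers.
aws logs describe-metric-filters --log-group-name trail-log-group \
  --query 'metricFilters[*].{name:filterName,pattern:filterPattern}'
aws cloudwatch describe-alarms \
  --query 'MetricAlarms[?MetricName==`root_usage_metric`].AlarmActions'
aws sns list-subscriptions-by-topic \
  --topic-arn arn:aws:sns:us-east-1:111122223333:cis-alarms
```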
"4.4": {"name": "4.4","checks": {"cloudwatch_log_metric_filter_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes made to Identity and Access Management (IAM) policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to IAM policies will help ensure authentication and authorization controls remain intact.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for IAM policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.5": {"name": "4.5","checks": {"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to CloudTrail's configuration will help ensure sustained visibility to activities performed in the AWS account.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.6": {"name": "4.6","checks": {"cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring failed console logins may decrease lead time to detect an attempt to brute force a credential, which may provide an indicator, such as source IP, that can be used in other event correlation.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.7": {"name": "4.7","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. 
``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Data encrypted with disabled or deleted keys will no longer be accessible.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created CMKs","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.8": {"name": "4.8","checks": {"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to S3 bucket policies may reduce time to detect and correct permissive policies on sensitive S3 buckets.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for S3 bucket policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.9": {"name": "4.9","checks": {"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. 
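For control 4.8 above, the audit side can likewise be scripted. A minimal boto3 sketch, assuming a hypothetical log group name and a single region; it only checks that an S3-event filter exists and that its metric backs at least one alarm with an action:

```python
import boto3

# Hypothetical log group name, taken from the trail's CloudWatchLogsLogGroupArn.
LOG_GROUP = "CloudTrail/DefaultLogGroup"

logs = boto3.client("logs")
cloudwatch = boto3.client("cloudwatch")

# Audit steps 2-3: find filters whose pattern covers the S3 bucket policy events.
filters = logs.describe_metric_filters(logGroupName=LOG_GROUP)["metricFilters"]
s3_filters = [f for f in filters if "s3.amazonaws.com" in (f.get("filterPattern") or "")]

# Audit steps 5-6: each resulting metric should back at least one alarm with an SNS action.
alarms = cloudwatch.describe_alarms()["MetricAlarms"]
for f in s3_filters:
    for transform in f["metricTransformations"]:
        wired = [a for a in alarms
                 if a.get("MetricName") == transform["metricName"] and a.get("AlarmActions")]
        print(transform["metricName"], "alarmed" if wired else "NOT alarmed")
```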
Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to AWS Config configuration will help ensure sustained visibility of configuration items within the AWS account.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Configuration changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Config configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"5.1": {"name": "5.1","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison","Description": "The Network Access Control List (NACL) function provides stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "**From Console:**Perform the following to determine if the account is configured as prescribed: 1. 
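Audit step 7 (verifying the SNS topic has a live subscriber) recurs across all of the 4.x monitoring controls. A small boto3 helper, with a hypothetical topic ARN; pending subscriptions report `PendingConfirmation` instead of a real ARN, which is what the check excludes:

```python
import boto3

sns = boto3.client("sns")

def has_confirmed_subscriber(topic_arn: str) -> bool:
    """At least one subscription with a real ARN (not 'PendingConfirmation')."""
    paginator = sns.get_paginator("list_subscriptions_by_topic")
    for page in paginator.paginate(TopicArn=topic_arn):
        for sub in page["Subscriptions"]:
            if sub["SubscriptionArn"].startswith("arn:aws:sns:"):
                return True
    return False

# Hypothetical topic ARN.
print(has_confirmed_subscriber("arn:aws:sns:us-east-1:123456789012:cis-monitoring-alarms"))
```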
Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW`**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "**From Console:**Perform the following: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL to remediate, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Click `Edit inbound rules`- Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule- Click `Save`","AdditionalInformation": ""}],"description": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"5.2": {"name": "5.2","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the 0.0.0.0/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. 
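The control 5.1 console audit above can be approximated programmatically. A boto3 sketch that flags NACL entries allowing unrestricted ingress to the common remote-administration ports (single region, pagination omitted):

```python
import boto3

ADMIN_PORTS = {22, 3389}

ec2 = boto3.client("ec2")  # repeat per region in a full audit

for nacl in ec2.describe_network_acls()["NetworkAcls"]:
    for entry in nacl["Entries"]:
        if entry["Egress"] or entry["RuleAction"] != "allow":
            continue
        if entry.get("CidrBlock") != "0.0.0.0/0" and entry.get("Ipv6CidrBlock") != "::/0":
            continue
        # Protocol -1 means all traffic, which includes every port.
        port_range = entry.get("PortRange")
        open_all = entry["Protocol"] == "-1" or port_range is None
        hits = ADMIN_PORTS if open_all else {
            p for p in ADMIN_PORTS if port_range["From"] <= p <= port_range["To"]
        }
        if hits:
            print(f"{nacl['NetworkAclId']} rule {entry['RuleNumber']} exposes {sorted(hits)}")
```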
Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.3": {"name": "5.3","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `::/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the ::/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than ::/0, or, B) Click `Delete` to remove the offending inbound rule 6. 
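Controls 5.2 and 5.3 differ only in the open source range checked (0.0.0.0/0 versus ::/0), so one scan covers both. A minimal boto3 sketch (single region, pagination omitted):

```python
import boto3

ADMIN_PORTS = {22, 3389}
OPEN_SOURCES = {"0.0.0.0/0", "::/0"}  # 5.2 checks IPv4, 5.3 checks IPv6

ec2 = boto3.client("ec2")

def rule_is_open(perm: dict) -> bool:
    sources = {r.get("CidrIp") for r in perm.get("IpRanges", [])}
    sources |= {r.get("CidrIpv6") for r in perm.get("Ipv6Ranges", [])}
    if not sources & OPEN_SOURCES:
        return False
    if perm["IpProtocol"] == "-1":  # all traffic, every port included
        return True
    lo, hi = perm.get("FromPort", 0), perm.get("ToPort", 65535)
    return any(lo <= p <= hi for p in ADMIN_PORTS)

for sg in ec2.describe_security_groups()["SecurityGroups"]:
    for perm in sg["IpPermissions"]:
        if rule_is_open(perm):
            print(f"{sg['GroupId']} ({sg['GroupName']}) allows admin ports from the internet")
```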
Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from ::/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.4": {"name": "5.4","checks": {"ec2_securitygroup_default_restrict_traffic": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#default-security-group","Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic.The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation.**NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:Security Group State1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exist 4. Click the `Outbound Rules` tab 5. Ensure no rules existSecurity Group Members1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. Copy the id of the default security group. 5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home 6. In the filter column type 'Security Group ID : < security group id from #4 >'","ImpactStatement": "Implementing this recommendation in an existing VPC containing operating resources requires extremely careful migration planning as the default security groups are likely to be enabling many ports that are unknown. 
Enabling VPC flow logging (of accepts) in an existing environment that is known to be breach free will reveal the current pattern of ports being used for each instance to communicate successfully.","AssessmentStatus": "Automated","RationaleStatement": "Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will in turn reduce the exposure of those resources.","RemediationProcedure": "Security Group Members: Perform the following to implement the prescribed state: 1. Identify AWS resources that exist within the default security group 2. Create a set of least privilege security groups for those resources 3. Place the resources in those security groups 4. Remove the resources noted in #1 from the default security group. Security Group State: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Remove any inbound rules 4. Click the `Outbound Rules` tab 5. Remove any outbound rules. Recommended: IAM groups allow you to edit the \"name\" field. After remediating default group rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"","AdditionalInformation": ""}],"description": "Ensure the default security group of every VPC restricts all traffic","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"5.5": {"name": "5.5","checks": {"vpc_peering_routing_tables_with_least_privilege": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/peering-configurations-partial-access.html:https://docs.aws.amazon.com/cli/latest/reference/ec2/create-vpc-peering-connection.html","Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.","DefaultValue": null,"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.**From Command Line:**1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a __ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired. ``` aws ec2 describe-route-tables --filter \"Name=vpc-id,Values=\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\" ```","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of breach as resources outside of these routes are inaccessible to the peered VPC.","RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable.**From Command Line:**1. 
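The "Security Group State" remediation for control 5.4 above maps to two revoke calls per default group. A boto3 sketch, to be run only after resources have been moved into least-privilege groups, as the impact statement warns (single region, pagination omitted):

```python
import boto3

ec2 = boto3.client("ec2")  # repeat per region; every VPC has a 'default' group

groups = ec2.describe_security_groups(
    Filters=[{"Name": "group-name", "Values": ["default"]}]
)["SecurityGroups"]

for sg in groups:
    # Security Group State: the default group must hold no inbound or outbound rules.
    if sg["IpPermissions"]:
        ec2.revoke_security_group_ingress(
            GroupId=sg["GroupId"], IpPermissions=sg["IpPermissions"]
        )
    if sg["IpPermissionsEgress"]:
        ec2.revoke_security_group_egress(
            GroupId=sg["GroupId"], IpPermissions=sg["IpPermissionsEgress"]
        )
    print(f"cleared default group {sg['GroupId']} in VPC {sg.get('VpcId')}")
```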
For each __ containing routes non-compliant with your routing policy (which grants more than desired \"least access\"), delete the non-compliant route: ``` aws ec2 delete-route --route-table-id  --destination-cidr-block  ```2. Create a new compliant route: ``` aws ec2 create-route --route-table-id  --destination-cidr-block  --vpc-peering-connection-id  ```","AdditionalInformation": "If an organization has AWS transit gateway implemented in their VPC architecture, they should look to apply the recommendation above for \"least access\" routing architecture at the AWS transit gateway level in combination with what must be implemented at the standard VPC route table. More specifically, to route traffic between two or more VPCs via a transit gateway, VPCs must have an attachment to a transit gateway route table as well as a route; therefore, to avoid routing traffic between VPCs, an attachment to the transit gateway route table should only be added where there is an intention to route traffic between the VPCs. As transit gateways are able to host multiple route tables, it is possible to group VPCs by attaching them to a common route table."}],"description": "Ensure routing tables for VPC peering are \"least access\"","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"5.6": {"name": "5.6","checks": {"ec2_instance_imdsv2_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service/:https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html","Description": "When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method).","DefaultValue": null,"AuditProcedure": "From Console:1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under the Instances menu, select Instances.3. For each Instance, select the instance, then choose Actions > Modify instance metadata options.4. If the Instance metadata service is enabled, verify whether IMDSv2 is set to required. From Command Line:1. Use the describe-instances CLI command2. Ensure for all ec2 instances that the metadata-options.http-tokens setting is set to required.3. Repeat for all active regions.```aws ec2 describe-instances --filters \"Name=metadata-options.http-tokens,Values=optional\" \"Name=metadata-options.state,Values=applied\" --query \"Reservations[*].Instances[*].\" ``` ","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Allowing Version 1 of the service may open EC2 instances to Server-Side Request Forgery (SSRF) attacks, so Amazon recommends utilizing Version 2 for better instance security.","RemediationProcedure": "From Console:1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ 2. Under the Instances menu, select Instances.3. For each Instance, select the instance, then choose Actions > Modify instance metadata options.4. If the Instance metadata service is enabled, set IMDSv2 to Required. 
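For control 5.6, the audit filter and the remediation call can be chained end to end. A minimal boto3 sketch, assuming a single region and omitting error handling:

```python
import boto3

ec2 = boto3.client("ec2")  # repeat for every active region

# Audit: list applied instances that still accept IMDSv1 (http-tokens=optional).
reservations = ec2.describe_instances(
    Filters=[{"Name": "metadata-options.http-tokens", "Values": ["optional"]},
             {"Name": "metadata-options.state", "Values": ["applied"]}]
)["Reservations"]

for reservation in reservations:
    for instance in reservation["Instances"]:
        instance_id = instance["InstanceId"]
        # Remediation: require session tokens (IMDSv2) on the instance.
        ec2.modify_instance_metadata_options(InstanceId=instance_id, HttpTokens="required")
        print(f"{instance_id}: IMDSv2 now required")
```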
From Command Line:```aws ec2 modify-instance-metadata-options --instance-id  --http-tokens required``` ","AdditionalInformation": ""}],"description": "Ensure that EC2 Metadata Service only allows IMDSv2","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.10": {"name": "1.10","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://tools.ietf.org/html/rfc6238:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users","Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if a MFA device is enabled for all IAM users having a console password:**From Console:**1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left pane, select `Users`3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`. 4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: ```aws iam generate-credential-report ``` ```aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8``` 2. The output of this command will produce a table similar to the following: ```user,password_enabled,mfa_activeelise,false,falsebrandon,true,truerakesh,false,falsehelene,false,falseparas,true,trueanitha,false,false``` 3. For any column having `password_enabled` set to `true` , ensure `mfa_active` is also set to `true.`","ImpactStatement": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to one of the following alternative methods of MFA.","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to enable MFA:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' 2. In the left pane, select `Users`. 3. In the `User Name` list, choose the name of the intended MFA user. 4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`. 5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`. 
IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes.6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords.8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`.9. Click `Assign MFA`.","AdditionalInformation": "**Forced IAM User Self-Service Remediation**Amazon has published a pattern that forces users to set up MFA themselves before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts."}],"description": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.11": {"name": "1.11","checks": {"iam_user_no_setup_initial_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/cli/latest/reference/iam/delete-access-key.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html","Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When creating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on a User where the `Password age` and `Access key age` columns are not set to `None` 5. Click on `Security credentials` Tab 6. Compare the user `Creation time` to the Access Key `Created` date. 7. 
For any that match, the key was created during initial user setup.- Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access keys utilization: ```aws iam generate-credential-report ``` ```aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_dateelise,false,true,2015-04-16T15:14:00+00:00,false,N/Abrandon,true,true,N/A,false,N/Arakesh,false,false,N/A,false,N/Ahelene,false,true,2015-11-18T17:47:00+00:00,false,N/Aparas,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A``` 3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization.**Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.","RemediationProcedure": "Perform the following to delete access keys that do not pass the audit:**From Console:**1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. As an Administrator - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. 7. As an IAM User- Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.**From Command Line:** ``` aws iam delete-access-key --access-key-id  --user-name  ```","AdditionalInformation": "Credential report does not appear to contain \"Key Creation Date\""}],"description": "Do not setup access keys during initial user setup for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.12": {"name": "1.12","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if unused credentials exist:**From Console:**1. 
Login to the AWS Management Console 2. Click `Services`3. Click `IAM` 4. Click on `Users` 5. Click the `Settings` (gear) icon. 6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id` 7. Click on `Close`8. Check and ensure that `Console last sign-in` is less than 45 days ago.**Note** - `Never` means the user has never logged in.9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None`If the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation.**From Command Line:****Download Credential Report:**1. Run the following commands: ```aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^' ```**Ensure unused credentials do not exist:**2. For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago.- When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago.3. For each user having an `access_key_1_active` or `access_key_2_active` to `TRUE` , ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago.- When a user having an `access_key_x_active` (where x is 1 or 2) to `TRUE` and corresponding access_key_x_last_used_date is set to `N/A', ensure `access_key_x_last_rotated` is less than 45 days ago.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.","RemediationProcedure": "**From Console:**Perform the following to manage Unused Password (IAM user console access)1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select user whose `Console last sign-in` is greater than 45 days 7. Click `Security credentials` 8. In section `Sign-in credentials`, `Console password` click `Manage`9. Under Console Access select `Disable` 10.Click `Apply`Perform the following to deactivate Access Keys:1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select any access keys that are over 45 days old and that have been used and - Click on `Make Inactive` 7. Select any access keys that are over 45 days old and that have not been used and - Click the X to `Delete`","AdditionalInformation": " is excluded in the audit since the root account should not be used for day to day business and would likely be unused for more than 45 days."}],"description": "Ensure credentials unused for 45 days or greater are disabled","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.13": {"name": "1.13","checks": {"iam_user_two_active_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. 
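The credential report used by controls 1.10 through 1.12 above is straightforward to consume programmatically. A simplified boto3 sketch; the full 1.12 check also compares last-used and last-rotated dates against a 45-day window:

```python
import csv
import io
import time
import boto3

iam = boto3.client("iam")

# Kick off report generation and wait until it is ready.
while iam.generate_credential_report()["State"] != "COMPLETE":
    time.sleep(2)

report = iam.get_credential_report()["Content"].decode("utf-8")

for row in csv.DictReader(io.StringIO(report)):
    # Control 1.10: every user with a console password needs MFA.
    if row["password_enabled"] == "true" and row["mfa_active"] != "true":
        print(f"{row['user']}: console password but no MFA")
    # Control 1.12 (simplified): flag console passwords with no recorded use.
    if row["password_enabled"] == "true" and row["password_last_used"] in ("no_information", "N/A"):
        print(f"{row['user']}: console password apparently unused")
```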
You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases. - Repeat steps no. 3 โ€“ 5 for each IAM user in your AWS account.**From Command Line:**1. Run `list-users` command to list all IAM users within your account: ``` aws iam list-users --query \"Users[*].UserName\" ``` The command output should return an array that contains all your IAM user names.2. Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user: ``` aws iam list-access-keys --user-name  ``` The command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account.3. Check the `Status` property value for each key returned to determine each keys current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation, refer to the remediation below.- Repeat steps no. 2 and 3 for each IAM user in your AWS account.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate it by clicking the `Make Inactive` link. 7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key. 8. Repeat steps no. 3 โ€“ 7 for each IAM user in your AWS account.**From Command Line:**1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). 
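The control 1.13 audit loop above ("repeat for each IAM user") is a natural fit for a script. A minimal boto3 sketch:

```python
import boto3

iam = boto3.client("iam")

for page in iam.get_paginator("list_users").paginate():
    for user in page["Users"]:
        keys = iam.list_access_keys(UserName=user["UserName"])["AccessKeyMetadata"]
        active = [k for k in keys if k["Status"] == "Active"]
        if len(active) > 1:
            print(f"{user['UserName']} has {len(active)} active access keys")
```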
Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user**Note** - the command does not return any output: ``` aws iam update-access-key --access-key-id  --status Inactive --user-name  ``` 3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User: ``` aws iam list-access-keys --user-name  ``` - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration adheres now to this recommendation.4. Repeat steps no. 1 โ€“ 3 for each IAM user in your AWS account.","AdditionalInformation": ""}],"description": "Ensure there is only one active access key available for any single IAM user","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.14": {"name": "1.14","checks": {"iam_rotate_access_key_90_days": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click `setting` icon 4. Select `Console last sign-in` 5. Click `Close` 6. Ensure that `Access key age` is less than 90 days ago. note) `None` in the `Access key age` means the user has not used the access key.**From Command Line:**``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d ``` The `access_key_1_last_rotated` field in this file notes The date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used.Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.","RemediationProcedure": "Perform the following to rotate access keys:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click on `Security Credentials`4. As an Administrator - Click on `Make Inactive` for keys that have not been rotated in `90` Days 5. As an IAM User- Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days 6. Click on `Create Access Key`7. 
Update programmatic call with new Access Key credentials**From Command Line:**1. While the first access key is still active, create a second access key, which is active by default. Run the following command: ``` aws iam create-access-key ```At this point, the user has two active access keys.2. Update all applications and tools to use the new access key. 3. Determine whether the first access key is still in use by using this command: ``` aws iam get-access-key-last-used ``` 4. One approach is to wait several days and then check the old access key for any use before proceeding.Even if step Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command: ``` aws iam update-access-key ``` 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to step Step 2 and update this application to use the new key.6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: ``` aws iam delete-access-key ```","AdditionalInformation": ""}],"description": "Ensure access keys are rotated every 90 days or less","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.15": {"name": "1.15","checks": {"iam_policy_attached_only_to_group_or_roles": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html","Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users:1. Run the following to get a list of IAM users: ```aws iam list-users --query 'Users[*].UserName' --output text``` 2. For each user returned, run the following command to determine if any policies are attached to them: ```aws iam list-attached-user-policies --user-name aws iam list-user-policies --user-name ``` 3. If any policies are returned, the user has an inline policy or direct policy attachment.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Assigning IAM policy only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.","RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` and then click `Create New Group` . 3. 
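For control 1.14, the age check that precedes the rotation sequence above can be automated. A boto3 sketch that only reports; the rotation itself should follow the staged create/verify/deactivate/delete steps described above:

```python
from datetime import datetime, timezone
import boto3

MAX_AGE_DAYS = 90
iam = boto3.client("iam")
now = datetime.now(timezone.utc)

for page in iam.get_paginator("list_users").paginate():
    for user in page["Users"]:
        for key in iam.list_access_keys(UserName=user["UserName"])["AccessKeyMetadata"]:
            age = (now - key["CreateDate"]).days
            if key["Status"] == "Active" and age > MAX_AGE_DAYS:
                print(f"{user['UserName']}: key {key['AccessKeyId']} is {age} days old")
```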
In the `Group Name` box, type the name of the group and then click `Next Step` . 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` . 5. Click `Create Group` Perform the following to add a user to a given group:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups`3. Select the group to add a user to 4. Click `Add Users To Group`5. Select the users to be added to the group 6. Click `Add Users` Perform the following to remove a direct association between a user and policy:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left navigation pane, click on Users 3. For each user:- Select the user- Click on the `Permissions` tab- Expand `Permissions policies` - Click `X` for each policy; then click Detach or Remove (depending on policy type)","AdditionalInformation": ""}],"description": "Ensure IAM Users Receive Permissions Only Through Groups","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.16": {"name": "1.16","checks": {"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam","Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant _least privilege_ -that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.","DefaultValue": null,"AuditProcedure": "Perform the following to determine what policies are created:**From Command Line:**1. Run the following to get a list of IAM policies: ```aws iam list-policies --only-attached --output text ``` 2. For each policy returned, run the following command to determine if any policies is allowing full administrative privileges on the account: ```aws iam get-policy-version --policy-arn  --version-id  ``` 3. In output ensure policy should not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later.Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions.IAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.","RemediationProcedure": "**From Console:**Perform the following to detach the policy that has full administrative privileges:1. 
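The control 1.15 audit above enumerates users and their directly attached or inline policies; a minimal boto3 sketch (per-user pagination omitted):

```python
import boto3

iam = boto3.client("iam")

for page in iam.get_paginator("list_users").paginate():
    for user in page["Users"]:
        name = user["UserName"]
        attached = iam.list_attached_user_policies(UserName=name)["AttachedPolicies"]
        inline = iam.list_user_policies(UserName=name)["PolicyNames"]
        if attached or inline:
            print(f"{name}: {len(attached)} attached, {len(inline)} inline policies "
                  "(should receive permissions via groups only)")
```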
Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. 3. Select the policy that needs to be deleted. 4. In the policy action menu, select first `Detach`5. Select all Users, Groups, Roles that have this policy attached 6. Click `Detach Policy`7. In the policy action menu, select `Detach` **From Command Line:**Perform the following to detach the policy that has full administrative privileges as found in the audit step:1. Lists all IAM users, groups, and roles that the specified managed policy is attached to.```aws iam list-entities-for-policy --policy-arn  ``` 2. Detach the policy from all IAM Users: ```aws iam detach-user-policy --user-name  --policy-arn  ``` 3. Detach the policy from all IAM Groups: ```aws iam detach-group-policy --group-name  --policy-arn  ``` 4. Detach the policy from all IAM Roles: ```aws iam detach-role-policy --role-name  --policy-arn  ```","AdditionalInformation": ""}],"description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not attached","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.17": {"name": "1.17","checks": {"iam_support_role_created": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://aws.amazon.com/premiumsupport/pricing/:https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html:https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html:https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html","Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.","DefaultValue": null,"AuditProcedure": "**From Command Line:**1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value: ``` aws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\" ``` 2. Check if the 'AWSSupportAccess' policy is attached to any role:``` aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess ```3. In Output, Ensure `PolicyRoles` does not return empty. 'Example: Example: PolicyRoles: [ ]'If it returns empty refer to the remediation below.","ImpactStatement": "All AWS Support plans include an unlimited number of account and billing support cases, with no long-term contracts. Support billing calculations are performed on a per-account basis for all plans. Enterprise Support plan customers have the option to include multiple enabled accounts in an aggregated monthly billing calculation. Monthly charges for the Business and Enterprise support plans are based on each month's AWS usage charges, subject to a monthly minimum, billed in advance.","AssessmentStatus": "Automated","RationaleStatement": "By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.","RemediationProcedure": "**From Command Line:**1. 
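The control 1.16 audit above can be scripted by walking attached policies and their default versions. A boto3 sketch; the statement matching is a simplified reading of the "Effect: Allow / Action: * / Resource: *" rule:

```python
import boto3

iam = boto3.client("iam")

def _aslist(value):
    return value if isinstance(value, list) else [value]

def grants_full_admin(document: dict) -> bool:
    statements = document.get("Statement", [])
    if isinstance(statements, dict):
        statements = [statements]
    return any(
        stmt.get("Effect") == "Allow"
        and "*" in _aslist(stmt.get("Action"))
        and "*" in _aslist(stmt.get("Resource"))
        for stmt in statements
    )

for page in iam.get_paginator("list_policies").paginate(OnlyAttached=True):
    for policy in page["Policies"]:
        version = iam.get_policy_version(
            PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"]
        )["PolicyVersion"]
        if grants_full_admin(version["Document"]):
            print(f"{policy['PolicyName']} allows *:* on * -- detach it")
```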
Create an IAM role for managing incidents with AWS:- Create a trust relationship policy document that allows  to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: ```{\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"\"},\"Action\": \"sts:AssumeRole\"}]} ``` 2. Create the IAM role using the above trust policy: ``` aws iam create-role --role-name  --assume-role-policy-document file:///tmp/TrustPolicy.json ``` 3. Attach 'AWSSupportAccess' managed policy to the created IAM role: ``` aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name  ```","AdditionalInformation": "AWSSupportAccess policy is a global AWS resource. It has the same ARN, `arn:aws:iam::aws:policy/AWSSupportAccess`, for every account."}],"description": "Ensure a support role has been created to manage incidents with AWS Support","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.18": {"name": "1.18","checks": {"ec2_instance_profile_attached": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html","Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.","DefaultValue": null,"AuditProcedure": "Where an instance is associated with a Role:For instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions:1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Open the EC2 Dashboard and choose \"Instances\" 3. Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\" 4. If the Role is blank, the instance is not assigned to one. 5. If the Role is filled in, the instance might still \\*also\\* have credentials encoded on it for some activities.Where an Instance Contains Embedded Credentials:- On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials.Where an Instance Application Contains Embedded Credentials:- Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as GitHub. Whether an application contains credentials can be determined by eliminating all other sources of credentials; if the application can still access AWS resources, it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. 
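For reference, the same support-role remediation expressed with boto3; `TRUSTED_PRINCIPAL` and `ROLE_NAME` are illustrative placeholders, not values taken from this benchmark:

```python
import json

import boto3

iam = boto3.client("iam")

TRUSTED_PRINCIPAL = "arn:aws:iam::123456789012:root"  # illustrative principal
ROLE_NAME = "aws-support-incident-role"               # illustrative role name

trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"AWS": TRUSTED_PRINCIPAL},
            "Action": "sts:AssumeRole",
        }
    ],
}

# Create the role with the trust policy, then attach the AWS-managed
# AWSSupportAccess policy (same ARN in every account, as noted above).
iam.create_role(
    RoleName=ROLE_NAME,
    AssumeRolePolicyDocument=json.dumps(trust_policy),
)
iam.attach_role_policy(
    RoleName=ROLE_NAME,
    PolicyArn="arn:aws:iam::aws:policy/AWSSupportAccess",
)
```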
In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it.Additionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.","RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance to add it to a role you must create a new instance.If the instance has no external dependencies on its current private ip or public addresses are elastic IPs:1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known. 2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected. 3. Shutdown both the existing instance and the new instance. 4. Detach disks from both instances. 5. Attach the existing instance disks to the new instance. 6. Boot the new instance and you should have the same machine, but with the associated role.**Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then when launching from the AMI, manually assign the previous private IP address.**Note:** if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way to ensure the address is retained and assign an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.","AdditionalInformation": ""}],"description": "Ensure IAM instance roles are used for AWS resource access from instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.19": {"name": "1.19","checks": {"iam_no_expired_server_certificates_stored": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html:https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html","Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates.Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. 
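A hedged sketch of the 1.18 audit with boto3: list running instances and flag any without an instance profile. Instances flagged here still need the manual embedded-credential review described above.

```python
import boto3

ec2 = boto3.client("ec2")

# List running instances and flag any without an IAM instance profile;
# flagged instances are candidates for the embedded-credential review.
paginator = ec2.get_paginator("describe_instances")
for page in paginator.paginate(
    Filters=[{"Name": "instance-state-name", "Values": ["running"]}]
):
    for reservation in page["Reservations"]:
        for instance in reservation["Instances"]:
            profile = instance.get("IamInstanceProfile")
            status = profile["Arn"] if profile else "NO INSTANCE PROFILE"
            print(f"{instance['InstanceId']}: {status}")
```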
Additionally, you cannot manage your certificates from the IAM Console.","DefaultValue": null,"AuditProcedure": "**From Console:**Getting the certificates expiration information via AWS Management Console is not currently supported.To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**Run the list-server-certificates command to list all the IAM-stored server certificates:``` aws iam list-server-certificates ```The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc):``` {\"ServerCertificateMetadataList\": [{\"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\",\"ServerCertificateName\": \"MyServerCertificate\",\"Expiration\": \"2018-07-10T23:59:59Z\",\"Path\": \"/\",\"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\",\"UploadDate\": \"2018-06-10T11:56:08Z\"}] } ```Verify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them.If this command returns: ``` { \"ServerCertificateMetadataList\": [] } ``` this means that there are no certificates stored in IAM at all, and therefore no expired certificates to remove.","ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. One has to make configurations at respective services to ensure there is no interruption in application functionality.","AssessmentStatus": "Automated","RationaleStatement": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. As a best practice, it is recommended to delete expired certificates.","RemediationProcedure": "**From Console:**Removing expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**To delete an expired certificate, run the following command, replacing  with the name of the certificate to delete:``` aws iam delete-server-certificate --server-certificate-name  ```When the preceding command is successful, it does not return any output.","AdditionalInformation": ""}],"description": "Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.20": {"name": "1.20","checks": {"accessanalyzer_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html","Description": "Enable IAM Access analyzer for IAM policies about all resources in each region.IAM Access Analyzer is a technology introduced at AWS re:Invent 2019. 
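The 1.19 audit and remediation collapse into a few lines of boto3. This sketch assumes you really do want to delete every expired certificate, so review the printed names before running it against a production account:

```python
from datetime import datetime, timezone

import boto3

iam = boto3.client("iam")
now = datetime.now(timezone.utc)

# `Expiration` is returned as a timezone-aware datetime, so a direct
# comparison against the current UTC time is enough to find expired certs.
expired = [
    cert["ServerCertificateName"]
    for page in iam.get_paginator("list_server_certificates").paginate()
    for cert in page["ServerCertificateMetadataList"]
    if cert["Expiration"] < now
]

for name in expired:
    print(f"Deleting expired server certificate: {name}")
    iam.delete_server_certificate(ServerCertificateName=name)
```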
After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least-privilege access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Open the IAM console at `https://console.aws.amazon.com/iam/` 2. Choose `Access analyzer` 3. Click 'Analyzers' 4. Ensure that at least one analyzer is present 5. Ensure that the `STATUS` is set to `Active` 6. Repeat these steps for each active region**From Command Line:**1. Run the following command: ``` aws accessanalyzer list-analyzers | grep status ``` 2. Ensure that the `status` of at least one analyzer is set to `ACTIVE`3. Repeat the steps above for each active region.If an Access analyzer is not listed for each region or the status is not set to active, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 buckets, IAM roles, KMS (Key Management Service) keys, AWS Lambda functions, and Amazon SQS (Simple Queue Service) queues.","RemediationProcedure": "**From Console:**Perform the following to enable IAM Access analyzer for IAM policies:1. Open the IAM console at `https://console.aws.amazon.com/iam/.` 2. Choose `Access analyzer`. 3. Choose `Create analyzer`. 4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer. 5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`. 6. Add any tags that you want to apply to the analyzer. `Optional`.7. Choose `Create Analyzer`. 8. Repeat these steps for each active region**From Command Line:**Run the following command: ``` aws accessanalyzer create-analyzer --analyzer-name  --type  ``` Repeat this command above for each active region.**Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.","AdditionalInformation": ""}],"description": "Ensure that IAM Access analyzer is enabled for all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.21": {"name": "1.21","checks": {"iam_check_saml_providers_sts": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "","Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.","DefaultValue": null,"AuditProcedure": "For multi-account AWS environments with an external identity provider... 1. 
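The per-region loop in the 1.20 audit is easy to script. A boto3 sketch follows; it assumes IAM Access Analyzer is available in every region returned by `describe_regions`, which holds for current commercial regions:

```python
import boto3

# Enumerate enabled regions, then require at least one ACTIVE account
# analyzer per region, mirroring the CLI audit above.
ec2 = boto3.client("ec2")
regions = [r["RegionName"] for r in ec2.describe_regions()["Regions"]]

for region in regions:
    analyzer = boto3.client("accessanalyzer", region_name=region)
    analyzers = analyzer.list_analyzers(type="ACCOUNT")["analyzers"]
    if any(a["status"] == "ACTIVE" for a in analyzers):
        print(f"{region}: PASS")
    else:
        print(f"{region}: FAIL - no active analyzer")
```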
Determine the master account for identity federation or IAM user management 2. Login to that account through the AWS Management Console 3. Click `Services`4. Click `IAM`5. Click `Identity providers` 6. Verify the configurationThen, for each account...1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services`5. Click `IAM`6. Click `Users` 7. Confirm that no IAM users representing individuals are presentFor multi-account AWS environments implementing AWS Organizations without an external identity provider... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services`5. Click `IAM`6. Click `Users` 7. Confirm that no IAM users representing individuals are present","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Centralizing IAM user management to a single identity store reduces complexity and thus the likelihood of access management errors.","RemediationProcedure": "The remediation procedure will vary based on the individual organization's implementation of identity federation and/or AWS Organizations, with the acceptance criteria that no non-service IAM users and non-root accounts are present outside the account providing centralized IAM user management.","AdditionalInformation": ""}],"description": "Ensure IAM users are managed centrally via identity federation or AWS Organizations for multi-account environments","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.22": {"name": "1.22","checks": {},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html","Description": "AWS CloudShell is a convenient way of running CLI commands against AWS services; a managed IAM policy ('AWSCloudShellFullAccess') provides full access to CloudShell, which allows file upload and download capability between a user's local system and the CloudShell environment. Within the CloudShell environment a user has sudo permissions, and can access the internet. So it is feasible to install file transfer software (for example) and move data from CloudShell to external internet servers.","DefaultValue": null,"AuditProcedure": "**From Console** 1. Open the IAM console at https://console.aws.amazon.com/iam/2. In the left pane, select Policies3. Search for and select AWSCloudShellFullAccess4. On the Entities attached tab, ensure that there are no entities using this policy **From Command Line**1. List IAM policies, filter for the 'AWSCloudShellFullAccess' managed policy, and note the \"Arn\" element value:```aws iam list-policies --query \"Policies[?PolicyName == 'AWSCloudShellFullAccess']\"``` 2. Check if the 'AWSCloudShellFullAccess' policy is attached to any role: ```aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSCloudShellFullAccess```3. In the output, ensure PolicyRoles returns empty. 
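The same check expressed with boto3; as the text notes, an empty result here does not rule out access granted through other policies:

```python
import boto3

iam = boto3.client("iam")
POLICY_ARN = "arn:aws:iam::aws:policy/AWSCloudShellFullAccess"

entities = iam.list_entities_for_policy(PolicyArn=POLICY_ARN)
attached = (
    entities["PolicyUsers"] + entities["PolicyGroups"] + entities["PolicyRoles"]
)
if attached:
    print(f"FAIL: {len(attached)} entities have AWSCloudShellFullAccess attached")
else:
    print("PASS: no entities attached")
```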
'Example: PolicyRoles: [ ]'. If it does not return empty, refer to the remediation below. Note: Keep in mind that other policies may grant access.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Access to this policy should be restricted as it presents a potential channel for data exfiltration by malicious cloud admins that are given full permissions to the service. AWS documentation describes how to create a more restrictive IAM policy which denies file transfer permissions.","RemediationProcedure": "**From Console**1. Open the IAM console at https://console.aws.amazon.com/iam/2. In the left pane, select Policies 3. Search for and select AWSCloudShellFullAccess4. On the Entities attached tab, for each item, check the box and select Detach","AdditionalInformation": ""}],"description": "Ensure access to AWSCloudShellFullAccess is restricted","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.10": {"name": "3.10","checks": {"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/` 2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine. 3. Review `General details` 4. Confirm that `Multi-region trail` is set to `Yes` 5. Scroll down to `Data events` 6. Confirm that it reads: Data events: S3 Bucket Name: All current and future S3 buckets Read: Enabled Write: Enabled 7. Repeat steps 2 to 6 to verify the Multi-region trail and Data events logging of S3 buckets in CloudTrail. If the CloudTrails do not have multi-region and data events configured for S3 refer to the remediation below.**From Command Line:**1. Run the `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions: ``` aws cloudtrail list-trails ``` 2. The command output will be a list of all the trail names to include. \"TrailARN\": \"arn:aws:cloudtrail:::trail/\", \"Name\": \"\", \"HomeRegion\": \"\" 3. Next run the `get-trail` command to determine Multi-region. ``` aws cloudtrail get-trail --name  --region  ``` 4. The command output should include: \"IsMultiRegionTrail\": true, 5. Next run the `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if the Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 6. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. \"Type\": \"AWS::S3::Object\",\"Values\": [\"arn:aws:s3\" 7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 8. 
Repeat steps 1 to 5 for each CloudTrail to determine if Data events for S3 are covered. If Multi-region is not set to true and Data events do not show S3 defined as shown, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled. 6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets.**From Command Line:**1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at once, then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for write events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.11": {"name": "3.11","checks": {"cloudtrail_s3_dataevents_read_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set. 5. 
If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. 6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets.**From Command Line:** 1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: ``` aws cloudtrail describe-trails --region  --output table --query trailList[*].Name ``` 2. The command output will be table of the requested trail names. 3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. 5. If the `get-event-selectors` command returns an empty array, the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 6. Repeat steps 1 to 5 to audit each s3 bucket and identify other trails that are missing the capability to log Data events. 7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click `Properties` tab to see in detail bucket configuration. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled. 6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets.**From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be `object-level` event trail configuration. 3. If you want to enable it for all buckets at once, then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events. 5. 
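Controls 3.10 and 3.11 differ only in `ReadWriteType`; a single boto3 sketch can cover both by selecting `All` (the trail name and region are illustrative placeholders):

```python
import boto3

REGION = "us-east-1"     # repeat per region with single-region trails
TRAIL_NAME = "my-trail"  # illustrative trail name

cloudtrail = boto3.client("cloudtrail", region_name=REGION)

# Enable object-level (data event) logging for all current and future S3
# buckets; ReadWriteType "All" satisfies both the write (3.10) and read
# (3.11) recommendations at once.
cloudtrail.put_event_selectors(
    TrailName=TRAIL_NAME,
    EventSelectors=[
        {
            "ReadWriteType": "All",
            "IncludeManagementEvents": True,
            "DataResources": [
                {"Type": "AWS::S3::Object", "Values": ["arn:aws:s3"]}
            ],
        }
    ],
)
```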
Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for read events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.10": {"name": "4.10","checks": {"cloudwatch_log_metric_filter_security_group_changes": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '']\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to security groups will help ensure that resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. 
Create a metric filter based on filter pattern provided which checks for security group changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"\" --filter-name \"\" --metric-transformations metricName= \"\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name \"\" ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn \"\" --protocol  --notification-endpoint \"\" ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"\" --metric-name \"\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"\" ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for security group changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.11": {"name": "4.11","checks": {"cloudwatch_changes_to_network_acls_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
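Controls 4.10 through 4.15 all follow the same four-step remediation pattern, differing only in the filter pattern. A boto3 sketch of those steps for 4.10, where the log group, topic, endpoint, and metric names are illustrative placeholders:

```python
import boto3

REGION = "us-east-1"                      # adjust as needed
LOG_GROUP = "CloudTrail/DefaultLogGroup"  # illustrative log group name
METRIC_NAME = "SecurityGroupEventCount"   # illustrative metric name
FILTER_PATTERN = (
    "{ ($.eventName = AuthorizeSecurityGroupIngress) || "
    "($.eventName = AuthorizeSecurityGroupEgress) || "
    "($.eventName = RevokeSecurityGroupIngress) || "
    "($.eventName = RevokeSecurityGroupEgress) || "
    "($.eventName = CreateSecurityGroup) || "
    "($.eventName = DeleteSecurityGroup) }"
)

logs = boto3.client("logs", region_name=REGION)
sns = boto3.client("sns", region_name=REGION)
cloudwatch = boto3.client("cloudwatch", region_name=REGION)

# 1. Metric filter on the CloudTrail log group (step 1 above).
logs.put_metric_filter(
    logGroupName=LOG_GROUP,
    filterName="cis-security-group-changes",
    filterPattern=FILTER_PATTERN,
    metricTransformations=[
        {
            "metricName": METRIC_NAME,
            "metricNamespace": "CISBenchmark",
            "metricValue": "1",
        }
    ],
)

# 2-3. SNS topic plus a subscriber (steps 2-3 above); the topic and
# subscription can be reused for all of the monitoring alarms.
topic_arn = sns.create_topic(Name="cis-benchmark-alarms")["TopicArn"]
sns.subscribe(TopicArn=topic_arn, Protocol="email", Endpoint="secops@example.com")

# 4. Alarm wired to the metric produced by the filter (step 4 above).
cloudwatch.put_metric_alarm(
    AlarmName="cis-4-10-security-group-changes",
    MetricName=METRIC_NAME,
    Namespace="CISBenchmark",
    Statistic="Sum",
    Period=300,
    Threshold=1,
    ComparisonOperator="GreaterThanOrEqualToThreshold",
    EvaluationPeriods=1,
    AlarmActions=[topic_arn],
)
```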
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to NACLs will help ensure that AWS resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL)","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.12": {"name": "4.12","checks": {"cloudwatch_changes_to_network_gateways_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to network gateways will help ensure that all ingress/egress traffic traverses the VPC border via a controlled path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for network gateway changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to network gateways","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.13": {"name": "4.13","checks": {"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. 
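The audit half of the same 4.10-4.15 pattern (filter, alarm, and subscribed topic) can be verified in one pass. A boto3 sketch, again with an illustrative log group name, and assuming the alarm actions are SNS topic ARNs as the controls prescribe:

```python
import boto3

REGION = "us-east-1"
LOG_GROUP = "CloudTrail/DefaultLogGroup"  # illustrative log group name

logs = boto3.client("logs", region_name=REGION)
cloudwatch = boto3.client("cloudwatch", region_name=REGION)
sns = boto3.client("sns", region_name=REGION)

filters = logs.describe_metric_filters(logGroupName=LOG_GROUP)["metricFilters"]
alarms = cloudwatch.describe_alarms()["MetricAlarms"]

for metric_filter in filters:
    for transform in metric_filter["metricTransformations"]:
        metric = transform["metricName"]
        # An alarm must reference the metric, and each SNS topic in its
        # actions must have at least one confirmed (non-pending) subscriber.
        for alarm in (a for a in alarms if a["MetricName"] == metric):
            for topic_arn in alarm["AlarmActions"]:
                subs = sns.list_subscriptions_by_topic(TopicArn=topic_arn)
                subscribed = any(
                    s["SubscriptionArn"].startswith("arn:aws:sns")
                    for s in subs["Subscriptions"]
                )
                state = "PASS" if subscribed else "FAIL - no subscriber"
                print(f"{metric} -> {alarm['AlarmName']}: {state}")
```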
It is recommended that a metric filter and alarm be established for changes to route tables.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for route table changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for route table changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.14": {"name": "4.14","checks": {"cloudwatch_changes_to_vpcs_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. 
Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to VPC will help ensure VPC traffic flow is not getting impacted.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for VPC changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.15": {"name": "4.15","checks": {"cloudwatch_log_metric_filter_aws_organizations_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.","DefaultValue": null,"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: - Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails:``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true - From value associated with CloudWatchLogsLogGroupArn note **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:::log-group:NewGroup:*,  would be NewGroup- Ensure Identified Multi region CloudTrail is active: ``` aws cloudtrail get-trail-status --name  ``` Ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events: ``` aws cloudtrail get-event-selectors --trail-name  ``` - Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`.2. Get a list of all associated metric filters for this : ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\" ``` 4. 
Note the `` value associated with the filterPattern found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4: ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the AlarmActions value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic: ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. Example of valid \"SubscriptionArn\":``` \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring AWS Organizations changes can help you prevent any unwanted, accidental or intentional modifications that may lead to unauthorized access or other security breaches. This monitoring technique helps you to ensure that any unexpected changes performed within your AWS Organizations can be investigated and any unwanted changes can be rolled back.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `` taken from audit step 1: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }' ``` **Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify: ``` aws sns create-topic --name  ``` **Note:** you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2: ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2: ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": ""}],"description": "Ensure a log metric filter and alarm exists for AWS Organizations changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.16": {"name": "4.16","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. 
Monitoring","References": "https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-get-started.html:https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-enable.html#securityhub-enable-api:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/enable-security-hub.html","Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.","DefaultValue": null,"AuditProcedure": "The process to evaluate AWS Security Hub configuration per region **From Console:**1. Sign in to the AWS Management Console and open the AWS Security Hub console at https://console.aws.amazon.com/securityhub/. 2. On the top right of the console, select the target Region. 3. If presented with the Security Hub > Summary page, then Security Hub is set up for the selected region. 4. If presented with Setup Security Hub or Get Started With Security Hub - follow the online instructions. 5. Repeat steps 2 to 4 for each region.","ImpactStatement": "It is recommended AWS Security Hub be enabled in all regions. AWS Security Hub requires AWS Config to be enabled.","AssessmentStatus": "Automated","RationaleStatement": "AWS Security Hub provides you with a comprehensive view of your security state in AWS and helps you check your environment against security industry standards and best practices - enabling you to quickly assess the security posture across your AWS accounts.","RemediationProcedure": "To grant the permissions required to enable Security Hub, attach the Security Hub managed policy AWSSecurityHubFullAccess to an IAM user, group, or role.Enabling Security Hub**From Console:**1. Use the credentials of the IAM identity to sign in to the Security Hub console. 2. When you open the Security Hub console for the first time, choose Enable AWS Security Hub. 3. On the welcome page, Security standards list the security standards that Security Hub supports. 4. Choose Enable Security Hub.**From Command Line:**1. Run the enable-security-hub command. To enable the default standards, include `--enable-default-standards`. ``` aws securityhub enable-security-hub --enable-default-standards ```2. To enable Security Hub without the default standards, include `--no-enable-default-standards`. ``` aws securityhub enable-security-hub --no-enable-default-standards ```","AdditionalInformation": ""}],"description": "Ensure AWS Security Hub is enabled","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.1": {"name": "2.1.1","checks": {"s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "2.1. 
Simple Storage Service (S3)","References": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/:https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-policy.html","Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy, making the objects accessible only through HTTPS.","DefaultValue": null,"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So, to make sure you do not allow HTTP access, confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\".**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions', then click on `Bucket Policy`. 4. Ensure that a policy is listed that matches: ``` '{\"Sid\": ,\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}' ``` `` and `` will be specific to your account5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets``` aws s3 ls ``` 2. Using the list of buckets run this command on each of them: ``` aws s3api get-bucket-policy --bucket  | grep aws:SecureTransport ``` 3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false` 4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS, you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions'. 4. Click 'Bucket Policy' 5. Add this to the existing policy, filling in the required information ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 6. Save 7. Repeat for all the buckets in your AWS account that contain sensitive data.**From Console** using AWS Policy Generator:1. Repeat steps 1-4 above. 2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor 3. Select Policy Type `S3 Bucket Policy` 4. Add Statements - `Effect` = Deny - `Principal` = * - `AWS Service` = Amazon S3 - `Actions` = * - `Amazon Resource Name` =  5. Generate Policy 6. Copy the text and add it to the Bucket Policy.**From Command Line:**1. Export the bucket policy to a json file. ``` aws s3api get-bucket-policy --bucket  --query Policy --output text > policy.json ```2. 
Modify the policy.json file by adding in this statement: ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 3. Apply this modified policy back to the S3 bucket: ``` aws s3api put-bucket-policy --bucket  --policy file://policy.json ```","AdditionalInformation": ""}],"description": "Ensure S3 Bucket Policy is set to deny HTTP requests","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.2": {"name": "2.1.2","checks": {"s3_bucket_no_mfa_delete": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.1. Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete:https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html:https://aws.amazon.com/blogs/security/securing-access-to-aws-using-mfa-part-3/:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html","Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket, it requires the user to have two forms of authentication.","DefaultValue": null,"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket**From Console:**1. Login to the S3 console at `https://console.aws.amazon.com/s3/`2. Click the `Check` box next to the Bucket name you want to confirm3. In the window under `Properties`4. Confirm that Versioning is `Enabled`5. Confirm that MFA Delete is `Enabled`**From Command Line:**1. Run the `get-bucket-versioning` command ``` aws s3api get-bucket-versioning --bucket my-bucket ```Output example: ``` <Status>Enabled</Status><MfaDelete>Enabled</MfaDelete> ```If the Console or the CLI output does not show Versioning and MFA Delete `enabled`, refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Adding MFA delete to an S3 bucket requires additional authentication when you change the version state of your bucket or you delete an object version, adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.","RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket.Note: -You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API. -You must use your 'root' account to enable MFA Delete on S3 buckets.**From Command line:**1. Run the s3api put-bucket-versioning command``` aws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa \"arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode\" ```","AdditionalInformation": ""}],"description": "Ensure MFA Delete is enabled on S3 buckets","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.3": {"name": "2.1.3","checks": {"macie_is_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://aws.amazon.com/macie/getting-started/:https://docs.aws.amazon.com/workspaces/latest/adminguide/data-protection.html:https://docs.aws.amazon.com/macie/latest/user/data-classification.html","Description": "Amazon S3 buckets can contain sensitive data that, for security purposes, should be discovered, monitored, classified and protected. 
Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.","DefaultValue": null,"AuditProcedure": "Perform the following steps to determine if Macie is running:**From Console:** 1. Login to the Macie console at https://console.aws.amazon.com/macie/ 2. In the left hand pane click on By job under findings. 3. Confirm that you have a Job setup for your S3 BucketsWhen you log into the Macie console, if you aren't taken to the summary page and you don't have a job setup and running, then refer to the remediation procedure below.If you are using a 3rd Party tool to manage and protect your S3 data, you meet this recommendation.","ImpactStatement": "There is a cost associated with using Amazon Macie. There is also typically a cost associated with 3rd Party tools that perform similar processes and protection.","AssessmentStatus": "Manual","RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information.Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.","RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie**From Console:**1. Log on to the Macie console at `https://console.aws.amazon.com/macie/`2. Click `Get started`.3. Click `Enable Macie`.Setup a repository for sensitive data discovery results1. In the Left pane, under Settings, click `Discovery results`.2. Make sure `Create bucket` is selected.3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number.4. Click on `Advanced`.5. Block all public access, make sure `Yes` is selected.6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket.7. Click on `Save`Create a job to discover sensitive data1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account.2. Select the `check box` for each bucket that you want Macie to analyze as part of the job3. Click `Create job`.4. Click `Quick create`.5. For the Name and description step, enter a name and, optionally, a description of the job.6. Then click `Next`.7. For the Review and create step, click `Submit`.Review your findings1. In the left pane, click `Findings`.2. To view the details of a specific finding, choose any field other than the check box for the finding.If you are using a 3rd Party tool to manage and protect your S3 data, follow the Vendor documentation for implementing and configuring that tool.","AdditionalInformation": ""}],"description": "Ensure all data in Amazon S3 has been discovered, classified and secured when required.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.4": {"name": "2.1.4","checks": {"s3_bucket_level_public_access_block": "PASS","s3_account_level_public_access_blocks": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.1. 
Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html","Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.","DefaultValue": null,"AuditProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Ensure that block public access settings are set appropriately for this bucket 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Find the public access setting on that bucket ``` aws s3api get-public-access-block --bucket  ``` Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"BlockPublicAcls\": true,\"IgnorePublicAcls\": true,\"BlockPublicPolicy\": true,\"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.**If utilizing Block Public Access (account settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block public access (account settings)` 3. Ensure that block public access settings are set appropriately for your AWS account.**From Command Line:**To check Public access settings for this account status, run the following command, `aws s3control get-public-access-block --account-id  --region `Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"BlockPublicAcls\": true, \"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.","ImpactStatement": "When you apply Block Public Access settings to an account, the settings apply to all AWS Regions globally. The settings might not take effect in all Regions immediately or simultaneously, but they eventually propagate to all Regions.","AssessmentStatus": "Automated","RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). Amazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account.Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.","RemediationProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. 
Click on 'Edit public access settings'. 4. Click 'Block all public access' 5. Repeat for all the buckets in your AWS account that contain sensitive data.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Set the Block Public Access to true on that bucket ``` aws s3api put-public-access-block --bucket  --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\" ```**If utilizing Block Public Access (account settings)****From Console:**If the output reads `true` for the separate configuration settings then it is set on the account.1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block Public Access (account settings)` 3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account 4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons. 5. When you're asked for confirmation, enter `confirm`. Then click `Confirm` to save your changes.**From Command Line:**To set Block Public access settings for this account, run the following command: ``` aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id  ```","AdditionalInformation": ""}],"description": "Ensure that S3 Buckets are configured with 'Block public access (bucket settings)'","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"2.2.1": {"name": "2.2.1","checks": {"ec2_ebs_volume_encryption": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.2. Elastic Compute Cloud (EC2)","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html:https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/","Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Verify `Always encrypt new EBS volumes` displays `Enabled`. 4. Review every region in-use.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. Run ``` aws --region  ec2 get-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Review every region in-use.**Note:** EBS volume encryption is configured per region.","ImpactStatement": "Losing access or removing the KMS key in use by the EBS volumes will result in no longer being able to access the volumes.","AssessmentStatus": "Automated","RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Click `Manage`. 4. Click the `Enable` checkbox. 5. Click `Update EBS encryption` 6. Repeat for every region requiring the change.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. 
Run ``` aws --region  ec2 enable-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Repeat for every region requiring the change.**Note:** EBS volume encryption is configured per region.","AdditionalInformation": "Default EBS volume encryption only applies to newly created EBS volumes. Existing EBS volumes are **not** converted automatically."}],"description": "Ensure EBS Volume Encryption is Enabled in all Regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.1": {"name": "2.3.1","checks": {"rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html:https://aws.amazon.com/blogs/database/selecting-the-right-encryption-options-for-amazon-rds-and-amazon-aurora-database-engines/#:~:text=With%20RDS%2Dencrypted%20resources%2C%20data,transparent%20to%20your%20database%20engine.:https://aws.amazon.com/rds/features/security/","Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/ 2. In the navigation pane, under RDS dashboard, click `Databases`. 3. Select the RDS Instance that you want to examine 4. Click `Instance Name` to see details, then click on `Configuration` tab. 5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status. 6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance. 7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region. 8. Change region from the top of the navigation bar and repeat audit for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name.``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ``` 3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance. 4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Databases are likely to hold sensitive and critical data; it is highly recommended to implement encryption in order to protect your data from unauthorized access or disclosure. With RDS encryption enabled, the data stored on the instance's underlying storage, the automated backups, read replicas, and snapshots, are all encrypted.","RemediationProcedure": "**From Console:**1. 
Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases` 3. Select the Database instance that needs to be encrypted. 4. Click on `Actions` button placed at the top right and select `Take Snapshot`. 5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`. 6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu. 7. On the Make Copy of DB Snapshot page, perform the following:- In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`. - Check `Copy Tags`, New snapshot must have the same tags as the source snapshot. - Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list.8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot. 9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance. 10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field. 11. Review the instance configuration details and click `Restore DB Instance`. 12. Once the new instance provisioning process is completed, you can update the application configuration to refer to the endpoint of the new encrypted database instance. Once the database endpoint is changed at the application level, you can remove the unencrypted instance.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name. ``` aws rds create-db-snapshot --region  --db-snapshot-identifier  --db-instance-identifier  ``` 3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key. ``` aws kms list-aliases --region  ``` 4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`. ``` aws rds copy-db-snapshot --region  --source-db-snapshot-identifier  --target-db-snapshot-identifier  --copy-tags --kms-key-id  ``` 5. Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration. ``` aws rds restore-db-instance-from-db-snapshot --region  --db-instance-identifier  --db-snapshot-identifier  ``` 6. 
Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region. The output will return each database instance identifier name; select the encrypted database name that was just created, DB-Name-Encrypted. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ```","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for RDS Instances","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.3.2": {"name": "2.3.2","checks": {"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Managing.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html:https://aws.amazon.com/rds/faqs/","Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to automatically receive minor engine upgrades during the specified maintenance window. This way, RDS instances can get the new features, bug fixes, and security patches for their database engines.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that you want to examine. 4. Click on the `Maintenance and backups` panel. 5. Under the `Maintenance` section, search for the Auto Minor Version Upgrade status. - If the current status is set to `Disabled`, it means the feature is not set and the minor engine upgrades released will not be applied to the selected RDS instance**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run again `describe-db-instances` command using the RDS instance identifier returned earlier to determine the Auto Minor Version Upgrade status for the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 4. The command output should return the feature current status. If the current status is set to `true`, the feature is enabled and the minor engine upgrades will be applied to the selected RDS instance.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS RDS will occasionally deprecate minor engine versions and provide new ones for an upgrade. When the last version number within the release is replaced, the version changed is considered minor. With Auto Minor Version Upgrade feature enabled, the version upgrades will occur automatically during the specified maintenance window so your RDS instances can get the new features, bug fixes, and security patches for their database engines.","RemediationProcedure": "**From Console:**1. 
Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that you want to update. 4. Click on the `Modify` button placed on the top right side. 5. On the `Modify DB Instance: ` page, In the `Maintenance` section, select `Auto minor version upgrade` and click on the `Yes` radio button. 6. At the bottom of the page click on `Continue`, check to Apply Immediately to apply the changes immediately, or select `Apply during the next scheduled maintenance window` to avoid any downtime. 7. Review the changes and click on `Modify DB Instance`. The instance status should change from available to modifying and back to available. Once the feature is enabled, the `Auto Minor Version Upgrade` status should change to `Yes`.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database instance names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run the `modify-db-instance` command to modify the selected RDS instance configuration. This command will apply the changes immediately; remove `--apply-immediately` to apply changes during the next scheduled maintenance window and avoid any downtime: ``` aws rds modify-db-instance --region  --db-instance-identifier  --auto-minor-version-upgrade --apply-immediately ``` 4. The command output should reveal the new configuration metadata for the RDS instance and check `AutoMinorVersionUpgrade` parameter value. 5. Run `describe-db-instances` command to check if the Auto Minor Version Upgrade feature has been successfully enabled: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 6. The command output should return the feature current status set to `true`; the feature is `enabled` and the minor engine upgrades will be applied to the selected RDS instance.","AdditionalInformation": ""}],"description": "Ensure Auto Minor Version Upgrade feature is Enabled for RDS Instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.3": {"name": "2.3.3","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html:https://aws.amazon.com/rds/faqs/","Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click `Databases`. 3. Select the RDS instance that you want to examine. 4. Click `Instance Name` from the dashboard, Under `Connectivity and Security`. 5. 
On the `Security` section, check if the Publicly Accessible flag status is set to `Yes`. If so, follow the below-mentioned steps to check database subnet access. - In the `networking` section, click the subnet link available under `Subnets` - The link will redirect you to the VPC Subnets page. - Select the subnet listed on the page and click the `Route Table` tab from the dashboard bottom panel. If the route table contains any entries with the destination CIDR block set to `0.0.0.0/0` and with an `Internet Gateway` attached, the selected RDS database instance was provisioned inside a public subnet, therefore it is not running within a logically isolated environment and can be accessible from the Internet. 6. Repeat steps no. 4 and 5 to determine the type (public or private) and subnet for other RDS database instances provisioned in the current region. 7. Change the AWS region from the navigation bar and repeat the audit process for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance `identifier`. 3. Run again `describe-db-instances` command using the `PubliclyAccessible` parameter as query filter to reveal the database instance Publicly Accessible flag status: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].PubliclyAccessible' ``` 4. Check the Publicly Accessible parameter status. If the Publicly Accessible flag is set to `Yes`, then the selected RDS database instance is publicly accessible and insecure; follow the below-mentioned steps to check database subnet access. 5. Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC subnet(s) associated with the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].DBSubnetGroup.Subnets[]' ``` - The command output should list the subnets available in the selected database subnet group. 6. Run `describe-route-tables` command using the ID of the subnet returned at the previous step to describe the routes of the VPC route table associated with the selected subnet: ``` aws ec2 describe-route-tables --region  --filters \"Name=association.subnet-id,Values=\" --query 'RouteTables[*].Routes[]' ``` - If the command returns the route table associated with the database instance subnet ID, check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet. - Or - If the command returns empty results, the route table is implicitly associated with the subnet, therefore the audit process continues with the next step 7. Run again `describe-db-instances` command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC ID associated with the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].DBSubnetGroup.VpcId' ``` - The command output should show the VPC ID in the selected database subnet group. 8. 
Now run `describe-route-tables` command using the ID of the VPC returned at the previous step to describe the routes of the VPC main route table implicitly associated with the selected subnet: ``` aws ec2 describe-route-tables --region  --filters \"Name=vpc-id,Values=\" \"Name=association.main,Values=true\" --query 'RouteTables[*].Routes[]' ``` - The command output returns the VPC main route table implicitly associated with database instance subnet ID. Check the `GatewayId` and `DestinationCidrBlock` attributes values returned in the output. If the route table contains any entries with the `GatewayId` value set to `igw-xxxxxxxx` and the `DestinationCidrBlock` value set to `0.0.0.0/0`, the selected RDS database instance was provisioned inside a public subnet, therefore it is not running within a logically isolated environment and does not adhere to AWS security best practices.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Ensure that no public-facing RDS database instances are provisioned in your AWS account and restrict unauthorized access in order to minimize security risks. When the RDS instance allows unrestricted access (0.0.0.0/0), everyone and everything on the Internet can establish a connection to your database and this can increase the opportunity for malicious activities such as brute force attacks, PostgreSQL injections, or DoS/DDoS attacks.","RemediationProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click `Databases`. 3. Select the RDS instance that you want to update. 4. Click `Modify` from the dashboard top menu. 5. On the Modify DB Instance panel, under the `Connectivity` section, click on `Additional connectivity configuration` and update the value for `Publicly Accessible` to Not publicly accessible to restrict public access. Follow the below steps to update subnet configurations: - Select the `Connectivity and security` tab, and click on the VPC attribute value inside the `Networking` section. - Select the `Details` tab from the VPC dashboard bottom panel and click on Route table configuration attribute value. - On the Route table details page, select the Routes tab from the dashboard bottom panel and click on `Edit routes`. - On the Edit routes page, update the Destination of the Target which is set to `igw-xxxxx` and click on `Save` routes. 6. On the Modify DB Instance panel, click on `Continue` and in the Scheduling of modifications section, perform one of the following actions based on your requirements: - Select Apply during the next scheduled maintenance window to apply the changes automatically during the next scheduled maintenance window. - Select Apply immediately to apply the changes right away. With this option, any pending modifications will be asynchronously applied as soon as possible, regardless of the maintenance window setting for this RDS database instance. Note that any changes available in the pending modifications queue are also applied. If any of the pending modifications require downtime, choosing this option can cause unexpected downtime for the application. 7. Repeat steps 3 to 6 for each RDS instance available in the current region. 8. Change the AWS region from the navigation bar to repeat the process for other regions.**From Command Line:**1. 
Run `describe-db-instances` command to list all RDS database instance identifiers, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run `modify-db-instance` command to modify the selected RDS instance configuration. Then use the following command to disable the `Publicly Accessible` flag for the selected RDS instances. This command uses the `--apply-immediately` flag; if you want to avoid any downtime, the `--no-apply-immediately` flag can be used: ``` aws rds modify-db-instance --region  --db-instance-identifier  --no-publicly-accessible --apply-immediately ``` 4. The command output should reveal the `PubliclyAccessible` configuration under pending values and should get applied at the specified time. 5. Updating the Internet Gateway Destination via AWS CLI is not currently supported. To update information about Internet Gateway use the AWS Console Procedure. 6. Repeat steps 1 to 5 for each RDS instance provisioned in the current region. 7. Change the AWS region by using the --region filter to repeat the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that public access is not given to RDS Instance","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.4.1": {"name": "2.4.1","checks": {"efs_encryption_at_rest_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.4. Elastic File System (EFS)","References": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/index.html#efs","Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).","DefaultValue": null,"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and Navigate to the `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Each item on the list has a visible Encrypted field that displays data at rest encryption status. 4. Validate that this field reads `Encrypted` for all EFS file systems in all AWS regions.**From CLI:** 1. Run describe-file-systems command using custom query filters to list the identifiers of all AWS EFS file systems currently available within the selected region: ``` aws efs describe-file-systems --region  --output table --query 'FileSystems[*].FileSystemId' ``` 2. The command output should return a table with the requested file system IDs. 3. Run describe-file-systems command using the ID of the file system that you want to examine as identifier and the necessary query filters: ``` aws efs describe-file-systems --region  --file-system-id  --query 'FileSystems[*].Encrypted' ``` 4. The command output should return the file system encryption status true or false. 
If the returned value is `false`, the selected AWS EFS file system is not encrypted and if the returned value is `true`, the selected AWS EFS file system is encrypted.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Data should be encrypted at rest to reduce the risk of a data breach via direct access to the storage device.","RemediationProcedure": "**It is important to note that EFS file system data at rest encryption must be turned on when creating the file system.**If an EFS file system has been created without data at rest encryption enabled then you must create another EFS file system with the correct configuration and transfer the data.**Steps to create an EFS file system with data encrypted at rest:****From Console:** 1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Click `Create File System` button from the dashboard top menu to start the file system setup process. 4. On the `Configure file system access` configuration page, perform the following actions. - Choose the right VPC from the VPC dropdown list. - Within Create mount targets section, select the checkboxes for all of the Availability Zones (AZs) within the selected VPC. These will be your mount targets. - Click `Next step` to continue.5. Perform the following on the `Configure optional settings` page. - Create `tags` to describe your new file system. - Choose `performance mode` based on your requirements. - Check `Enable encryption` checkbox and choose `aws/elasticfilesystem` from Select KMS master key dropdown list to enable encryption for the new file system using the default master key provided and managed by AWS KMS. - Click `Next step` to continue.6. Review the file system configuration details on the `review and create` page and then click `Create File System` to create your new AWS EFS file system. 7. Copy the data from the old unencrypted EFS file system onto the newly created encrypted file system. 8. Remove the unencrypted file system as soon as your data migration to the newly created encrypted file system is completed. 9. Change the AWS region from the navigation bar and repeat the entire process for other AWS regions.**From CLI:** 1. Run describe-file-systems command to describe the configuration information available for the selected (unencrypted) file system (see Audit section to identify the right resource): ``` aws efs describe-file-systems --region  --file-system-id  ``` 2. The command output should return the requested configuration information. 3. To provision a new AWS EFS file system, you need to generate a universally unique identifier (UUID) in order to create the token required by the create-file-system command. To create the required token, you can use a randomly generated UUID from \"https://www.uuidgenerator.net\". 4. Run create-file-system command using the unique token created at the previous step. ``` aws efs create-file-system --region  --creation-token  --performance-mode generalPurpose --encrypted ``` 5. The command output should return the new file system configuration metadata. 6. Run create-mount-target command using the newly created EFS file system ID returned at the previous step as identifier and the ID of the Availability Zone (AZ) that will represent the mount target: ``` aws efs create-mount-target --region  --file-system-id  --subnet-id  ``` 7. The command output should return the new mount target metadata. 8. 
Now you can mount your file system from an EC2 instance. 9. Copy the data from the old unencrypted EFS file system onto the newly created encrypted file system. 10. Remove the unencrypted file system as soon as your data migration to the newly created encrypted file system is completed. ``` aws efs delete-file-system --region  --file-system-id  ``` 11. Change the AWS region by updating the --region and repeat the entire process for other AWS regions.","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for EFS file systems","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 52,"requirements_failed": 11,"requirements_manual": 1,"total_requirements": 64,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "7aee25d8-9e9a-44e1-8e01-336bfd9d9582","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "aws_well_architected_framework_reliability_pillar_aws","framework": "AWS-Well-Architected-Framework-Reliability-Pillar","version": "","description": "Best Practices for the AWS Well-Architected Framework Reliability Pillar encompasses the ability of a workload to perform its intended function correctly and consistently when it's expected to. This includes the ability to operate and test the workload through its total lifecycle.","region": "eu-west-1","requirements": {"REL06-BP01": {"name": "REL06-BP01","checks": {"elb_logging_enabled": "FAIL","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","apigatewayv2_api_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_audit_logging_enabled": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"awslambda_function_invoke_api_operations_cloudtrail_logging_enabled": "PASS"},"status": "FAIL","attributes": [{"Name": "REL06-BP01 Monitor all components for the workload (Generation)","Section": "Change management","SubSection": "Monitor workload resources","Description": "Monitor components and services of AWS workload effectively, using tools like Amazon CloudWatch and AWS Health Dashboard. Define relevant metrics, set thresholds, and analyze metrics and logs for early detection of issues.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/rel_monitor_aws_resources_monitor_resources.html#implementation-guidance","WellArchitectedPracticeId": "rel_monitor_aws_resources_monitor_resources","WellArchitectedQuestionId": "monitor-aws-resources"}],"description": "Monitor components and services of AWS workload effectively, using tools like Amazon CloudWatch and AWS Health Dashboard. 
Define relevant metrics, set thresholds, and analyze metrics and logs for early detection of issues.","checks_status": {"fail": 5,"pass": 2,"total": 9,"manual": 0}},"REL09-BP03": {"name": "REL09-BP03","checks": {"rds_instance_backup_enabled": "PASS","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","cloudformation_stacks_termination_protection_enabled": "FAIL"},"status": "FAIL","attributes": [{"Name": "REL09-BP03 Perform data backup automatically","Section": "Failure management","SubSection": "Back up data","Description": "Configure backups to be taken automatically based on a periodic schedule informed by the Recovery Point Objective (RPO), or by changes in the dataset. Critical datasets with low data loss requirements need to be backed up automatically on a frequent basis, whereas less critical data where some loss is acceptable can be backed up less frequently.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/rel_backing_up_data_automated_backups_data.html#implementation-guidance","WellArchitectedPracticeId": "rel_backing_up_data_automated_backups_data","WellArchitectedQuestionId": "backing-up-data"}],"description": "Configure backups to be taken automatically based on a periodic schedule informed by the Recovery Point Objective (RPO), or by changes in the dataset. Critical datasets with low data loss requirements need to be backed up automatically on a frequent basis, whereas less critical data where some loss is acceptable can be backed up less frequently.","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"REL10-BP01": {"name": "REL10-BP01","checks": {"rds_instance_multi_az": "FAIL"},"status": "FAIL","attributes": [{"Name": "REL10-BP01 Deploy the workload to multiple locations","Section": "Failure management","SubSection": "Use fault isolation to protect your workload","Description": "Distribute workload data and resources across multiple Availability Zones or, where necessary, across AWS Regions. These locations can be as diverse as required.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/reliability-pillar/use-fault-isolation-to-protect-your-workload.html#implementation-guidance","WellArchitectedPracticeId": "rel_fault_isolation_multiaz_region_system","WellArchitectedQuestionId": "fault-isolation"}],"description": "Distribute workload data and resources across multiple Availability Zones or, where necessary, across AWS Regions. These locations can be as diverse as required.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 0,"requirements_failed": 3,"requirements_manual": 0,"total_requirements": 3,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "85c783d4-a01a-4297-b490-216e38ee144e","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "iso27001_2013_aws","framework": "ISO27001","version": "2013","description": "ISO (the International Organization for Standardization) and IEC (the International Electrotechnical Commission) form the specialized system for worldwide standardization. 
National bodies that are members of ISO or IEC participate in the development of International Standards through technical committees established by the respective organization to deal with particular fields of technical activity. ISO and IEC technical committees collaborate in fields of mutual interest. Other international organizations, governmental and non-governmental, in liaison with ISO and IEC, also take part in the work.","region": "eu-west-1","requirements": {"A.9.2": {"name": "User Access Management","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Category": "A.9 Access Control","Objetive_ID": "A.9.2","Check_Summary": "Ensure no root account access key exists","Objetive_Name": "User Access Management"}],"description": "Ensure no root account access key exists","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"A.9.3": {"name": "User Responsibilities","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Category": "A.9 Access Control","Objetive_ID": "A.9.3","Check_Summary": "Ensure credentials unused for 90 days or greater are disabled","Objetive_Name": "User Responsibilities"}],"description": "Ensure credentials unused for 90 days or greater are disabled","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"A.9.4": {"name": "System and Application Access Control","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Category": "A.9 Access Control","Objetive_ID": "A.9.4","Check_Summary": "Ensure no root account access key exists","Objetive_Name": "System and Application Access Control"}],"description": "Ensure no root account access key exists","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"A.10.1": {"name": "Cryptographic Controls","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Category": "A.10 Cryptography","Objetive_ID": "A.10.1","Check_Summary": "Detect Customer Master Keys (CMKs) scheduled for deletion","Objetive_Name": "Cryptographic Controls"}],"description": "Detect Customer Master Keys (CMKs) scheduled for deletion","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"A.12.4": {"name": "Logging and Monitoring","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Category": "A.12 Operations Security","Objetive_ID": "A.12.4","Check_Summary": "Ensure CloudTrail is enabled in all regions","Objetive_Name": "Logging and Monitoring"}],"description": "Ensure CloudTrail is enabled in all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"A.12.6": {"name": "Technical Vulnerability Management","checks": {"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Category": "A.12 Operations Security","Objetive_ID": "A.12.6","Check_Summary": "Ensure the S3 bucket CloudTrail logs to is not publicly accessible","Objetive_Name": "Technical Vulnerability Management"}],"description": "Ensure the S3 bucket CloudTrail logs to is not publicly accessible","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"A.13.1": {"name": "Network Security Management","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Category": "A.13 Communications Security","Objetive_ID": "A.13.1","Check_Summary": "Ensure RDS instances are not accessible to the world.","Objetive_Name": "Network Security Management"}],"description": "Ensure 
RDS instances are not accessible to the world.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}}},"requirements_passed": 79,"requirements_failed": 0,"requirements_manual": 0,"total_requirements": 79,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "86401f28-9311-42b9-ac06-a3cdcc9e5e39","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "soc2_aws","framework": "SOC2","version": "","description": "System and Organization Controls (SOC), defined by the American Institute of Certified Public Accountants (AICPA), is the name of a set of reports that's produced during an audit. It's intended for use by service organizations (organizations that provide information systems as a service to other organizations) to issue validated reports of internal controls over those information systems to the users of those services. The reports focus on controls grouped into five categories known as Trust Service Principles.","region": "eu-west-1","requirements": {"p_1_1": {"name": "P1.1 The entity provides notice to data subjects about its privacy practices to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_1_1","Section": "P1.0 - Privacy Criteria Related to Notice and Communication of Objectives Related to Privacy","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity provides notice to data subjects about its privacy practices to meet the entity's objectives related to privacy. The notice is updated and communicated to data subjects in a timely manner for changes to the entity's privacy practices, including changes in the use of personal information, to meet the entity's objectives related to privacy. Communicates to Data Subjects - Notice is provided to data subjects regarding the following: Purpose for collecting personal information; Choice and consent; Types of personal information collected; Methods of collection (for example, use of cookies or other tracking techniques); Use, retention, and disposal; Access; Disclosure to third parties; Security for privacy; Quality, including data subjects' responsibilities for quality; Monitoring and enforcement. If personal information is collected from sources other than the individual, such sources are described in the privacy notice. Provides Notice to Data Subjects - Notice is provided to data subjects (1) at or before the time personal information is collected or as soon as practical thereafter, (2) at or before the entity changes its privacy notice or as soon as practical thereafter, or (3) before personal information is used for new purposes not previously identified. Covers Entities and Activities in Notice - An objective description of the entities and activities covered is included in the entity's privacy notice. 
Uses Clear and Conspicuous Language - The entity's privacy notice is conspicuous and uses clear language.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_2_1": {"name": "P2.1 The entity communicates choices available regarding the collection, use, retention, disclosure, and disposal of personal information to the data subjects and the consequences, if any, of each choice","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_2_1","Section": "P2.0 - Privacy Criteria Related to Choice and Consent","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity communicates choices available regarding the collection, use, retention, disclosure, and disposal of personal information to the data subjects and the consequences, if any, of each choice. Explicit consent for the collection, use, retention, disclosure, and disposal of personal information is obtained from data subjects or other authorized persons, if required. Such consent is obtained only for the intended purpose of the information to meet the entity's objectives related to privacy. The entity's basis for determining implicit consent for the collection, use, retention, disclosure, and disposal of personal information is documented. Communicates to Data Subjects - Data subjects are informed (a) about the choices available to them with respect to the collection, use, and disclosure of personal information and (b) that implicit or explicit consent is required to collect, use, and disclose personal information, unless a law or regulation specifically requires or allows otherwise. Communicates Consequences of Denying or Withdrawing Consent - When personal information is collected, data subjects are informed of the consequences of refusing to provide personal information or denying or withdrawing consent to use personal information for purposes identified in the notice. Obtains Implicit or Explicit Consent - Implicit or explicit consent is obtained from data subjects at or before the time personal information is collected or soon thereafter. The individual's preferences expressed in his or her consent are confirmed and implemented. Documents and Obtains Consent for New Purposes and Uses - If information that was previously collected is to be used for purposes not previously identified in the privacy notice, the new purpose is documented, the data subject is notified, and implicit or explicit consent is obtained prior to such new use or purpose. Obtains Explicit Consent for Sensitive Information - Explicit consent is obtained directly from the data subject when sensitive personal information is collected, used, or disclosed, unless a law or regulation specifically requires otherwise.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_3_1": {"name": "P3.1 Personal information is collected consistent with the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_3_1","Section": "P3.0 - Privacy Criteria Related to Collection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Limits the Collection of Personal Information - The collection of personal information is limited to that necessary to meet the entity's objectives. 
Collects Information by Fair and Lawful Means - Methods of collecting personal information are reviewed by management before they are implemented to confirm that personal information is obtained (a) fairly, without intimidation or deception, and (b) lawfully, adhering to all relevant rules of law, whether derived from statute or common law, relating to the collection of personal information. Collects Information From Reliable Sources - Management confirms that third parties from whom personal information is collected (that is, sources other than the individual) are reliable sources that collect information fairly and lawfully. Informs Data Subjects When Additional Information Is Acquired - Data subjects are informed if the entity develops or acquires additional information about them for its use.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_3_2": {"name": "P3.2 For information requiring explicit consent, the entity communicates the need for such consent, as well as the consequences of a failure to provide consent for the request for personal information, and obtains the consent prior to the collection of the information to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_3_2","Section": "P3.0 - Privacy Criteria Related to Collection","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Obtains Explicit Consent for Sensitive Information - Explicit consent is obtained directly from the data subject when sensitive personal information is collected, used, or disclosed, unless a law or regulation specifically requires otherwise. Documents Explicit Consent to Retain Information - Documentation of explicit consent for the collection, use, or disclosure of sensitive personal information is retained in accordance with objectives related to privacy.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_4_1": {"name": "P4.1 The entity limits the use of personal information to the purposes identified in the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_4_1","Section": "P4.0 - Privacy Criteria Related to Use, Retention, and Disposal","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Uses Personal Information for Intended Purposes - Personal information is used only for the intended purposes for which it was collected and only when implicit or explicit consent has been obtained unless a law or regulation specifically requires otherwise.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_4_2": {"name": "P4.2 The entity retains personal information consistent with the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_4_2","Section": "P4.0 - Privacy Criteria Related to Use, Retention, and Disposal","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Retains Personal Information - Personal information is retained for no longer than necessary to fulfill the stated purposes, unless a law or regulation specifically requires otherwise. 
Protects Personal Information - Policies and procedures have been implemented to protect personal information from erasure or destruction during the specified retention period of the information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_4_3": {"name": "P4.3 The entity securely disposes of personal information to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_4_3","Section": "P4.0 - Privacy Criteria Related to Use, Retention, and Disposal","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Captures, Identifies, and Flags Requests for Deletion - Requests for deletion of personal information are captured, and information related to the requests is identified and flagged for destruction to meet the entity's objectives related to privacy. Disposes of, Destroys, and Redacts Personal Information - Personal information no longer retained is anonymized, disposed of, or destroyed in a manner that prevents loss, theft, misuse, or unauthorized access. Destroys Personal Information - Policies and procedures are implemented to erase or otherwise destroy personal information that has been identified for destruction.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_5_1": {"name": "P5.1 The entity grants identified and authenticated data subjects the ability to access their stored personal information for review and, upon request, provides physical or electronic copies of that information to data subjects to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_5_1","Section": "P5.0 - Privacy Criteria Related to Access","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity grants identified and authenticated data subjects the ability to access their stored personal information for review and, upon request, provides physical or electronic copies of that information to data subjects to meet the entity's objectives related to privacy. If access is denied, data subjects are informed of the denial and reason for such denial, as required, to meet the entity's objectives related to privacy. Authenticates Data Subjects' Identity - The identity of data subjects who request access to their personal information is authenticated before they are given access to that information. Permits Data Subjects Access to Their Personal Information - Data subjects are able to determine whether the entity maintains personal information about them and, upon request, may obtain access to their personal information. Provides Understandable Personal Information Within Reasonable Time - Personal information is provided to data subjects in an understandable form, in a reasonable time frame, and at a reasonable cost, if any. 
Informs Data Subjects If Access Is Denied - When data subjects are denied access to their personal information, the entity informs them of the denial and the reason for the denial in a timely manner, unless prohibited by law or regulation.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_5_2": {"name": "P5.2 The entity corrects, amends, or appends personal information based on information provided by data subjects and communicates such information to third parties, as committed or required, to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_5_2","Section": "P5.0 - Privacy Criteria Related to Access","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity corrects, amends, or appends personal information based on information provided by data subjects and communicates such information to third parties, as committed or required, to meet the entity's objectives related to privacy. If a request for correction is denied, data subjects are informed of the denial and reason for such denial to meet the entity's objectives related to privacy. Communicates Denial of Access Requests - Data subjects are informed, in writing, of the reason a request for access to their personal information was denied, the source of the entity's legal right to deny such access, if applicable, and the individual's right, if any, to challenge such denial, as specifically permitted or required by law or regulation. Permits Data Subjects to Update or Correct Personal Information - Data subjects are able to update or correct personal information held by the entity. The entity provides such updated or corrected information to third parties that were previously provided with the data subject's personal information consistent with the entity's objective related to privacy. Communicates Denial of Correction Requests - Data subjects are informed, in writing, about the reason a request for correction of personal information was denied and how they may appeal.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_1": {"name": "P6.1 The entity discloses personal information to third parties with the explicit consent of data subjects, and such consent is obtained prior to disclosure to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_1","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Communicates Privacy Policies to Third Parties - Privacy policies or other specific instructions or requirements for handling personal information are communicated to third parties to whom personal information is disclosed. Discloses Personal Information Only When Appropriate - Personal information is disclosed to third parties only for the purposes for which it was collected or created and only when implicit or explicit consent has been obtained from the data subject, unless a law or regulation specifically requires otherwise. Discloses Personal Information Only to Appropriate Third Parties - Personal information is disclosed only to third parties who have agreements with the entity to protect personal information in a manner consistent with the relevant aspects of the entity's privacy notice or other specific instructions or requirements. 
The entity has procedures in place to evaluate that the third parties have effective controls to meet the terms of the agreement, instructions, or requirements.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_2": {"name": "P6.2 The entity creates and retains a complete, accurate, and timely record of authorized disclosures of personal information to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_2","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Creates and Retains Record of Authorized Disclosures - The entity creates and maintains a record of authorized disclosures of personal information that is complete, accurate, and timely.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_3": {"name": "P6.3 The entity creates and retains a complete, accurate, and timely record of detected or reported unauthorized disclosures (including breaches) of personal information to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_3","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Creates and Retains Record of Detected or Reported Unauthorized Disclosures - The entity creates and maintains a record of detected or reported unauthorized disclosures of personal information that is complete, accurate, and timely.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_4": {"name": "P6.4 The entity obtains privacy commitments from vendors and other third parties who have access to personal information to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_4","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity obtains privacy commitments from vendors and other third parties who have access to personal information to meet the entity's objectives related to privacy. The entity assesses those parties' compliance on a periodic and as-needed basis and takes corrective action, if necessary. Discloses Personal Information Only to Appropriate Third Parties - Personal information is disclosed only to third parties who have agreements with the entity to protect personal information in a manner consistent with the relevant aspects of the entity's privacy notice or other specific instructions or requirements. The entity has procedures in place to evaluate that the third parties have effective controls to meet the terms of the agreement, instructions, or requirements. 
Remediates Misuse of Personal Information by a Third Party - The entity takes remedial action in response to misuse of personal information by a third party to whom the entity has transferred such information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_5": {"name": "P6.5 The entity obtains commitments from vendors and other third parties with access to personal information to notify the entity in the event of actual or suspected unauthorized disclosures of personal information","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_5","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity obtains commitments from vendors and other third parties with access to personal information to notify the entity in the event of actual or suspected unauthorized disclosures of personal information. Such notifications are reported to appropriate personnel and acted on in accordance with established incident response procedures to meet the entity's objectives related to privacy. Remediates Misuse of Personal Information by a Third Party - The entity takes remedial action in response to misuse of personal information by a third party to whom the entity has transferred such information. Reports Actual or Suspected Unauthorized Disclosures - A process exists for obtaining commitments from vendors and other third parties to report to the entity actual or suspected unauthorized disclosures of personal information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_6": {"name": "P6.6 The entity provides notification of breaches and incidents to affected data subjects, regulators, and others to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_6","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Remediates Misuse of Personal Information by a Third Party - The entity takes remedial action in response to misuse of personal information by a third party to whom the entity has transferred such information. Reports Actual or Suspected Unauthorized Disclosures - A process exists for obtaining commitments from vendors and other third parties to report to the entity actual or suspected unauthorized disclosures of personal information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_6_7": {"name": "P6.7 The entity provides data subjects with an accounting of the personal information held and disclosure of the data subjects' personal information, upon the data subjects' request, to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_6_7","Section": "P6.0 - Privacy Criteria Related to Disclosure and Notification","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Identifies Types of Personal Information and Handling Process - The types of personal information and sensitive personal information and the related processes, systems, and third parties involved in the handling of such information are identified. 
Captures, Identifies, and Communicates Requests for Information - Requests for an accounting of personal information held and disclosures of the data subjects' personal information are captured, and information related to the requests is identified and communicated to data subjects to meet the entity's objectives related to privacy.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_7_1": {"name": "P7.1 The entity collects and maintains accurate, up-to-date, complete, and relevant personal information to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_7_1","Section": "P7.0 - Privacy Criteria Related to Quality","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Ensures Accuracy and Completeness of Personal Information - Personal information is accurate and complete for the purposes for which it is to be used. Ensures Relevance of Personal Information - Personal information is relevant to the purposes for which it is to be used.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"p_8_1": {"name": "P8.1 The entity implements a process for receiving, addressing, resolving, and communicating the resolution of inquiries, complaints, and disputes from data subjects and others and periodically monitors compliance to meet the entity's objectives related to privacy","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "p_8_1","Section": "P8.0 - Privacy Criteria Related to Monitoring and Enforcement","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The entity implements a process for receiving, addressing, resolving, and communicating the resolution of inquiries, complaints, and disputes from data subjects and others and periodically monitors compliance to meet the entity's objectives related to privacy. Corrections and other necessary actions related to identified deficiencies are made or taken in a timely manner. Communicates to Data Subjects - Data subjects are informed about how to contact the entity with inquiries, complaints, and disputes. Addresses Inquiries, Complaints, and Disputes - A process is in place to address inquiries, complaints, and disputes. Documents and Communicates Dispute Resolution and Recourse - Each complaint is addressed, and the resolution is documented and communicated to the individual. Documents and Reports Compliance Review Results - Compliance with objectives related to privacy are reviewed and documented, and the results of such reviews are reported to management. If problems are identified, remediation plans are developed and implemented. Documents and Reports Instances of Noncompliance - Instances of noncompliance with objectives related to privacy are documented and reported and, if needed, corrective and disciplinary measures are taken on a timely basis. 
Performs Ongoing Monitoring - Ongoing procedures are performed for monitoring the effectiveness of controls over personal information and for taking timely corrective actions when necessary.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_1_1": {"name": "CC1.1 COSO Principle 1: The entity demonstrates a commitment to integrity and ethical values","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_1_1","Section": "CC1.0 - Common Criteria Related to Control Environment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Sets the Tone at the Top - The board of directors and management, at all levels, demonstrate through their directives, actions, and behavior the importance of integrity and ethical values to support the functioning of the system of internal control. Establishes Standards of Conduct - The expectations of the board of directors and senior management concerning integrity and ethical values are defined in the entity's standards of conduct and understood at all levels of the entity and by outsourced service providers and business partners. Evaluates Adherence to Standards of Conduct - Processes are in place to evaluate the performance of individuals and teams against the entity's expected standards of conduct. Addresses Deviations in a Timely Manner - Deviations from the entity's expected standards of conduct are identified and remedied in a timely and consistent manner.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_1_2": {"name": "CC1.2 COSO Principle 2: The board of directors demonstrates independence from management and exercises oversight of the development and performance of internal control","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_1_2","Section": "CC1.0 - Common Criteria Related to Control Environment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establishes Oversight Responsibilities - The board of directors identifies and accepts its oversight responsibilities in relation to established requirements and expectations. Applies Relevant Expertise - The board of directors defines, maintains, and periodically evaluates the skills and expertise needed among its members to enable them to ask probing questions of senior management and take commensurate action. Operates Independently - The board of directors has sufficient members who are independent from management and objective in evaluations and decision making. 
Additional point of focus specifically related to all engagements using the trust services criteria: Supplements Board Expertise - The board of directors supplements its expertise relevant to security, availability, processing integrity, confidentiality, and privacy, as needed, through the use of a subcommittee or consultants.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_1_3": {"name": "CC1.3 COSO Principle 3: Management establishes, with board oversight, structures, reporting lines, and appropriate authorities and responsibilities in the pursuit of objectives","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_1_3","Section": "CC1.0 - Common Criteria Related to Control Environment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Considers All Structures of the Entity - Management and the board of directors consider the multiple structures used (including operating units, legal entities, geographic distribution, and outsourced service providers) to support the achievement of objectives. Establishes Reporting Lines - Management designs and evaluates lines of reporting for each entity structure to enable execution of authorities and responsibilities and flow of information to manage the activities of the entity. Defines, Assigns, and Limits Authorities and Responsibilities - Management and the board of directors delegate authority, define responsibilities, and use appropriate processes and technology to assign responsibility and segregate duties as necessary at the various levels of the organization. Additional points of focus specifically related to all engagements using the trust services criteria: Addresses Specific Requirements When Defining Authorities and Responsibilities - Management and the board of directors consider requirements relevant to security, availability, processing integrity, confidentiality, and privacy when defining authorities and responsibilities. Considers Interactions With External Parties When Establishing Structures, Reporting Lines, Authorities, and Responsibilities - Management and the board of directors consider the need for the entity to interact with and monitor the activities of external parties when establishing structures, reporting lines, authorities, and responsibilities.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"cc_1_4": {"name": "CC1.4 COSO Principle 4: The entity demonstrates a commitment to attract, develop, and retain competent individuals in alignment with objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_1_4","Section": "CC1.0 - Common Criteria Related to Control Environment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establishes Policies and Practices - Policies and practices reflect expectations of competence necessary to support the achievement of objectives. Evaluates Competence and Addresses Shortcomings - The board of directors and management evaluate competence across the entity and in outsourced service providers in relation to established policies and practices and act as necessary to address shortcomings. 
Attracts, Develops, and Retains Individuals - The entity provides the mentoring and training needed to attract, develop, and retain sufficient and competent personnel and outsourced service providers to support the achievement of objectives. Plans and Prepares for Succession - Senior management and the board of directors develop contingency plans for assignments of responsibility important for internal control. Additional point of focus specifically related to all engagements using the trust services criteria: Considers the Background of Individuals - The entity considers the background of potential and existing personnel, contractors, and vendor employees when determining whether to employ and retain the individuals. Considers the Technical Competency of Individuals - The entity considers the technical competency of potential and existing personnel, contractors, and vendor employees when determining whether to employ and retain the individuals. Provides Training to Maintain Technical Competencies - The entity provides training programs, including continuing education and training, to ensure skill sets and technical competency of existing personnel, contractors, and vendor employees are developed and maintained.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_1_5": {"name": "CC1.5 COSO Principle 5: The entity holds individuals accountable for their internal control responsibilities in the pursuit of objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_1_5","Section": "CC1.0 - Common Criteria Related to Control Environment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Enforces Accountability Through Structures, Authorities, and Responsibilities - Management and the board of directors establish the mechanisms to communicate and hold individuals accountable for performance of internal control responsibilities across the entity and implement corrective action as necessary. Establishes Performance Measures, Incentives, and Rewards - Management and the board of directors establish performance measures, incentives, and other rewards appropriate for responsibilities at all levels of the entity, reflecting appropriate dimensions of performance and expected standards of conduct, and considering the achievement of both short-term and longer-term objectives. Evaluates Performance Measures, Incentives, and Rewards for Ongoing Relevance - Management and the board of directors align incentives and rewards with the fulfillment of internal control responsibilities in the achievement of objectives. Considers Excessive Pressures - Management and the board of directors evaluate and adjust pressures associated with the achievement of objectives as they assign responsibilities, develop performance measures, and evaluate performance. 
Evaluates Performance and Rewards or Disciplines Individuals - Management and the board of directors evaluate performance of internal control responsibilities, including adherence to standards of conduct and expected levels of competence, and provide rewards or exercise disciplinary action, as appropriate.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_2_1": {"name": "CC2.1 COSO Principle 13: The entity obtains or generates and uses relevant, quality information to support the functioning of internal control","checks": {"cloudtrail_multi_region_enabled": "PASS","config_recorder_all_regions_enabled": null,"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_2_1","Section": "CC2.0 - Common Criteria Related to Communication and Information","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Identifies Information Requirements - A process is in place to identify the information required and expected to support the functioning of the other components of internal control and the achievement of the entity's objectives. Captures Internal and External Sources of Data - Information systems capture internal and external sources of data. Processes Relevant Data Into Information - Information systems process and transform relevant data into information. Maintains Quality Throughout Processing - Information systems produce information that is timely, current, accurate, complete, accessible, protected, verifiable, and retained. Information is reviewed to assess its relevance in supporting the internal control components.","checks_status": {"fail": 0,"pass": 1,"total": 4,"manual": 0}},"cc_2_2": {"name": "CC2.2 COSO Principle 14: The entity internally communicates information, including objectives and responsibilities for internal control, necessary to support the functioning of internal control","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_2_2","Section": "CC2.0 - Common Criteria Related to Communication and Information","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Communicates Internal Control Information - A process is in place to communicate required information to enable all personnel to understand and carry out their internal control responsibilities. Communicates With the Board of Directors - Communication exists between management and the board of directors so that both have information needed to fulfill their roles with respect to the entity's objectives. Provides Separate Communication Lines - Separate communication channels, such as whistle-blower hotlines, are in place and serve as fail-safe mechanisms to enable anonymous or confidential communication when normal channels are inoperative or ineffective. Selects Relevant Method of Communication - The method of communication considers the timing, audience, and nature of the information. Additional point of focus specifically related to all engagements using the trust services criteria: Communicates Responsibilities - Entity personnel with responsibility for designing, developing, implementing, operating, maintaining, or monitoring system controls receive communications about their responsibilities, including changes in their responsibilities, and have the information necessary to carry out those responsibilities. 
Communicates Information on Reporting Failures, Incidents, Concerns, and Other Matters - Entity personnel are provided with information on how to report systems failures, incidents, concerns, and other complaints to personnel. Communicates Objectives and Changes to Objectives - The entity communicates its objectives and changes to those objectives to personnel in a timely manner. Communicates Information to Improve Security Knowledge and Awareness - The entity communicates information to improve security knowledge and awareness and to model appropriate security behaviors to personnel through a security awareness training program. Additional points of focus that apply only when an engagement using the trust services criteria is performed at the system level: Communicates Information About System Operation and Boundaries - The entity prepares and communicates information about the design and operation of the system and its boundaries to authorized personnel to enable them to understand their role in the system and the results of system operation. Communicates System Objectives - The entity communicates its objectives to personnel to enable them to carry out their responsibilities. Communicates System Changes - System changes that affect responsibilities or the achievement of the entity's objectives are communicated in a timely manner.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_2_3": {"name": "CC2.3 COSO Principle 15: The entity communicates with external parties regarding matters affecting the functioning of internal control","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_2_3","Section": "CC2.0 - Common Criteria Related to Communication and Information","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Communicates to External Parties - Processes are in place to communicate relevant and timely information to external parties, including shareholders, partners, owners, regulators, customers, financial analysts, and other external parties. Enables Inbound Communications - Open communication channels allow input from customers, consumers, suppliers, external auditors, regulators, financial analysts, and others, providing management and the board of directors with relevant information. Communicates With the Board of Directors - Relevant information resulting from assessments conducted by external parties is communicated to the board of directors. Provides Separate Communication Lines - Separate communication channels, such as whistle-blower hotlines, are in place and serve as fail-safe mechanisms to enable anonymous or confidential communication when normal channels are inoperative or ineffective. Selects Relevant Method of Communication - The method of communication considers the timing, audience, and nature of the communication and legal, regulatory, and fiduciary requirements and expectations. Communicates Objectives Related to Confidentiality and Changes to Objectives - The entity communicates, to external users, vendors, business partners and others whose products and services are part of the system, objectives and changes to objectives related to confidentiality. 
Additional point of focus that applies only to an engagement using the trust services criteria for privacy: Communicates Objectives Related to Privacy and Changes to Objectives - The entity communicates, to external users, vendors, business partners and others whose products and services are part of the system, objectives related to privacy and changes to those objectives. Additional points of focus that apply only when an engagement using the trust services criteria is performed at the system level: Communicates Information About System Operation and Boundaries - The entity prepares and communicates information about the design and operation of the system and its boundaries to authorized external users to permit users to understand their role in the system and the results of system operation. Communicates System Objectives - The entity communicates its system objectives to appropriate external users. Communicates System Responsibilities - External users with responsibility for designing, developing, implementing, operating, maintaining, and monitoring system controls receive communications about their responsibilities and have the information necessary to carry out those responsibilities. Communicates Information on Reporting System Failures, Incidents, Concerns, and Other Matters - External users are provided with information on how to report systems failures, incidents, concerns, and other complaints to appropriate personnel.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_3_1": {"name": "CC3.1 COSO Principle 6: The entity specifies objectives with sufficient clarity to enable the identification and assessment of risks relating to objectives","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_3_1","Section": "CC3.0 - Common Criteria Related to Risk Assessment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Operations Objectives: Reflects Management's Choices - Operations objectives reflect management's choices about structure, industry considerations, and performance of the entity. Considers Tolerances for Risk - Management considers the acceptable levels of variation relative to the achievement of operations objectives. External Financial Reporting Objectives: Complies With Applicable Accounting Standards - Financial reporting objectives are consistent with accounting principles suitable and available for that entity. The accounting principles selected are appropriate in the circumstances. External Nonfinancial Reporting Objectives: Complies With Externally Established Frameworks - Management establishes objectives consistent with laws and regulations or standards and frameworks of recognized external organizations. Reflects Entity Activities - External reporting reflects the underlying transactions and events within a range of acceptable limits. Considers the Required Level of Precision - Management reflects the required level of precision and accuracy suitable for user needs and based on criteria established by third parties in nonfinancial reporting. Internal Reporting Objectives: Reflects Management's Choices - Internal reporting provides management with accurate and complete information regarding management's choices and information needed in managing the entity. 
Considers the Required Level of Precision - Management reflects the required level of precision and accuracy suitable for user needs in nonfinancial reporting objectives and materiality within financial reporting objectives. Reflects Entity Activities - Internal reporting reflects the underlying transactions and events within a range of acceptable limits. Compliance Objectives: Reflects External Laws and Regulations - Laws and regulations establish minimum standards of conduct, which the entity integrates into compliance objectives. Considers Tolerances for Risk - Management considers the acceptable levels of variation relative to the achievement of operations objectives. Additional point of focus specifically related to all engagements using the trust services criteria: Establishes Sub-objectives to Support Objectives - Management identifies sub-objectives related to security, availability, processing integrity, confidentiality, and privacy to support the achievement of the entity's objectives related to reporting, operations, and compliance.","checks_status": {"fail": 0,"pass": 2,"total": 3,"manual": 0}},"cc_3_2": {"name": "CC3.2 COSO Principle 7: The entity identifies risks to the achievement of its objectives across the entity and analyzes risks as a basis for determining how the risks should be managed","checks": {"guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_3_2","Section": "CC3.0 - Common Criteria Related to Risk Assessment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Includes Entity, Subsidiary, Division, Operating Unit, and Functional Levels - The entity identifies and assesses risk at the entity, subsidiary, division, operating unit, and functional levels relevant to the achievement of objectives. Analyzes Internal and External Factors - Risk identification considers both internal and external factors and their impact on the achievement of objectives. Involves Appropriate Levels of Management - The entity puts into place effective risk assessment mechanisms that involve appropriate levels of management. Estimates Significance of Risks Identified - Identified risks are analyzed through a process that includes estimating the potential significance of the risk. Determines How to Respond to Risks - Risk assessment includes considering how the risk should be managed and whether to accept, avoid, reduce, or share the risk. 
Additional points of focus specifically related to all engagements using the trust services criteria: Identifies and Assesses Criticality of Information Assets and Identifies Threats and Vulnerabilities - The entity's risk identification and assessment process includes (1) identifying information assets, including physical devices and systems, virtual devices, software, data and data flows, external information systems, and organizational roles; (2) assessing the criticality of those information assets; (3) identifying the threats to the assets from intentional (including malicious) and unintentional acts and environmental events; and (4) identifying the vulnerabilities of the identified assets.","checks_status": {"fail": 3,"pass": 1,"total": 5,"manual": 0}},"cc_3_3": {"name": "CC3.3 COSO Principle 8: The entity considers the potential for fraud in assessing risks to the achievement of objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_3_3","Section": "CC3.0 - Common Criteria Related to Risk Assessment","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Considers Various Types of Fraud - The assessment of fraud considers fraudulent reporting, possible loss of assets, and corruption resulting from the various ways that fraud and misconduct can occur. Assesses Incentives and Pressures - The assessment of fraud risks considers incentives and pressures. Assesses Opportunities - The assessment of fraud risk considers opportunities for unauthorized acquisition, use, or disposal of assets, altering the entity's reporting records, or committing other inappropriate acts. Assesses Attitudes and Rationalizations - The assessment of fraud risk considers how management and other personnel might engage in or justify inappropriate actions. Additional point of focus specifically related to all engagements using the trust services criteria: Considers the Risks Related to the Use of IT and Access to Information - The assessment of fraud risks includes consideration of threats and vulnerabilities that arise specifically from the use of IT and access to information.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_3_4": {"name": "CC3.4 COSO Principle 9: The entity identifies and assesses changes that could significantly impact the system of internal control","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_3_4","Section": "CC3.0 - Common Criteria Related to Risk Assessment","Service": "config","SubGroup": null,"SubSection": null}],"description": "Assesses Changes in the External Environment - The risk identification process considers changes to the regulatory, economic, and physical environment in which the entity operates. Assesses Changes in the Business Model - The entity considers the potential impacts of new business lines, dramatically altered compositions of existing business lines, acquired or divested business operations on the system of internal control, rapid growth, changing reliance on foreign geographies, and new technologies. Assesses Changes in Leadership - The entity considers changes in management and respective attitudes and philosophies on the system of internal control. Assesses Changes in Systems and Technology - The risk identification process considers changes arising from changes in the entity's systems and changes in the technology environment. 
Assesses Changes in Vendor and Business Partner Relationships - The risk identification process considers changes in vendor and business partner relationships.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"cc_4_1": {"name": "CC4.1 COSO Principle 16: The entity selects, develops, and performs ongoing and/or separate evaluations to ascertain whether the components of internal control are present and functioning","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_4_1","Section": "CC4.0 - Monitoring Activities","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Considers a Mix of Ongoing and Separate Evaluations - Management includes a balance of ongoing and separate evaluations. Considers Rate of Change - Management considers the rate of change in business and business processes when selecting and developing ongoing and separate evaluations. Establishes Baseline Understanding - The design and current state of an internal control system are used to establish a baseline for ongoing and separate evaluations. Uses Knowledgeable Personnel - Evaluators performing ongoing and separate evaluations have sufficient knowledge to understand what is being evaluated. Integrates With Business Processes - Ongoing evaluations are built into the business processes and adjust to changing conditions. Adjusts Scope and Frequency - Management varies the scope and frequency of separate evaluations depending on risk. Objectively Evaluates - Separate evaluations are performed periodically to provide objective feedback. Considers Different Types of Ongoing and Separate Evaluations - Management uses a variety of different types of ongoing and separate evaluations, including penetration testing, independent certification made against established specifications (for example, ISO certifications), and internal audit assessments.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_4_2": {"name": "CC4.2 COSO Principle 17: The entity evaluates and communicates internal control deficiencies in a timely manner to those parties responsible for taking corrective action, including senior management and the board of directors, as appropriate","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_4_2","Section": "CC4.0 - Monitoring Activities","Service": "guardduty","SubGroup": null,"SubSection": null}],"description": "Assesses Results - Management and the board of directors, as appropriate, assess results of ongoing and separate evaluations. Communicates Deficiencies - Deficiencies are communicated to parties responsible for taking corrective action and to senior management and the board of directors, as appropriate. Monitors Corrective Action - Management tracks whether deficiencies are remedied on a timely basis.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"cc_5_1": {"name": "CC5.1 COSO Principle 10: The entity selects and develops control activities that contribute to the mitigation of risks to the achievement of objectives to acceptable levels","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_5_1","Section": "CC5.0 - Control Activities","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Integrates With Risk Assessment - Control activities help ensure that risk responses that address and mitigate risks are carried out. 
Considers Entity-Specific Factors - Management considers how the environment, complexity, nature, and scope of its operations, as well as the specific characteristics of its organization, affect the selection and development of control activities. Determines Relevant Business Processes - Management determines which relevant business processes require control activities. Evaluates a Mix of Control Activity Types - Control activities include a range and variety of controls and may include a balance of approaches to mitigate risks, considering both manual and automated controls, and preventive and detective controls. Considers at What Level Activities Are Applied - Management considers control activities at various levels in the entity. Addresses Segregation of Duties - Management segregates incompatible duties, and where such segregation is not practical, management selects and develops alternative control activities.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_5_2": {"name": "CC5.2 COSO Principle 11: The entity also selects and develops general control activities over technology to support the achievement of objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_5_2","Section": "CC5.0 - Control Activities","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Determines Dependency Between the Use of Technology in Business Processes and Technology General Controls - Management understands and determines the dependency and linkage between business processes, automated control activities, and technology general controls. Establishes Relevant Technology Infrastructure Control Activities - Management selects and develops control activities over the technology infrastructure, which are designed and implemented to help ensure the completeness, accuracy, and availability of technology processing. Establishes Relevant Security Management Process Controls Activities - Management selects and develops control activities that are designed and implemented to restrict technology access rights to authorized users commensurate with their job responsibilities and to protect the entity's assets from external threats. Establishes Relevant Technology Acquisition, Development, and Maintenance Process Control Activities - Management selects and develops control activities over the acquisition, development, and maintenance of technology and its infrastructure to achieve management's objectives.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_5_3": {"name": "CC5.3 COSO Principle 12: The entity deploys control activities through policies that establish what is expected and in procedures that put policies into action","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_5_3","Section": "CC5.0 - Control Activities","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establishes Policies and Procedures to Support Deployment of Management's Directives - Management establishes control activities that are built into business processes and employees' day-to-day activities through policies establishing what is expected and relevant procedures specifying actions. Establishes Responsibility and Accountability for Executing Policies and Procedures - Management establishes responsibility and accountability for control activities with management (or other designated personnel) of the business unit or function in which the relevant risks reside. 
Performs in a Timely Manner - Responsible personnel perform control activities in a timely manner as defined by the policies and procedures. Takes Corrective Action - Responsible personnel investigate and act on matters identified as a result of executing control activities. Performs Using Competent Personnel - Competent personnel with sufficient authority perform control activities with diligence and continuing focus. Reassesses Policies and Procedures - Management periodically reviews control activities to determine their continued relevance and refreshes them when necessary.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_6_1": {"name": "CC6.1 The entity implements logical access security software, infrastructure, and architectures over protected information assets to protect them from security events to meet the entity's objectives","checks": {"s3_bucket_public_access": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_6_1","Section": "CC6.0 - Logical and Physical Access","Service": "s3","SubGroup": null,"SubSection": null}],"description": "Identifies and Manages the Inventory of Information Assets - The entity identifies, inventories, classifies, and manages information assets. Restricts Logical Access - Logical access to information assets, including hardware, data (at-rest, during processing, or in transmission), software, administrative authorities, mobile devices, output, and offline system components is restricted through the use of access control software and rule sets. Identifies and Authenticates Users - Persons, infrastructure and software are identified and authenticated prior to accessing information assets, whether locally or remotely. Considers Network Segmentation - Network segmentation permits unrelated portions of the entity's information system to be isolated from each other. Manages Points of Access - Points of access by outside entities and the types of data that flow through the points of access are identified, inventoried, and managed. The types of individuals and systems using each point of access are identified, documented, and managed. Restricts Access to Information Assets - Combinations of data classification, separate data structures, port restrictions, access protocol restrictions, user identification, and digital certificates are used to establish access control rules for information assets. Manages Identification and Authentication - Identification and authentication requirements are established, documented, and managed for individuals and systems accessing entity information, infrastructure and software. Manages Credentials for Infrastructure and Software - New internal and external infrastructure and software are registered, authorized, and documented prior to being granted access credentials and implemented on the network or access point. Credentials are removed and access is disabled when access is no longer required or the infrastructure and software are no longer in use. Uses Encryption to Protect Data - The entity uses encryption to supplement other measures used to protect data-at-rest, when such protections are deemed appropriate based on assessed risk. 
Protects Encryption Keys - Processes are in place to protect encryption keys during generation, storage, use, and destruction.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"cc_6_2": {"name": "CC6.2 Prior to issuing system credentials and granting system access, the entity registers and authorizes new internal and external users whose access is administered by the entity","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_6_2","Section": "CC6.0 - Logical and Physical Access","Service": "rds","SubGroup": null,"SubSection": null}],"description": "Prior to issuing system credentials and granting system access, the entity registers and authorizes new internal and external users whose access is administered by the entity. For those users whose access is administered by the entity, user system credentials are removed when user access is no longer authorized. Controls Access Credentials to Protected Assets - Information asset access credentials are created based on an authorization from the system's asset owner or authorized custodian. Removes Access to Protected Assets When Appropriate - Processes are in place to remove credential access when an individual no longer requires such access. Reviews Appropriateness of Access Credentials - The appropriateness of access credentials is reviewed on a periodic basis for unnecessary and inappropriate individuals with credentials.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"cc_6_3": {"name": "CC6.3 The entity authorizes, modifies, or removes access to data, software, functions, and other protected information assets based on roles, responsibilities, or the system design and changes, giving consideration to the concepts of least privilege and segregation of duties, to meet the entity's objectives","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_6_3","Section": "CC6.0 - Logical and Physical Access","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Creates or Modifies Access to Protected Information Assets - Processes are in place to create or modify access to protected information assets based on authorization from the asset's owner. Removes Access to Protected Information Assets - Processes are in place to remove access to protected information assets when an individual no longer requires access. Uses Role-Based Access Controls - Role-based access control is utilized to support segregation of incompatible functions.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"cc_6_4": {"name": "CC6.4 The entity restricts physical access to facilities and protected information assets to authorized personnel to meet the entity's objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_6_4","Section": "CC6.0 - Logical and Physical Access","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Creates or Modifies Physical Access - Processes are in place to create or modify physical access to facilities such as data centers, office spaces, and work areas, based on authorization from the system's asset owner. Removes Physical Access - Processes are in place to remove access to physical resources when an individual no longer requires access. 
Reviews Physical Access - Processes are in place to periodically review physical access to ensure consistency with job responsibilities.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_6_5": {"name": "CC6.5 The entity discontinues logical and physical protections over physical assets only after the ability to read or recover data and software from those assets has been diminished and is no longer required to meet the entity's objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_6_5","Section": "CC6.0 - Logical and Physical Access","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Identifies Data and Software for Disposal - Procedures are in place to identify data and software stored on equipment to be disposed and to render such data and software unreadable. Removes Data and Software From Entity Control - Procedures are in place to remove data and software stored on equipment to be removed from the physical control of the entity and to render such data and software unreadable.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_6_6": {"name": "CC6.6 The entity implements logical access security measures to protect against threats from sources outside its system boundaries","checks": {"ec2_instance_public_ip": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_6_6","Section": "CC6.0 - Logical and Physical Access","Service": "ec2","SubGroup": null,"SubSection": null}],"description": "Restricts Access - The types of activities that can occur through a communication channel (for example, FTP site, router port) are restricted. Protects Identification and Authentication Credentials - Identification and authentication credentials are protected during transmission outside its system boundaries. Requires Additional Authentication or Credentials - Additional authentication information or credentials are required when accessing the system from outside its boundaries. Implements Boundary Protection Systems - Boundary protection systems (for example, firewalls, demilitarized zones, and intrusion detection systems) are implemented to protect external access points from attempts and unauthorized access and are monitored to detect such attempts.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cc_6_7": {"name": "CC6.7 The entity restricts the transmission, movement, and removal of information to authorized internal and external users and processes, and protects it during transmission, movement, or removal to meet the entity's objectives","checks": {"acm_certificates_expiration_check": "PASS"},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_6_7","Section": "CC6.0 - Logical and Physical Access","Service": "acm","SubGroup": null,"SubSection": null}],"description": "Restricts the Ability to Perform Transmission - Data loss prevention processes and technologies are used to restrict ability to authorize and execute transmission, movement and removal of information. Uses Encryption Technologies or Secure Communication Channels to Protect Data - Encryption technologies or secured communication channels are used to protect transmission of data and other communications beyond connectivity access points. Protects Removal Media - Encryption technologies and physical asset protections are used for removable media (such as USB drives and back-up tapes), as appropriate. 
Protects Mobile Devices - Processes are in place to protect mobile devices (such as laptops, smart phones and tablets) that serve as information assets.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"cc_6_8": {"name": "CC6.8 The entity implements controls to prevent or detect and act upon the introduction of unauthorized or malicious software to meet the entity's objectives","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_6_8","Section": "CC6.0 - Logical and Physical Access","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Restricts Application and Software Installation - The ability to install applications and software is restricted to authorized individuals. Detects Unauthorized Changes to Software and Configuration Parameters - Processes are in place to detect changes to software and configuration parameters that may be indicative of unauthorized or malicious software. Uses a Defined Change Control Process - A management-defined change control process is used for the implementation of software. Uses Antivirus and Anti-Malware Software - Antivirus and anti-malware software is implemented and maintained to provide for the interception or detection and remediation of malware. Scans Information Assets from Outside the Entity for Malware and Other Unauthorized Software - Procedures are in place to scan information assets that have been transferred or returned to the entity's custody for malware and other unauthorized software and to remove any items detected prior to its implementation on the network.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"cc_7_1": {"name": "CC7.1 To meet its objectives, the entity uses detection and monitoring procedures to identify (1) changes to configurations that result in the introduction of new vulnerabilities, and (2) susceptibilities to newly discovered vulnerabilities","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_7_1","Section": "CC7.0 - System Operations","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Uses Defined Configuration Standards - Management has defined configuration standards. Monitors Infrastructure and Software - The entity monitors infrastructure and software for noncompliance with the standards, which could threaten the achievement of the entity's objectives. Implements Change-Detection Mechanisms - The IT system includes a change-detection mechanism (for example, file integrity monitoring tools) to alert personnel to unauthorized modifications of critical system files, configuration files, or content files. Detects Unknown or Unauthorized Components - Procedures are in place to detect the introduction of unknown or unauthorized components. 
Conducts Vulnerability Scans - The entity conducts vulnerability scans designed to identify potential vulnerabilities or misconfigurations on a periodic basis and after any significant change in the environment and takes action to remediate identified deficiencies on a timely basis.","checks_status": {"fail": 2,"pass": 2,"total": 4,"manual": 0}},"cc_7_2": {"name": "CC7.2 The entity monitors system components and the operation of those components for anomalies that are indicative of malicious acts, natural disasters, and errors affecting the entity's ability to meet its objectives; anomalies are analyzed to determine whether they represent security events","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","ec2_instance_imdsv2_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_7_2","Section": "CC7.0 - System Operations","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implements Detection Policies, Procedures, and Tools - Detection policies and procedures are defined and implemented, and detection tools are implemented on Infrastructure and software to identify anomalies in the operation or unusual activity on systems. Procedures may include (1) a defined governance process for security event detection and management that includes provision of resources; (2) use of intelligence sources to identify newly discovered threats and vulnerabilities; and (3) logging of unusual system activities. Designs Detection Measures - Detection measures are designed to identify anomalies that could result from actual or attempted (1) compromise of physical barriers; (2) unauthorized actions of authorized personnel; (3) use of compromised identification and authentication credentials; (4) unauthorized access from outside the system boundaries; (5) compromise of authorized external parties; and (6) implementation or connection of unauthorized hardware and software. Implements Filters to Analyze Anomalies - Management has implemented procedures to filter, summarize, and analyze anomalies to identify security events. 
Monitors Detection Tools for Effective Operation - Management has implemented processes to monitor the effectiveness of detection tools.","checks_status": {"fail": 7,"pass": 6,"total": 21,"manual": 0}},"cc_7_3": {"name": "CC7.3 The entity evaluates security events to determine whether they could or have resulted in a failure of the entity to meet its objectives (security incidents) and, if so, takes actions to prevent or address such failures","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","apigateway_restapi_logging_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_7_3","Section": "CC7.0 - System Operations","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Responds to Security Incidents - Procedures are in place for responding to security incidents and evaluating the effectiveness of those policies and procedures on a periodic basis. Communicates and Reviews Detected Security Events - Detected security events are communicated to and reviewed by the individuals responsible for the management of the security program and actions are taken, if necessary. Develops and Implements Procedures to Analyze Security Incidents - Procedures are in place to analyze security incidents and determine system impact. Assesses the Impact on Personal Information - Detected security events are evaluated to determine whether they could or did result in the unauthorized disclosure or use of personal information and whether there has been a failure to comply with applicable laws or regulations. 
Determines Personal Information Used or Disclosed - When an unauthorized use or disclosure of personal information has occurred, the affected information is identified.","checks_status": {"fail": 10,"pass": 3,"total": 17,"manual": 0}},"cc_7_4": {"name": "CC7.4 The entity responds to identified security incidents by executing a defined incident response program to understand, contain, remediate, and communicate security incidents, as appropriate","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"guardduty_no_high_severity_findings": "FAIL","redshift_cluster_automated_snapshot": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_7_4","Section": "CC7.0 - System Operations","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Assigns Roles and Responsibilities - Roles and responsibilities for the design, implementation, maintenance, and execution of the incident response program are assigned, including the use of external resources when necessary. Contains Security Incidents - Procedures are in place to contain security incidents that actively threaten entity objectives. Mitigates Ongoing Security Incidents - Procedures are in place to mitigate the effects of ongoing security incidents. Ends Threats Posed by Security Incidents - Procedures are in place to end the threats posed by security incidents through closure of the vulnerability, removal of unauthorized access, and other remediation actions. Restores Operations - Procedures are in place to restore data and business operations to an interim state that permits the achievement of entity objectives. Develops and Implements Communication Protocols for Security Incidents - Protocols for communicating security incidents and actions taken to affected parties are developed and implemented to meet the entity's objectives. Obtains Understanding of Nature of Incident and Determines Containment Strategy - An understanding of the nature (for example, the method by which the incident occurred and the affected system resources) and severity of the security incident is obtained to determine the appropriate containment strategy, including (1) a determination of the appropriate response time frame, and (2) the determination and execution of the containment approach. Remediates Identified Vulnerabilities - Identified vulnerabilities are remediated through the development and execution of remediation activities. Communicates Remediation Activities - Remediation activities are documented and communicated in accordance with the incident response program. Evaluates the Effectiveness of Incident Response - The design of incident response activities is evaluated for effectiveness on a periodic basis. Periodically Evaluates Incidents - Periodically, management reviews incidents related to security, availability, processing integrity, confidentiality, and privacy and identifies the need for system changes based on incident patterns and root causes. 
Communicates Unauthorized Use and Disclosure - Events that resulted in unauthorized use or disclosure of personal information are communicated to the data subjects, legal and regulatory authorities, and others as required. Application of Sanctions - The conduct of individuals and organizations operating under the authority of the entity and involved in the unauthorized use or disclosure of personal information is evaluated and, if appropriate, sanctioned in accordance with entity policies and legal and regulatory requirements.","checks_status": {"fail": 3,"pass": 3,"total": 16,"manual": 0}},"cc_7_5": {"name": "CC7.5 The entity identifies, develops, and implements activities to recover from identified security incidents","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_7_5","Section": "CC7.0 - System Operations","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Restores the Affected Environment - The activities restore the affected environment to functional operation by rebuilding systems, updating software, installing patches, and changing configurations, as needed. Communicates Information About the Event - Communications about the nature of the incident, recovery actions taken, and activities required for the prevention of future security events are made to management and others as appropriate (internal and external). Determines Root Cause of the Event - The root cause of the event is determined. Implements Changes to Prevent and Detect Recurrences - Additional architecture or changes to preventive and detective controls, or both, are implemented to prevent and detect recurrences on a timely basis. Improves Response and Recovery Procedures - Lessons learned are analyzed, and the incident response plan and recovery procedures are improved. Implements Incident Recovery Plan Testing - Incident recovery plan testing is performed on a periodic basis. The testing includes (1) development of testing scenarios based on threat likelihood and magnitude; (2) consideration of relevant system components from across the entity that can impair availability; (3) scenarios that consider the potential for the lack of availability of key personnel; and (4) revision of continuity plans and systems based on test results.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_8_1": {"name": "CC8.1 The entity authorizes, designs, develops or acquires, configures, documents, tests, approves, and implements changes to infrastructure, data, software, and procedures to meet its objectives","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": "automated","ItemId": "cc_8_1","Section": "CC8.0 - Change Management","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Manages Changes Throughout the System Lifecycle - A process for managing system changes throughout the lifecycle of the system and its components (infrastructure, data, software and procedures) is used to support system availability and processing integrity. Authorizes Changes - A process is in place to authorize system changes prior to development. Designs and Develops Changes - A process is in place to design and develop system changes. Documents Changes - A process is in place to document system changes to support ongoing maintenance of the system and to support system users in performing their responsibilities. Tracks System Changes - A process is in place to track system changes prior to implementation. 
Configures Software - A process is in place to select and implement the configuration parameters used to control the functionality of software. Tests System Changes - A process is in place to test system changes prior to implementation. Approves System Changes - A process is in place to approve system changes prior to implementation. Deploys System Changes - A process is in place to implement system changes. Identifies and Evaluates System Changes - Objectives affected by system changes are identified, and the ability of the modified system to meet the objectives is evaluated throughout the system development life cycle. Identifies Changes in Infrastructure, Data, Software, and Procedures Required to Remediate Incidents - Changes in infrastructure, data, software, and procedures required to remediate incidents to continue to meet objectives are identified, and the change process is initiated upon identification. Creates Baseline Configuration of IT Technology - A baseline configuration of IT and control systems is created and maintained. Provides for Changes Necessary in Emergency Situations - A process is in place for authorizing, designing, testing, approving and implementing changes necessary in emergency situations (that is, changes that need to be implemented in an urgent timeframe). Protects Confidential Information - The entity protects confidential information during system design, development, testing, implementation, and change processes to meet the entity's objectives related to confidentiality. Protects Personal Information - The entity protects personal information during system design, development, testing, implementation, and change processes to meet the entity's objectives related to privacy.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"cc_9_1": {"name": "CC9.1 The entity identifies, selects, and develops risk mitigation activities for risks arising from potential business disruptions","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_9_1","Section": "CC9.0 - Risk Mitigation","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Considers Mitigation of Risks of Business Disruption - Risk mitigation activities include the development of planned policies, procedures, communications, and alternative processing solutions to respond to, mitigate, and recover from security events that disrupt business operations. Those policies and procedures include monitoring processes and information and communications to meet the entity's objectives during response, mitigation, and recovery efforts. Considers the Use of Insurance to Mitigate Financial Impact Risks - The risk management activities consider the use of insurance to offset the financial impact of loss events that would otherwise impair the ability of the entity to meet its objectives.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_9_2": {"name": "CC9.2 The entity assesses and manages risks associated with vendors and business partners","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_9_2","Section": "CC9.0 - Risk Mitigation","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Establishes Requirements for Vendor and Business Partner Engagements - The entity establishes specific requirements for a vendor and business partner engagement that includes (1) scope of services and product specifications, (2) roles and responsibilities, (3) compliance requirements, and (4) service levels. 
Assesses Vendor and Business Partner Risks - The entity assesses, on a periodic basis, the risks that vendors and business partners (and those entities' vendors and business partners) represent to the achievement of the entity's objectives. Assigns Responsibility and Accountability for Managing Vendors and Business Partners - The entity assigns responsibility and accountability for the management of risks associated with vendors and business partners. Establishes Communication Protocols for Vendors and Business Partners - The entity establishes communication and resolution protocols for service or product issues related to vendors and business partners. Establishes Exception Handling Procedures From Vendors and Business Partners - The entity establishes exception handling procedures for service or product issues related to vendors and business partners. Assesses Vendor and Business Partner Performance - The entity periodically assesses the performance of vendors and business partners. Implements Procedures for Addressing Issues Identified During Vendor and Business Partner Assessments - The entity implements procedures for addressing issues identified with vendor and business partner relationships. Implements Procedures for Terminating Vendor and Business Partner Relationships - The entity implements procedures for terminating vendor and business partner relationships. Obtains Confidentiality Commitments from Vendors and Business Partners - The entity obtains confidentiality commitments that are consistent with the entity's confidentiality commitments and requirements from vendors and business partners who have access to confidential information. Assesses Compliance With Confidentiality Commitments of Vendors and Business Partners - On a periodic and as-needed basis, the entity assesses compliance by vendors and business partners with the entity's confidentiality commitments and requirements. Obtains Privacy Commitments from Vendors and Business Partners - The entity obtains privacy commitments, consistent with the entity's privacy commitments and requirements, from vendors and business partners who have access to personal information. Assesses Compliance with Privacy Commitments of Vendors and Business Partners - On a periodic and as-needed basis, the entity assesses compliance by vendors and business partners with the entity's privacy commitments and requirements and takes corrective action as necessary.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_a_1_1": {"name": "A1.1 The entity maintains, monitors, and evaluates current processing capacity and use of system components (infrastructure, data, and software) to manage capacity demand and to enable the implementation of additional capacity to help meet its objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_a_1_1","Section": "CCA1.0 - Additional Criteria for Availability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Measures Current Usage - The use of the system components is measured to establish a baseline for capacity management and to use when evaluating the risk of impaired availability due to capacity constraints. Forecasts Capacity - The expected average and peak use of system components is forecasted and compared to system capacity and associated tolerances. Forecasting considers capacity in the event of the failure of system components that constrain capacity. 
Makes Changes Based on Forecasts - The system change management process is initiated when forecasted usage exceeds capacity tolerances.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_a_1_2": {"name": "A1.2 The entity authorizes, designs, develops or acquires, implements, operates, approves, maintains, and monitors environmental protections, software, data back-up processes, and recovery infrastructure to meet its objectives","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","redshift_cluster_automated_snapshot": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_a_1_2","Section": "CCA1.0 - Additional Criteria for Availability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Measures Current Usage - The use of the system components is measured to establish a baseline for capacity management and to use when evaluating the risk of impaired availability due to capacity constraints. Forecasts Capacity - The expected average and peak use of system components is forecasted and compared to system capacity and associated tolerances. Forecasting considers capacity in the event of the failure of system components that constrain capacity. Makes Changes Based on Forecasts - The system change management process is initiated when forecasted usage exceeds capacity tolerances.","checks_status": {"fail": 6,"pass": 3,"total": 16,"manual": 0}},"cc_a_1_3": {"name": "A1.3 The entity tests recovery plan procedures supporting system recovery to meet its objectives","checks": {},"status": "PASS","attributes": [{"Type": "manual","ItemId": "cc_a_1_3","Section": "CCA1.0 - Additional Criteria for Availability","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implements Business Continuity Plan Testing - Business continuity plan testing is performed on a periodic basis. The testing includes (1) development of testing scenarios based on threat likelihood and magnitude; (2) consideration of system components from across the entity that can impair the availability; (3) scenarios that consider the potential for the lack of availability of key personnel; and (4) revision of continuity plans and systems based on test results. Tests Integrity and Completeness of Back-Up Data - The integrity and completeness of back-up information is tested on a periodic basis.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"cc_c_1_1": {"name": "C1.1 The entity identifies and maintains confidential information to meet the entity's objectives related to confidentiality","checks": {"rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_c_1_1","Section": "CCC1.0 - Additional Criteria for Confidentiality","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Identifies Confidential Information - Procedures are in place to identify and designate confidential information when it is received or created and to determine the period over which the confidential information is to be retained. 
Protects Confidential Information from Destruction - Procedures are in place to protect confidential information from erasure or destruction during the specified retention period of the information","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cc_c_1_2": {"name": "C1.2 The entity disposes of confidential information to meet the entity's objectives related to confidentiality","checks": {"s3_bucket_object_versioning": "FAIL"},"status": "FAIL","attributes": [{"Type": "automated","ItemId": "cc_c_1_2","Section": "CCC1.0 - Additional Criteria for Confidentiality","Service": "s3","SubGroup": null,"SubSection": null}],"description": "Identifies Confidential Information for Destruction - Procedures are in place to identify confidential information requiring destruction when the end of the retention period is reached. Destroys Confidential Information - Procedures are in place to erase or otherwise destroy confidential information that has been identified for destruction.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 10,"requirements_failed": 10,"requirements_manual": 36,"total_requirements": 56,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "8f43ba1e-a5fb-42c5-95ca-d0b199c62975","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "cis_3.0_aws","framework": "CIS","version": "3.0","description": "The CIS Amazon Web Services Foundations Benchmark provides prescriptive guidance for configuring security options for a subset of Amazon Web Services with an emphasis on foundational, testable, and architecture agnostic settings.","region": "eu-west-1","requirements": {"1.1": {"name": "1.1","checks": {"account_maintain_current_contact_details": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info","Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.","DefaultValue": null,"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:*Billing ) 1. Sign in to the AWS Management Console and open the Billing and Cost Management console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose Account. 3. On the Account Settings page, review and verify the current details. 4. 
Under Contact Information, review and verify the current details.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.","RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:*Billing ). 1. Sign in to the AWS Management Console and open the Billing and Cost Management console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose Account. 3. On the Account Settings page, next to Account Settings, choose Edit. 4. Next to the field that you need to update, choose Edit. 5. After you have entered your changes, choose Save changes. 6. After you have made your changes, choose Done. 7. To edit your contact information, under Contact Information, choose Edit. 8. For the fields that you want to change, type your updated information, and then choose Update.","AdditionalInformation": ""}],"description": "Maintain current contact details","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.2": {"name": "1.2","checks": {"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "AWS provides customers with the option of specifying the contact information for the account's security team. It is recommended that this information be provided.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if security contact information is present:**From Console:**1. Click on your account name at the top right corner of the console 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. Ensure contact information is specified in the `Security` section","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.","RemediationProcedure": "Perform the following to establish security contact information:**From Console:**1. Click on your account name at the top right corner of the console. 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. 
Enter contact information in the `Security` section**Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.","AdditionalInformation": ""}],"description": "Ensure security contact information is registered","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.3": {"name": "1.3","checks": {"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS account as the 'root' user 2. On the top right you will see the __ 3. Click on the __ 4. From the drop-down menu Click `My Account`5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions. 6. Click `Save questions` .","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "When creating a new AWS account, a default super user is automatically created. This account is referred to as the 'root user' or 'root' account. It is recommended that the use of this account be limited and highly controlled. During events in which the 'root' password is no longer accessible or the MFA token associated with 'root' is lost/destroyed it is possible, through authentication using secret questions and associated answers, to recover 'root' user login access.","RemediationProcedure": "**From Console:**1. Login to the AWS Account as the 'root' user 2. Click on the __ from the top right of the console 3. From the drop-down menu Click _My Account_ 4. Scroll down to the `Configure Security Questions` section 5. Click on `Edit`6. Click on each `Question` - From the drop-down select an appropriate question- Click on the `Answer` section- Enter an appropriate answer - Follow process for all 3 questions 7. Click `Update` when complete 8. Save Questions and Answers and place in a secure physical location","AdditionalInformation": ""}],"description": "Ensure security questions are registered in the AWS account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.4": {"name": "1.4","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html:https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/","Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. 
This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` .**From Command Line:**Run the following command: ```aws iam get-account-summary | grep \"AccountAccessKeysPresent\"``` If no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. If the output shows a \"1\" then 'root' keys exist, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role based accounts that are least privileged.","RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys**From Console:**1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. Click on __ at the top right and select `My Security Credentials` from the drop down list 3. On the pop out screen Click on `Continue to Security Credentials`4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_ 5. Under the `Status` column if there are any Keys which are Active- Click on `Make Inactive` - (Temporarily disable Key - may be needed again)- Click `Delete` - (Deleted keys cannot be recovered)","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request, AWS support can enable 'root' access only through access-keys (CLI, API methods) for us-gov cloud region."}],"description": "Ensure no 'root' user account access key exists","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.5": {"name": "1.5","checks": {"iam_root_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. 
For the `` user, ensure the `mfa_active` field is set to `TRUE` .**From Command Line:**1. Run the following command: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` 2. Ensure the AccountMFAEnabled property is set to 1","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to establish MFA for the 'root' user account:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` . 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.When you are finished, the virtual MFA device starts generating one-time passwords.In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions does not have console access. This recommendation is not applicable for us-gov cloud regions."}],"description": "Ensure MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.6": {"name": "1.6","checks": {"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. 
With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup:1. Run the following command to determine if the 'root' account has MFA setup: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ```The `AccountMFAEnabled` property set to `1` ensures that the 'root' user account has MFA (Virtual or Hardware) enabled. If the `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation.2. If the `AccountMFAEnabled` property is set to `1`, determine whether the 'root' account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: ```aws iam list-virtual-mfa-devices``` If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: `\"SerialNumber\": \"arn:aws:iam::__:mfa/root-account-mfa-device\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides.**Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.","RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` . 5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device. 6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. 7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number. 8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.Remediation for this recommendation is not available through AWS CLI.","AdditionalInformation": "IAM User account 'root' for us-gov cloud regions does not have console access. 
This control is not applicable for us-gov cloud regions."}],"description": "Ensure hardware MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.7": {"name": "1.7","checks": {"iam_avoid_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html:https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html","Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/` 2. In the left pane, click `Credential Report` 3. Click on `Download Report` 4. Open or Save the file locally 5. Locate the `` under the user column 6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used.**From Command Line:**Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '' ```Review `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used.**Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "The 'root user' has unrestricted access to and control over all account resources. Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.","RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user:1. Change the 'root' user password. 2. Deactivate or delete any access keys associated with the 'root' user.**Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.","AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable.Monitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user."}],"description": "Eliminate use of the 'root' user for administrative and daily tasks","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.8": {"name": "1.8","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure passwords are at least a given length. It is recommended that the password policy require a minimum password length of 14.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Minimum password length\" is set to 14 or greater.**From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Setting a password complexity policy increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Set \"Minimum password length\" to `14` or greater. 5. Click \"Apply password policy\"**From Command Line:** ```aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy requires minimum length of 14 or greater","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.9": {"name": "1.9","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Prevent password reuse\" is checked 5. Ensure \"Number of passwords to remember\" is set to 24**From Command Line:** ``` aws iam get-account-password-policy``` Ensure the output of the above command includes \"PasswordReusePrevention\": 24","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Preventing password reuse increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. 
Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Check \"Prevent password reuse\" 5. Set \"Number of passwords to remember\" to `24` **From Command Line:** ``` aws iam update-account-password-policy --password-reuse-prevention 24 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy prevents password reuse","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.1": {"name": "3.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events","Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane- You will be presented with a list of trails across all regions 3. Ensure at least one Trail has `All` specified in the `Region` column 4. Click on a trail via the link in the _Name_ column 5. Ensure `Logging` is set to `ON`6. Ensure `Apply trail to all regions` is set to `Yes` 7. In section `Management Events` ensure `Read/Write Events` is set to `ALL`**From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `IsMultiRegionTrail` is set to `true` ``` aws cloudtrail get-trail-status --name  ``` Ensure `IsLogging` is set to `true` ``` aws cloudtrail get-event-selectors --trail-name  ``` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`","ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features:1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html","AssessmentStatus": "Automated","RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. 
Additionally, - ensuring that a multi-region trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected - ensuring that a multi-region trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on AWS global services - for a multi-region trail, ensuring that management events are configured for all types of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account","RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on _Trails_ on the left navigation pane 3. Click `Get Started Now` , if presented- Click `Add new trail` - Enter a trail name in the `Trail name` box- Set the `Apply trail to all regions` option to `Yes` - Specify an S3 bucket name in the `S3 bucket` box- Click `Create`4. If 1 or more trails already exist, select the target trail to enable for global logging 5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`. 6. Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`.**From Command Line:** ``` aws cloudtrail create-trail --name  --s3-bucket-name  --is-multi-region-trail aws cloudtrail update-trail --name  --is-multi-region-trail ```Note: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` types of `Read/Writes` by default.","AdditionalInformation": ""}],"description": "Ensure CloudTrail is enabled in all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.2": {"name": "3.2","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html","Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.","DefaultValue": null,"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. For Every Trail: - Click on a trail via the link in the _Name_ column - Under the `General details` section, ensure `Log file validation` is set to `Enabled` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `LogFileValidationEnabled` is set to `true` for each trail","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.","RemediationProcedure": "Perform the following to enable log file validation on a given trail:**From Console:**1. 
Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. Click on target trail 4. Within the `General details` section click `edit` 5. Under the `Advanced settings` section 6. Check the enable box under `Log file validation`7. Click `Save changes` **From Command Line:** ``` aws cloudtrail update-trail --name  --enable-log-file-validation ``` Note that periodic validation of logs using these digests can be performed by running the following command: ``` aws cloudtrail validate-logs --trail-arn  --start-time  --end-time  ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail log file validation is enabled","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.3": {"name": "3.3","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html","Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended AWS Config be enabled in all regions.","DefaultValue": null,"AuditProcedure": "Process to evaluate AWS Config configuration per region**From Console:**1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/). 2. On the top right of the console select target Region. 3. If presented with Setup AWS Config - follow remediation procedure: 4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears. 5. Ensure one or both check-boxes under \"All Resources\" are checked.- Include global resources related to IAM resources - which need to be enabled in one region only 6. Ensure the correct S3 bucket has been defined. 7. Ensure the correct SNS topic has been defined. 8. Repeat steps 2 to 7 for each region.**From Command Line:**1. Run this command to show all AWS Config recorders and their properties: ``` aws configservice describe-configuration-recorders ``` 2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true`Note: There is one more parameter \"ResourceTypes\" in recordingGroup object. We don't need to check it, as whenever we set \"allSupported\": true, AWS enforces resource types to be empty (\"ResourceTypes\":[]). Sample Output:``` {\"ConfigurationRecorders\": [{\"recordingGroup\": {\"allSupported\": true,\"resourceTypes\": [],\"includeGlobalResourceTypes\": true},\"roleARN\": \"arn:aws:iam:::role/service-role/\",\"name\": \"default\"}] } ```3. Run this command to show the status for all AWS Config recorders: ``` aws configservice describe-configuration-recorder-status ``` 4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. 
Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`","ImpactStatement": "It is recommended AWS Config be enabled in all regions.","AssessmentStatus": "Automated","RationaleStatement": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.","RemediationProcedure": "To implement AWS Config configuration:**From Console:**1. Select the region you want to focus on in the top right of the console 2. Click `Services`3. Click `Config`4. Define which resources you want to record in the selected region 5. Choose to include global resources (IAM resources) 6. Specify an S3 bucket in the same account or in another managed AWS account 7. Create an SNS Topic from the same AWS account or another managed AWS account**From Command Line:**1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). 2. Run this command to set up the configuration recorder ``` aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole ``` 3. Run this command to start the configuration recorder: ``` aws configservice start-configuration-recorder --configuration-recorder-name  ```","AdditionalInformation": ""}],"description": "Ensure AWS Config is enabled in all regions","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.4": {"name": "3.4","checks": {"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html","Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the CloudTrail S3 bucket has access logging enabled:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the API activity history pane on the left, click Trails 3. In the Trails pane, note the bucket names in the S3 bucket column 4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 5. Under `All Buckets` click on a target S3 bucket 6. Click on `Properties` in the top right of the console 7. Under `Bucket:` _ `` _ click on `Logging`8. Ensure `Enabled` is checked.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. 
Ensure Bucket Logging is enabled: ``` aws s3api get-bucket-logging --bucket  ``` Ensure the command does not return empty output. Sample Output for a bucket with logging enabled:``` {\"LoggingEnabled\": {\"TargetPrefix\": \"\",\"TargetBucket\": \"\"} } ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within any target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.","RemediationProcedure": "Perform the following to enable S3 bucket logging:**From Console:**1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 2. Under `All Buckets` click on the target S3 bucket 3. Click on `Properties` in the top right of the console 4. Under `Bucket:`  click on `Logging`5. Configure bucket logging- Click on the `Enabled` checkbox- Select Target Bucket from list- Enter a Target Prefix 6. Click `Save`.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --region  --query trailList[*].S3BucketName ``` 2. Copy and add target bucket name at ``, Prefix for logfile at `` and optionally add an email address in the following template and save it as ``: ``` {\"LoggingEnabled\": {\"TargetBucket\": \"\",\"TargetPrefix\": \"\",\"TargetGrants\": [{\"Grantee\": {\"Type\": \"AmazonCustomerByEmail\",\"EmailAddress\": \"\"},\"Permission\": \"FULL_CONTROL\"}]}} ``` 3. Run the `put-bucket-logging` command with bucket name and `` as input, for more information refer to [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html): ``` aws s3api put-bucket-logging --bucket  --bucket-logging-status file:// ```","AdditionalInformation": ""}],"description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.5": {"name": "3.5","checks": {"cloudtrail_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html","Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Select a Trail 4. 
Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KMS Key Id` field.**From Command Line:**1. Run the following command: ``` aws cloudtrail describe-trails ``` 2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.","ImpactStatement": "Customer created keys incur an additional cost. See https://aws.amazon.com/kms/pricing/ for more information.","AssessmentStatus": "Automated","RationaleStatement": "Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.","RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Click on a Trail 4. Under the `S3` section click on the edit button (pencil icon) 5. Click `Advanced`6. Select an existing CMK from the `KMS key Id` drop-down menu- Note: Ensure the CMK is located in the same region as the S3 bucket- Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy 7. Click `Save`8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files. 9. Click `Yes` **From Command Line:** ``` aws cloudtrail update-trail --name  --kms-key-id  aws kms put-key-policy --key-id  --policy  ```","AdditionalInformation": "Three statements need to be added to the CMK policy:1\\. Enable Cloudtrail to describe CMK properties ``` 
{\"Sid\": \"Allow CloudTrail access\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:DescribeKey\",\"Resource\": \"*\" } ``` 2\\. Granting encrypt permissions ``` 
{\"Sid\": \"Allow CloudTrail to encrypt logs\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:GenerateDataKey*\",\"Resource\": \"*\",\"Condition\": {\"StringLike\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": [\"arn:aws:cloudtrail:*:aws-account-id:trail/*\"]}} } ``` 3\\. Granting decrypt permissions ``` 
{\"Sid\": \"Enable CloudTrail log decrypt permissions\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"arn:aws:iam::aws-account-id:user/username\"},\"Action\": \"kms:Decrypt\",\"Resource\": \"*\",\"Condition\": {\"Null\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\"}} } ```"}],"description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.6": {"name": "3.6","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://aws.amazon.com/kms/pricing/:https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final","Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the KMS console at: https://console.aws.amazon.com/kms. 2. In the left navigation pane, click Customer-managed keys. 3. Select a customer managed CMK where Key spec = SYMMETRIC_DEFAULT. 4. Select the Key rotation tab. 5. Ensure the Automatically rotate this KMS key every year checkbox is checked. 6. Repeat steps 3โ€“5 for all customer-managed CMKs where 'Key spec = SYMMETRIC_DEFAULT'.","ImpactStatement": "Creation, management, and storage of CMKs may require additional time from and administrator.","AssessmentStatus": "Automated","RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed. Keys should be rotated every year, or upon event that would result in the compromise of that key.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the KMS console at: https://console.aws.amazon.com/kms. 2. In the left navigation pane, click Customer-managed keys. 3. Select a key where Key spec = SYMMETRIC_DEFAULT that does not have automatic rotation enabled. 4. Select the Key rotation tab. 5. Check the Automatically rotate this KMS key every year checkbox. 6. Click Save. 7. Repeat steps 3โ€“6 for all customer-managed CMKs that do not have automatic rotation enabled.","AdditionalInformation": ""}],"description": "Ensure rotation for customer created symmetric CMKs is enabled","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.7": {"name": "3.7","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html","Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. 
It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. Ensure a Flow Log exists that has `Active` in the `Status` column.**From Command Line:**1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region: ``` aws ec2 describe-vpcs --region  --query Vpcs[].VpcId ``` 2. The command output returns the `VpcId` available in the selected region. 3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled: ``` aws ec2 describe-flow-logs --filter \"Name=resource-id,Values=\" ``` 4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`. 5. Repeat step 3 for other VPCs available in the same region. 6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.","ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind that the average number of days it takes an organization to realize it has been breached is 210 (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or provide insight during security workflows.","RemediationProcedure": "Perform the following to enable VPC Flow Logs:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. If no Flow Log exists, click `Create Flow Log`7. For Filter, select `Reject` 8. Enter in a `Role` and `Destination Log Group`9. Click `Create Flow Log`10. Click on `CloudWatch Logs Group` **Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment.**From Command Line:**1. Create a policy document and name it as `role_policy_document.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Sid\": \"test\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\"Action\": \"sts:AssumeRole\"}] } ``` 2. 
Create another policy document and name it as `iam_policy.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Action\":[\"logs:CreateLogGroup\",\"logs:CreateLogStream\",\"logs:DescribeLogGroups\",\"logs:DescribeLogStreams\",\"logs:PutLogEvents\",\"logs:GetLogEvents\",\"logs:FilterLogEvents\"],\"Resource\": \"*\"}] } ``` 3. Run the below command to create an IAM role: ``` aws iam create-role --role-name  --assume-role-policy-document file://role_policy_document.json ``` 4. Run the below command to create an IAM policy: ``` aws iam create-policy --policy-name  --policy-document file://iam_policy.json ``` 5. Run the `attach-role-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned): ``` aws iam attach-role-policy --policy-arn arn:aws:iam:::policy/ --role-name  ``` 6. Run `describe-vpcs` to get the VpcId available in the selected region: ``` aws ec2 describe-vpcs --region  ``` 7. The command output should return the VPC Id available in the selected region. 8. Run `create-flow-logs` to create a flow log for the VPC: ``` aws ec2 create-flow-logs --resource-type VPC --resource-ids  --traffic-type REJECT --log-group-name  --deliver-logs-permission-arn  ``` 9. Repeat step 8 for other VPCs available in the selected region. 10. Change the region by updating --region and repeat the remediation procedure for other VPCs.","AdditionalInformation": ""}],"description": "Ensure VPC flow logging is enabled in all VPCs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.8": {"name": "3.8","checks": {"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to CloudTrail dashboard at https://console.aws.amazon.com/cloudtrail/ 2. In the left panel, click Trails and then click on the CloudTrail Name that you want to examine. 3. Review General details 4. Confirm that Multi-region trail is set to Yes 5. Scroll down to Data events 6. Confirm that it reads: Data Events:S3 Log selector template Log all events If 'basic events selectors' is being used it should read: Data events: S3 Bucket Name: All current and future S3 buckets Write: Enabled 7. Repeat steps 2 to 6 to verify that Multi-region trail and Data events logging of S3 buckets are configured in CloudTrail. If the CloudTrails do not have multi-region and data events configured for S3 refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at https://console.aws.amazon.com/s3/ 2. 
In the left navigation panel, click buckets and then click on the S3 Bucket Name that you want to examine. 3. Click the `Properties` tab to see the bucket configuration in detail. 4. In the 'AWS CloudTrail data events' section, select the CloudTrail name for the recording activity. You can choose an existing CloudTrail or create a new one by clicking the `Configure in CloudTrail` button or navigating to the CloudTrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the CloudTrail is selected, select the Data Events check box. 6. Select S3 from the `Data event type` drop down. 7. Select Log all events from the Log selector template drop down. 8. Repeat steps 2 to 7 to enable object-level logging of write events for other S3 buckets.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for write events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.9": {"name": "3.9","checks": {"cloudtrail_s3_dataevents_read_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click the `Properties` tab to see the bucket configuration in detail. 4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set. 5. If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. 6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets.**From Command Line:** 1. Run `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: ``` aws cloudtrail describe-trails --region  --output table --query trailList[*].Name ``` 2. The command output will be a table of the requested trail names. 3. Run `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. 5. If the `get-event-selectors` command returns an empty array, the Data events are not included in the selected AWS Cloudtrail trail logging configuration; therefore the S3 object-level API operations performed within your AWS account are not recorded. 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. 7. 
Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click the `Properties` tab to see the bucket configuration in detail. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled. 6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets.**From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run `put-event-selectors` command using the name of the trail that you want to reconfigure as identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be the `object-level` event trail configuration. 3. If you want to enable it for all buckets at once then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for read events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.1": {"name": "4.1","checks": {"cloudwatch_log_metric_filter_unauthorized_api_calls": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://aws.amazon.com/sns/:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with \"Name\" note ``- From value associated with \"CloudWatchLogsLogGroupArn\" note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>`Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this `` that you captured in step 1:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\", ```4. Note the \"filterName\" `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\" ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions. If an excessive number of alerts is being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts. In some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.","AssessmentStatus": "Automated","RationaleStatement": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. 
Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. **Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for unauthorized API calls","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.2": {"name": "4.2","checks": {"cloudwatch_log_metric_filter_sign_in_without_mfa": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/viewing_metrics_with_cloudwatch.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all `CloudTrails`:``` aws cloudtrail describe-trails ```- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region `CloudTrail` is active``` aws cloudtrail get-trail-status --name  ```Ensure in the output that `IsLogging` is set to `TRUE`- Ensure identified Multi-region 'Cloudtrail' captures all Management Events``` aws cloudtrail get-event-selectors --trail-name  ```Ensure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. 
Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\" ```Or (To reduce false positives in case Single Sign-On (SSO) is used in the organization):``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `` taken from audit step 1.Use Command: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }' ```Or (To reduce false positives in case Single Sign-On (SSO) is used in the organization):``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored - Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }` reduces false alarms raised when a user logs in via an SSO account."}],"description": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.3": {"name": "4.3","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for 'root' account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail**- ensures that activities from all regions (used as well as unused) are monitored- ensures that activities on all supported global services are monitored- ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for usage of 'root' account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.4": {"name": "4.4","checks": {"cloudwatch_log_metric_filter_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes made to Identity and Access Management (IAM) policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to IAM policies will help ensure authentication and authorization controls remain intact.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for IAM policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.5": {"name": "4.5","checks": {"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to CloudTrail's configuration will help ensure sustained visibility to activities performed in the AWS account.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for cloudtrail configuration changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.6": {"name": "4.6","checks": {"cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
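Audit step 1 is identical for every 4.x check above. A minimal scripted version, assuming standard CLI output shapes (trails using advanced event selectors would need a separate check):

```
# Find multi-region trails, confirm each is logging, and confirm it
# captures all management events (IncludeManagementEvents + ReadWriteType=All).
for trail in $(aws cloudtrail describe-trails \
    --query 'trailList[?IsMultiRegionTrail].Name' --output text); do
  echo "== $trail =="
  aws cloudtrail get-trail-status --name "$trail" --query 'IsLogging'
  aws cloudtrail get-event-selectors --trail-name "$trail" \
    --query "EventSelectors[?IncludeManagementEvents && ReadWriteType=='All']"
done
```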
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring failed console logins may decrease lead time to detect an attempt to brute force a credential, which may provide an indicator, such as source IP, that can be used in other event correlation.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS management Console Login Failures and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.7": {"name": "4.7","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. 
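A quick sketch of audit steps 2-3 above for the authentication-failure filter; `LOG_GROUP` is an illustrative placeholder for the log group identified in step 1:

```
# Dump the filter patterns attached to the CloudTrail log group and
# compare against the expected pattern by eye.
LOG_GROUP="CloudTrail/DefaultLogGroup"
aws logs describe-metric-filters --log-group-name "$LOG_GROUP" \
  --query 'metricFilters[].{name:filterName,pattern:filterPattern}' --output table
# A compliant group includes a pattern equivalent to:
# { ($.eventName = ConsoleLogin) && ($.errorMessage = "Failed authentication") }
```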
``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Data encrypted with disabled or deleted keys will no longer be accessible.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created CMKs","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.8": {"name": "4.8","checks": {"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
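Audit steps 5-7 above can also be scripted. A sketch, where `METRIC` is an illustrative stand-in for the metric name noted in step 4; a pending subscription shows `PendingConfirmation` instead of a real ARN:

```
# Resolve the alarm wired to the metric, then list the SNS topic's subscribers.
METRIC="DisableOrDeleteCMK"
TOPIC_ARN=$(aws cloudwatch describe-alarms \
  --query "MetricAlarms[?MetricName=='$METRIC'].AlarmActions[0]" --output text)
aws sns list-subscriptions-by-topic --topic-arn "$TOPIC_ARN" \
  --query 'Subscriptions[].SubscriptionArn' --output text
```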
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to S3 bucket policies may reduce time to detect and correct permissive policies on sensitive S3 buckets.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for S3 bucket policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.9": {"name": "4.9","checks": {"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. 
Note the `AlarmActions` value - this will provide the SNS topic ARN value. 7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with a valid AWS ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<aws_account_number>:<SnsTopicName>:<SubscriptionID>\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to AWS Config configuration will help ensure sustained visibility of configuration items within the AWS account.","RemediationProcedure": "Perform the following to set up the metric filter, alarm, SNS topic, and subscription: 1. Create a metric filter based on the filter pattern provided, which checks for AWS Config configuration changes, using the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<config_changes_metric>` --metric-transformations metricName=`<config_changes_metric>`,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoints> ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<config_changes_alarm>` --metric-name `<config_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Config configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"5.1": {"name": "5.1","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison","Description": "The Network Access Control List (NACL) function provides stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "**From Console:**Perform the following to determine if the account is configured as prescribed: 1. 
Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW`**Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "**From Console:**Perform the following: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL to remediate, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Click `Edit inbound rules`- Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule- Click `Save`","AdditionalInformation": ""}],"description": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"5.2": {"name": "5.2","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the 0.0.0.0/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. 
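The 5.1 NACL audit above is console-only; a rough CLI equivalent is sketched below. It flags allow-rules open to 0.0.0.0/0 on any ingress entry, but does not inspect port ranges, so flagged entries still need a manual look:

```
# List each NACL together with its world-open ingress allow entries.
aws ec2 describe-network-acls \
  --query "NetworkAcls[].{acl:NetworkAclId,open:Entries[?Egress==\`false\` && CidrBlock=='0.0.0.0/0' && RuleAction=='allow']}" \
  --output json
```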
Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.3": {"name": "5.3","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `::/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` are inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the ::/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than ::/0, or, B) Click `Delete` to remove the offending inbound rule 6. 
Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from ::/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.4": {"name": "5.4","checks": {"ec2_securitygroup_default_restrict_traffic": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#default-security-group","Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic.The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation.**NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:Security Group State1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exist 4. Click the `Outbound Rules` tab 5. Ensure no rules existSecurity Group Members1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. Copy the id of the default security group. 5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home 6. In the filter column type 'Security Group ID : < security group id from #4 >'","ImpactStatement": "Implementing this recommendation in an existing VPC containing operating resources requires extremely careful migration planning as the default security groups are likely to be enabling many ports that are unknown. 
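A CLI counterpart to the 5.2/5.3 console audits above: security groups with an ingress rule open to the world on port 22 (repeat with port 3389 for RDP). Note the `from-port` filter matches exact values, so broad ranges like 0-1024 still require the manual review described above:

```
# IPv4: world-open ingress on port 22
aws ec2 describe-security-groups \
  --filters Name=ip-permission.cidr,Values=0.0.0.0/0 Name=ip-permission.from-port,Values=22 \
  --query 'SecurityGroups[].[GroupId,GroupName]' --output table
# IPv6: world-open ingress on port 22
aws ec2 describe-security-groups \
  --filters Name=ip-permission.ipv6-cidr,Values=::/0 Name=ip-permission.from-port,Values=22 \
  --query 'SecurityGroups[].[GroupId,GroupName]' --output table
```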
Enabling VPC flow logging (of accepts) in an existing environment that is known to be breach free will reveal the current pattern of ports being used for each instance to communicate successfully.","AssessmentStatus": "Automated","RationaleStatement": "Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will in-turn reduce the exposure of those resources.","RemediationProcedure": "Security Group MembersPerform the following to implement the prescribed state:1. Identify AWS resources that exist within the default security group 2. Create a set of least privilege security groups for those resources 3. Place the resources in those security groups 4. Remove the resources noted in #1 from the default security groupSecurity Group State1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Remove any inbound rules 4. Click the `Outbound Rules` tab 5. Remove any Outbound rulesRecommended:IAM groups allow you to edit the \"name\" field. After remediating default groups rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"","AdditionalInformation": ""}],"description": "Ensure the default security group of every VPC restricts all traffic","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"5.5": {"name": "5.5","checks": {"vpc_peering_routing_tables_with_least_privilege": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/peering-configurations-partial-access.html:https://docs.aws.amazon.com/cli/latest/reference/ec2/create-vpc-peering-connection.html","Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.","DefaultValue": null,"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.**From Command Line:**1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a __ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired. ``` aws ec2 describe-route-tables --filter \"Name=vpc-id,Values=\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\" ```","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of breach as resources outside of these routes are inaccessible to the peered VPC.","RemediationProcedure": "Remove and add route table entries to ensure that the least number of subnets or hosts as is required to accomplish the purpose for peering are routable.**From Command Line:**1. 
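A scripted version of the 5.4 "Security Group State" audit above, under the assumption that a compliant default group carries zero inbound and zero outbound rules:

```
# Print any default security group that still has rules attached.
aws ec2 describe-security-groups \
  --filters Name=group-name,Values=default \
  --query 'SecurityGroups[?length(IpPermissions)>`0` || length(IpPermissionsEgress)>`0`].[GroupId,VpcId]' \
  --output table
```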
For each __ containing routes non compliant with your routing policy (which grants more than desired \"least access\"), delete the non compliant route: ``` aws ec2 delete-route --route-table-id  --destination-cidr-block  ```2. Create a new compliant route: ``` aws ec2 create-route --route-table-id  --destination-cidr-block  --vpc-peering-connection-id  ```","AdditionalInformation": "If an organization has AWS transit gateway implemented in their VPC architecture they should look to apply the recommendation above for \"least access\" routing architecture at the AWS transit gateway level in combination with what must be implemented at the standard VPC route table. More specifically, to route traffic between two or more VPCs via a transit gateway VPCs must have an attachment to a transit gateway route table as well as a route, therefore to avoid routing traffic between VPCs an attachment to the transit gateway route table should only be added where there is an intention to route traffic between the VPCs. As transit gateways are able to host multiple route tables it is possible to group VPCs by attaching them to a common route table."}],"description": "Ensure routing tables for VPC peering are \"least access\"","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"5.6": {"name": "5.6","checks": {"ec2_instance_imdsv2_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service/:https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html","Description": "When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method).","DefaultValue": null,"AuditProcedure": "From Console: 1. Sign in to the AWS Management Console and navigate to the EC2 dashboard at https://console.aws.amazon.com/ec2/. 2. In the left navigation panel, under the INSTANCES section, choose Instances. 3. Select the EC2 instance that you want to examine. 4. Check for the IMDSv2 status, and ensure that it is set to Required. From Command Line: 1. Run the describe-instances command using appropriate filtering to list the IDs of all the existing EC2 instances currently available in the selected region: aws ec2 describe-instances --region  --output table --query 'Reservations[*].Instances[*].InstanceId' 2. The command output should return a table with the requested instance IDs. 3. Now run the describe-instances command using an instance ID returned at the previous step and custom filtering to determine whether the selected instance has IMDSv2: aws ec2 describe-instances --region  --instance-ids  --query 'Reservations[*].Instances[*].MetadataOptions' --output table 4. Ensure for all ec2 instances HttpTokens is set to required and State is set to applied. 5. Repeat steps no. 3 and 4 to verify other EC2 instances provisioned within the current region. 6. Repeat steps no. 1 โ€“ 5 to perform the audit process for other AWS regions.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Instance metadata is data about your instance that you can use to configure or manage the running instance. Instance metadata is divided into categories, for example, host name, events, and security groups. 
When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method). With IMDSv2, every request is now protected by session authentication. A session begins and ends a series of requests that software running on an EC2 instance uses to access the locally-stored EC2 instance metadata and credentials. Allowing Version 1 of the service may open EC2 instances to Server-Side Request Forgery (SSRF) attacks, so Amazon recommends utilizing Version 2 for better instance security.","RemediationProcedure": "From Console: 1. Sign in to the AWS Management Console and navigate to the EC2 dashboard at https://console.aws.amazon.com/ec2/. 2. In the left navigation panel, under the INSTANCES section, choose Instances. 3. Select the EC2 instance that you want to examine. 4. Choose Actions > Instance Settings > Modify instance metadata options. 5. Ensure Instance metadata service is set to Enable and set IMDSv2 to Required. 6. Repeat steps no. 1 โ€“ 5 to perform the remediation process for other EC2 Instances in the all applicable AWS region(s). From Command Line: 1. Run the describe-instances command using appropriate filtering to list the IDs of all the existing EC2 instances currently available in the selected region: aws ec2 describe-instances --region  --output table -- query 'Reservations[*].Instances[*].InstanceId' 2. The command output should return a table with the requested instance IDs. 3. Now run the modify-instance-metadata-options command using an instance ID returned at the previous step to update the Instance Metadata Version: aws ec2 modify-instance-metadata-options --instance-id  --http-tokens required --region  4. Repeat steps no. 1 โ€“ 3 to perform the remediation process for other EC2 Instances in the same AWS region. 5. Change the region by updating --region and repeat the entire process for other regions.","AdditionalInformation": ""}],"description": "Ensure that EC2 Metadata Service only allows IMDSv2","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.10": {"name": "1.10","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://tools.ietf.org/html/rfc6238:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users","Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if a MFA device is enabled for all IAM users having a console password:**From Console:**1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left pane, select `Users`3. 
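A sketch condensing the IMDSv2 audit and remediation CLI steps above for a single region; `REGION` is an illustrative placeholder, and the loop switches every instance still accepting IMDSv1 to token-required:

```
# Find instances where HttpTokens != required, then enforce IMDSv2 on each.
REGION="us-east-1"
aws ec2 describe-instances --region "$REGION" \
  --query 'Reservations[].Instances[].[InstanceId,MetadataOptions.HttpTokens]' \
  --output text |
awk '$2 != "required" {print $1}' |
while read -r id; do
  aws ec2 modify-instance-metadata-options \
    --region "$REGION" --instance-id "$id" --http-tokens required
done
```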
If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`. 4. Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,mfa_active elise,false,false brandon,true,true rakesh,false,false helene,false,false paras,true,true anitha,false,false ``` 3. For any user having `password_enabled` set to `true`, ensure `mfa_active` is also set to `true`.","ImpactStatement": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to one of the following alternative methods of MFA.","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to enable MFA:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' 2. In the left pane, select `Users`. 3. In the `User Name` list, choose the name of the intended MFA user. 4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`. 5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords. 8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`. 9. Click `Assign MFA`.","AdditionalInformation": "**Forced IAM User Self-Service Remediation** Amazon has published a pattern that forces users to set up MFA themselves before they have access to their complete permissions set. 
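A compact scripted form of the credential-report check above, assuming GNU coreutils for `base64 --decode`; it prints console users (column 4, `password_enabled`) who lack MFA (column 8, `mfa_active`):

```
# List users with a console password but no MFA device.
aws iam generate-credential-report >/dev/null
aws iam get-credential-report --query Content --output text | base64 --decode |
awk -F, 'NR>1 && $4=="true" && $8=="false" {print $1}'
```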
Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts."}],"description": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.11": {"name": "1.11","checks": {"iam_user_no_setup_initial_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/cli/latest/reference/iam/delete-access-key.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html","Description": "The AWS console defaults to no check boxes selected when creating a new IAM user. When creating the IAM user credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed:**From Console:**1. Login to the AWS Management Console 2. Click `Services` 3. Click `IAM` 4. Click on a User where the `Password age` and `Access key age` columns are not set to `None` 5. Click on the `Security credentials` tab 6. Compare the user `Creation time` to the Access Key `Created` date. 7. For any that match, the key was created during initial user setup.- Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access keys utilization: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date elise,false,true,2015-04-16T15:14:00+00:00,false,N/A brandon,true,true,N/A,false,N/A rakesh,false,false,N/A,false,N/A helene,false,true,2015-11-18T17:47:00+00:00,false,N/A paras,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00 anitha,true,true,2016-06-08T11:43:00+00:00,true,N/A ``` 3. 
For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization.**Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.","RemediationProcedure": "Perform the following to delete access keys that do not pass the audit:**From Console:**1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. As an Administrator - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. 7. As an IAM User- Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.**From Command Line:** ``` aws iam delete-access-key --access-key-id  --user-name  ```","AdditionalInformation": "Credential report does not appear to contain \"Key Creation Date\""}],"description": "Do not setup access keys during initial user setup for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.12": {"name": "1.12","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 45 or greater days be deactivated or removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if unused credentials exist:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM` 4. Click on `Users` 5. Click the `Settings` (gear) icon. 6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id` 7. Click on `Close`8. Check and ensure that `Console last sign-in` is less than 45 days ago.**Note** - `Never` means the user has never logged in.9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None`If the user hasn't signed into the Console in the last 45 days or Access keys are over 45 days old refer to the remediation.**From Command Line:****Download Credential Report:**1. Run the following commands: ```aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^' ```**Ensure unused credentials do not exist:**2. 
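A sketch of the 1.12 password check above, restricted to console passwords for brevity; it assumes GNU `date` (for `date -d`) and relies on ISO timestamps comparing lexicographically:

```
# Flag users whose password is enabled but unused for more than 45 days.
cutoff=$(date -d '45 days ago' +%Y-%m-%d)
aws iam get-credential-report --query Content --output text | base64 --decode |
awk -F, -v cutoff="$cutoff" \
  'NR>1 && $4=="true" && $5 ~ /^[0-9]/ && substr($5,1,10) < cutoff {print $1, substr($5,1,10)}'
```

The same pattern applies to the access-key columns (11 and 16) described above.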
For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago.- When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago.3. For each user having an `access_key_1_active` or `access_key_2_active` to `TRUE` , ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago.- When a user having an `access_key_x_active` (where x is 1 or 2) to `TRUE` and corresponding access_key_x_last_used_date is set to `N/A', ensure `access_key_x_last_rotated` is less than 45 days ago.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.","RemediationProcedure": "**From Console:**Perform the following to manage Unused Password (IAM user console access)1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select user whose `Console last sign-in` is greater than 45 days 7. Click `Security credentials` 8. In section `Sign-in credentials`, `Console password` click `Manage`9. Under Console Access select `Disable` 10.Click `Apply`Perform the following to deactivate Access Keys:1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select any access keys that are over 45 days old and that have been used and - Click on `Make Inactive` 7. Select any access keys that are over 45 days old and that have not been used and - Click the X to `Delete`","AdditionalInformation": " is excluded in the audit since the root account should not be used for day to day business and would likely be unused for more than 45 days."}],"description": "Ensure credentials unused for 45 days or greater are disabled","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.13": {"name": "1.13","checks": {"iam_user_two_active_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html","Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases. - Repeat steps no. 3 โ€“ 5 for each IAM user in your AWS account.**From Command Line:**1. Run `list-users` command to list all IAM users within your account: ``` aws iam list-users --query \"Users[*].UserName\" ``` The command output should return an array that contains all your IAM user names.2. 
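A scripted form of the 1.13 audit above, flagging IAM users holding more than one Active access key:

```
# Count active keys per user and report anyone over the limit of one.
for user in $(aws iam list-users --query 'Users[].UserName' --output text); do
  n=$(aws iam list-access-keys --user-name "$user" \
        --query "length(AccessKeyMetadata[?Status=='Active'])" --output text)
  [ "$n" -gt 1 ] && echo "$user has $n active access keys"
done
```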
Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user: ``` aws iam list-access-keys --user-name  ``` The command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account.3. Check the `Status` property value for each key returned to determine each key's current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user access configuration does not adhere to this recommendation; refer to the remediation below.- Repeat steps no. 2 and 3 for each IAM user in your AWS account.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate them by clicking the `Make Inactive` link. 7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key. 8. Repeat steps no. 3 – 7 for each IAM user in your AWS account.**From Command Line:**1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user**Note** - the command does not return any output: ``` aws iam update-access-key --access-key-id  --status Inactive --user-name  ``` 3. To confirm that the selected access key pair has been successfully `deactivated` run the `list-access-keys` audit command again for that IAM User: ``` aws iam list-access-keys --user-name  ``` - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration now adheres to this recommendation.4. Repeat steps no. 1 – 3 for each IAM user in your AWS account.","AdditionalInformation": ""}],"description": "Ensure there is only one active access key available for any single IAM user","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.14": {"name": "1.14","checks": {"iam_rotate_access_key_90_days": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click the `Settings` icon 4. Select `Console last sign-in` 5. Click `Close` 6. Ensure that `Access key age` is less than 90 days ago. **Note:** `None` in the `Access key age` means the user has not used the access key.**From Command Line:**``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d ``` The `access_key_1_last_rotated` field in this file notes the date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used.Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.","RemediationProcedure": "Perform the following to rotate access keys:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click on `Security Credentials`4. As an Administrator - Click on `Make Inactive` for keys that have not been rotated in `90` Days 5. As an IAM User- Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days 6. Click on `Create Access Key`7. Update programmatic call with new Access Key credentials**From Command Line:**1. While the first access key is still active, create a second access key, which is active by default. Run the following command: ``` aws iam create-access-key ```At this point, the user has two active access keys.2. Update all applications and tools to use the new access key. 3. Determine whether the first access key is still in use by using this command: ``` aws iam get-access-key-last-used ``` 4. One approach is to wait several days and then check the old access key for any use before proceeding.Even if Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command: ``` aws iam update-access-key ``` 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to re-enable the first access key. Then return to Step 2 and update this application to use the new key.6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: ``` aws iam delete-access-key ``` 
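**Illustrative example (editor's addition; all names below are hypothetical placeholders, not values from the benchmark):** for a user named `alice` whose old key ID is `AKIAOLDKEYEXAMPLE`, the full rotation sequence above would be: ``` aws iam create-access-key --user-name alice # 'alice' is a placeholder ``` then, after updating all applications to the new key: ``` aws iam get-access-key-last-used --access-key-id AKIAOLDKEYEXAMPLE # placeholder key ID ``` and, once the old key shows no recent use: ``` aws iam update-access-key --access-key-id AKIAOLDKEYEXAMPLE --status Inactive --user-name alice ``` followed, after a safe waiting period, by: ``` aws iam delete-access-key --access-key-id AKIAOLDKEYEXAMPLE --user-name alice ``` 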
","AdditionalInformation": ""}],"description": "Ensure access keys are rotated every 90 days or less","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.15": {"name": "1.15","checks": {"iam_policy_attached_only_to_group_or_roles": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html","Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users:1. Run the following to get a list of IAM users: ```aws iam list-users --query 'Users[*].UserName' --output text``` 2. For each user returned, run the following command to determine if any policies are attached to them: ```aws iam list-attached-user-policies --user-name aws iam list-user-policies --user-name ``` 3. If any policies are returned, the user has an inline policy or direct policy attachment.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Assigning IAM policy only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.","RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` and then click `Create New Group` . 3. In the `Group Name` box, type the name of the group and then click `Next Step` . 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` . 5. Click `Create Group` Perform the following to add a user to a given group:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups`3. Select the group to add a user to 4. Click `Add Users To Group`5. Select the users to be added to the group 6. Click `Add Users` Perform the following to remove a direct association between a user and policy:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left navigation pane, click on Users 3. 
For each user:- Select the user- Click on the `Permissions` tab- Expand `Permissions policies` - Click `X` for each policy; then click Detach or Remove (depending on policy type)","AdditionalInformation": ""}],"description": "Ensure IAM Users Receive Permissions Only Through Groups","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.16": {"name": "1.16","checks": {"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam","Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered standard security advice to grant _least privilege_ - that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.","DefaultValue": null,"AuditProcedure": "Perform the following to determine what policies are created:**From Command Line:**1. Run the following to get a list of IAM policies: ```aws iam list-policies --only-attached --output text ``` 2. For each policy returned, run the following command to determine if any policy is allowing full administrative privileges on the account: ```aws iam get-policy-version --policy-arn  --version-id  ``` 3. In the output, ensure the policy does not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later.Providing full administrative privileges instead of restricting to the minimum set of permissions that the user requires exposes the resources to potentially unwanted actions.IAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.","RemediationProcedure": "From Console: Perform the following to detach the policy that has full administrative privileges: 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. 3. Select the policy that needs to be deleted. 4. In the policy action menu, first select Detach 5. Select all Users, Groups, Roles that have this policy attached 6. Click Detach Policy 7. In the policy action menu, select Detach 8. Select the newly detached policy and select Delete From Command Line: Perform the following to detach the policy that has full administrative privileges as found in the audit step: 1. List all IAM users, groups, and roles that the specified managed policy is attached to: aws iam list-entities-for-policy --policy-arn  2. Detach the policy from all IAM Users: aws iam detach-user-policy --user-name  --policy-arn  3. Detach the policy from all IAM Groups: aws iam detach-group-policy --group-name  --policy-arn  4. 
Detach the policy from all IAM Roles: aws iam detach-role-policy --role-name  --policy-arn ","AdditionalInformation": ""}],"description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not attached","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.17": {"name": "1.17","checks": {"iam_support_role_created": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://aws.amazon.com/premiumsupport/pricing/:https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html:https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html:https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html","Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.","DefaultValue": null,"AuditProcedure": "**From Command Line:**1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value: ``` aws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\" ``` 2. Check if the 'AWSSupportAccess' policy is attached to any role:``` aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess ```3. In the output, ensure `PolicyRoles` does not return empty. Example: `PolicyRoles: [ ]`. If it returns empty refer to the remediation below.","ImpactStatement": "All AWS Support plans include an unlimited number of account and billing support cases, with no long-term contracts. Support billing calculations are performed on a per-account basis for all plans. Enterprise Support plan customers have the option to include multiple enabled accounts in an aggregated monthly billing calculation. Monthly charges for the Business and Enterprise support plans are based on each month's AWS usage charges, subject to a monthly minimum, billed in advance.When assigning rights, keep in mind that other policies may grant access to Support as well. This may include AdministratorAccess and other policies including customer managed policies. Utilizing the AWS managed 'AWSSupportAccess' role is one simple way of ensuring that this permission is properly granted.To better support the principle of separation of duties, it would be best to only attach this role where necessary.","AssessmentStatus": "Automated","RationaleStatement": "By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.","RemediationProcedure": "**From Command Line:**1. Create an IAM role for managing incidents with AWS:- Create a trust relationship policy document that allows  to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: ```{\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"\"},\"Action\": \"sts:AssumeRole\"}]} ``` 2. Create the IAM role using the above trust policy: ``` aws iam create-role --role-name  --assume-role-policy-document file:///tmp/TrustPolicy.json ``` 3. 
Attach 'AWSSupportAccess' managed policy to the created IAM role: ``` aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name  ```","AdditionalInformation": "AWSSupportAccess policy is a global AWS resource. It has the same ARN, `arn:aws:iam::aws:policy/AWSSupportAccess`, for every account."}],"description": "Ensure a support role has been created to manage incidents with AWS Support","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.18": {"name": "1.18","checks": {"ec2_instance_profile_attached": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html","Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.","DefaultValue": null,"AuditProcedure": "Where an instance is associated with a Role:For instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions:1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Open the EC2 Dashboard and choose \"Instances\" 3. Click the EC2 instance that performs AWS actions, in the lower pane details find \"IAM Role\" 4. If the Role is blank, the instance is not assigned to one. 5. If the Role is filled in, it does not mean the instance might not \*also\* have credentials encoded on it for some activities.Where an Instance Contains Embedded Credentials:- On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials.Where an Instance Application Contains Embedded Credentials:- Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as GitHub. Whether an application contains credentials can be determined by eliminating all other sources of credentials: if the application can still access AWS resources, it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it.Additionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.","RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance to add it to a role you must create a new instance.If the instance has no external dependencies on its current private IP address, or its public addresses are Elastic IPs:1. In AWS IAM create a new role. Assign a permissions policy if needed permissions are already known. 2. In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected. 3. Shutdown both the existing instance and the new instance. 4. Detach disks from both instances. 5. Attach the existing instance disks to the new instance. 6. Boot the new instance and you should have the same machine, but with the associated role. 
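**Illustrative example (editor's addition; AMI ID, instance type, and profile name are hypothetical placeholders):** assuming an instance profile named `MyAppProfile` has been created for the new role, step 2 could also be performed from the command line with: ``` aws ec2 run-instances --image-id ami-0123456789abcdef0 --instance-type t3.micro --iam-instance-profile Name=MyAppProfile # all values are placeholders; mirror the existing instance's settings ``` 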
**Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then, when launching from the AMI, manually assign the previous private IP address.**Note:** if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way to ensure the address is retained while assigning an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.","AdditionalInformation": ""}],"description": "Ensure IAM instance roles are used for AWS resource access from instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.19": {"name": "1.19","checks": {"iam_no_expired_server_certificates_stored": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html:https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html","Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates.Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. 
Additionally, you cannot manage your certificates from the IAM Console.","DefaultValue": null,"AuditProcedure": "**From Console:**Getting the certificates' expiration information via the AWS Management Console is not currently supported.To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**Run the list-server-certificates command to list all the IAM-stored server certificates:``` aws iam list-server-certificates ```The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc):``` {\"ServerCertificateMetadataList\": [{\"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\",\"ServerCertificateName\": \"MyServerCertificate\",\"Expiration\": \"2018-07-10T23:59:59Z\",\"Path\": \"/\",\"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\",\"UploadDate\": \"2018-06-10T11:56:08Z\"}] } ```Verify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them.If this command returns: ``` { \"ServerCertificateMetadataList\": [] } ``` this means that there are no expired certificates; it DOES NOT mean that no certificates exist.","ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. Configuration changes must be made at the respective services to ensure there is no interruption in application functionality.","AssessmentStatus": "Automated","RationaleStatement": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. As a best practice, it is recommended to delete expired certificates.","RemediationProcedure": "**From Console:**Removing expired certificates via the AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**To delete an expired certificate, run the following command, replacing  with the name of the certificate to delete:``` aws iam delete-server-certificate --server-certificate-name  ```When the preceding command is successful, it does not return any output.","AdditionalInformation": ""}],"description": "Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.20": {"name": "1.20","checks": {"accessanalyzer_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html","Description": "Enable IAM Access analyzer for IAM policies about all resources in each region.IAM Access Analyzer is a technology introduced at AWS re:Invent 2019. 
After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. The results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least-privilege access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Open the IAM console at `https://console.aws.amazon.com/iam/` 2. Choose `Access analyzer` 3. Click 'Analyzers' 4. Ensure that at least one analyzer is present 5. Ensure that the `STATUS` is set to `Active` 6. Repeat these steps for each active region**From Command Line:**1. Run the following command: ``` aws accessanalyzer list-analyzers | grep status ``` 2. Ensure that for at least one Analyzer the `status` is set to `ACTIVE`3. Repeat the steps above for each active region.If an Access analyzer is not listed for each region or the status is not set to active refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 buckets, IAM roles, KMS (Key Management Service) keys, AWS Lambda functions, and Amazon SQS (Simple Queue Service) queues.","RemediationProcedure": "**From Console:**Perform the following to enable IAM Access analyzer for IAM policies:1. Open the IAM console at `https://console.aws.amazon.com/iam/.` 2. Choose `Access analyzer`. 3. Choose `Create analyzer`. 4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer. 5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`. 6. Add any tags that you want to apply to the analyzer. `Optional`.7. Choose `Create Analyzer`. 8. Repeat these steps for each active region**From Command Line:**Run the following command: ``` aws accessanalyzer create-analyzer --analyzer-name  --type  ``` Repeat this command for each active region.**Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions. 
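**Illustrative example (editor's addition; the analyzer name is a hypothetical placeholder):** a concrete invocation could be: ``` aws accessanalyzer create-analyzer --analyzer-name my-account-analyzer --type ACCOUNT # name is a placeholder; type may be ACCOUNT or ORGANIZATION ``` 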
","AdditionalInformation": ""}],"description": "Ensure that IAM Access analyzer is enabled for all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.21": {"name": "1.21","checks": {"iam_check_saml_providers_sts": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "","Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.","DefaultValue": null,"AuditProcedure": "For multi-account AWS environments with an external identity provider: 1. Determine the master account for identity federation or IAM user management 2. Login to that account through the AWS Management Console 3. Click Services 4. Click IAM 5. Click Identity providers 6. Verify the configuration Then, determine all accounts that should not have local users present. For each account: 1. Log into the AWS Management Console 2. Switch role into each identified account 3. Click Services 4. Click IAM 5. Click Users 6. Confirm that no IAM users representing individuals are present For multi-account AWS environments implementing AWS Organizations without an external identity provider: 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click Services 5. Click IAM 6. Click Users 7. Confirm that no IAM users representing individuals are present","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Centralizing IAM user management to a single identity store reduces complexity and thus the likelihood of access management errors.","RemediationProcedure": "The remediation procedure will vary based on the individual organization's implementation of identity federation and/or AWS Organizations with the acceptance criteria that no non-service IAM users, and non-root accounts, are present outside the account providing centralized IAM user management.","AdditionalInformation": ""}],"description": "Ensure IAM users are managed centrally via identity federation or AWS Organizations for multi-account environments","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.22": {"name": "1.22","checks": {},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html","Description": "AWS CloudShell is a convenient way of running CLI commands against AWS services; a managed IAM policy ('AWSCloudShellFullAccess') provides full access to CloudShell, which allows file upload and download capability between a user's local system and the CloudShell environment. Within the CloudShell environment a user has sudo permissions, and can access the internet. So it is feasible to install file transfer software (for example) and move data from CloudShell to external internet servers.","DefaultValue": null,"AuditProcedure": "**From Console** 1. Open the IAM console at https://console.aws.amazon.com/iam/2. In the left pane, select Policies3. Search for and select AWSCloudShellFullAccess4. On the Entities attached tab, ensure that there are no entities using this policy **From Command Line**1. List IAM policies, filter for the 'AWSCloudShellFullAccess' managed policy, and note the \"Arn\" element value:```aws iam list-policies --query \"Policies[?PolicyName == 'AWSCloudShellFullAccess']\"``` 2. Check if the 'AWSCloudShellFullAccess' policy is attached to any role: ```aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSCloudShellFullAccess```3. In the output, ensure PolicyRoles returns empty. Example: `PolicyRoles: [ ]`. If it does not return empty refer to the remediation below.Note: Keep in mind that other policies may grant access.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Access to this policy should be restricted as it presents a potential channel for data exfiltration by malicious cloud admins that are given full permissions to the service (see the illustrative sketch below). 
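**Illustrative sketch (editor's addition, not the exact policy from the AWS documentation):** a restriction along these lines could explicitly deny the CloudShell file-transfer actions, e.g. ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Deny\",\"Action\": [\"cloudshell:GetFileDownloadUrls\",\"cloudshell:GetFileUploadUrls\"],\"Resource\": \"*\"}]} ``` The two action names are believed to match those used in the AWS CloudShell documentation; verify them against the current documentation before use. 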
AWS documentation describes how to create a more restrictive IAM policy which denies file transfer permissions.","RemediationProcedure": "**From Console**1. Open the IAM console at https://console.aws.amazon.com/iam/2. In the left pane, select Policies 3. Search for and select AWSCloudShellFullAccess4. On the Entities attached tab, for each item, check the box and select Detach","AdditionalInformation": ""}],"description": "Ensure access to AWSCloudShellFullAccess is restricted","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"4.10": {"name": "4.10","checks": {"cloudwatch_log_metric_filter_security_group_changes": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '']\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. 
``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to security group will help ensure that resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for security groups changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"\" --filter-name \"\" --metric-transformations metricName= \"\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name \"\" ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn \"\" --protocol  --notification-endpoint \"\" ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"\" --metric-name \"\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"\" ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for security group changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.11": {"name": "4.11","checks": {"cloudwatch_changes_to_network_acls_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to NACLs will help ensure that AWS resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL)","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.12": {"name": "4.12","checks": {"cloudwatch_changes_to_network_gateways_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to network gateways will help ensure that all ingress/egress traffic traverses the VPC border via a controlled path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to network gateways","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.13": {"name": "4.13","checks": {"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. 
It is recommended that a metric filter and alarm be established for changes to route tables.","DefaultValue": null,"AuditProcedure": "If you are using CloudTrail and CloudWatch, perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = ec2.amazonaws.com) && ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "CloudWatch is an AWS native service that allows you to observe and monitor resources and applications. CloudTrail Logs can also be sent to an external Security information and event management (SIEM) environment for monitoring and alerting.Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path and prevent any accidental or intentional modifications that may lead to uncontrolled network traffic. An alarm should be triggered every time an AWS API call is performed to create, replace, delete, or disassociate a Route Table.","RemediationProcedure": "If you are using CloudTrail and CloudWatch, perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for route table changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together. 2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. 3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms. 4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ``` 
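**Illustrative worked example (editor's addition; the log group, filter, metric, topic, region, account ID, and email address are all hypothetical placeholders):** ``` aws logs put-metric-filter --log-group-name CloudTrail/DefaultLogGroup --filter-name RouteTableChanges --metric-transformations metricName=RouteTableChangesCount,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ``` then ``` aws sns create-topic --name cis-monitoring-alerts ``` then ``` aws sns subscribe --topic-arn arn:aws:sns:us-east-1:111122223333:cis-monitoring-alerts --protocol email --notification-endpoint security@example.com ``` and finally ``` aws cloudwatch put-metric-alarm --alarm-name route-table-changes-alarm --metric-name RouteTableChangesCount --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace CISBenchmark --alarm-actions arn:aws:sns:us-east-1:111122223333:cis-monitoring-alerts ``` 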
","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure route table changes are monitored","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.14": {"name": "4.14","checks": {"cloudwatch_changes_to_vpcs_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. 
Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" ```4. Note the `<vpc_changes_metric>` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `<vpc_changes_metric>` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<vpc_changes_metric>`]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with a valid AWS ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns:<region>:<account_id>:<topic_name>:<subscription_id>\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to VPCs will help ensure VPC traffic flow is not getting impacted.","RemediationProcedure": "Perform the following to set up the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on the filter pattern provided, which checks for VPC changes, and the `<cloudtrail_log_group_name>` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<vpc_changes_metric>` --metric-transformations metricName= `<vpc_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name <sns_topic_name> ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoint> ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4.
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and the SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `<vpc_changes_alarm>` --metric-name `<vpc_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```","AdditionalInformation": "Configuring the log metric filter and alarm on a multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for VPC changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.15": {"name": "4.15","checks": {"cloudwatch_log_metric_filter_aws_organizations_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.","DefaultValue": null,"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: - Identify the log group name configured for use with the active multi-region CloudTrail: - List all CloudTrails:``` aws cloudtrail describe-trails ``` - Identify multi-region CloudTrails: Trails with `\"IsMultiRegionTrail\"` set to true - From the value associated with CloudWatchLogsLogGroupArn note `<cloudtrail_log_group_name>`. **Example:** for a CloudWatchLogsLogGroupArn that looks like arn:aws:logs:<region>:<account_id>:log-group:NewGroup:*, `<cloudtrail_log_group_name>` would be NewGroup- Ensure the identified multi-region CloudTrail is active: ``` aws cloudtrail get-trail-status --name <trail_name> ``` Ensure `IsLogging` is set to `TRUE`- Ensure the identified multi-region CloudTrail captures all management events: ``` aws cloudtrail get-event-selectors --trail-name <trail_name> ``` - Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`.2. Get a list of all associated metric filters for this `<cloudtrail_log_group_name>`: ``` aws logs describe-metric-filters --log-group-name \"<cloudtrail_log_group_name>\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\" ``` 4.
Note the `<organizations_changes_metric>` value associated with the filterPattern found in step 3.5. Get a list of CloudWatch alarms and filter on the `<organizations_changes_metric>` captured in step 4: ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== `<organizations_changes_metric>`]' ``` 6. Note the AlarmActions value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic: ``` aws sns list-subscriptions-by-topic --topic-arn <sns_topic_arn> ``` at least one subscription should have \"SubscriptionArn\" with a valid AWS ARN. Example of a valid \"SubscriptionArn\":``` \"arn:aws:sns:<region>:<account_id>:<topic_name>:<subscription_id>\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring AWS Organizations changes can help you prevent any unwanted, accidental or intentional modifications that may lead to unauthorized access or other security breaches. This monitoring technique helps you to ensure that any unexpected changes performed within your AWS Organizations can be investigated and any unwanted changes can be rolled back.","RemediationProcedure": "Perform the following to set up the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on the filter pattern provided, which checks for AWS Organizations changes, and the `<cloudtrail_log_group_name>` taken from audit step 1: ``` aws logs put-metric-filter --log-group-name <cloudtrail_log_group_name> --filter-name `<organizations_changes_metric>` --metric-transformations metricName= `<organizations_changes_metric>` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }' ``` **Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify: ``` aws sns create-topic --name <sns_topic_name> ``` **Note:** you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2: ``` aws sns subscribe --topic-arn <sns_topic_arn> --protocol <protocol_for_sns> --notification-endpoint <sns_subscription_endpoint> ``` **Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and the SNS topic created in step 2: ``` aws cloudwatch put-metric-alarm --alarm-name `<organizations_changes_alarm>` --metric-name `<organizations_changes_metric>` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions <sns_topic_arn> ```","AdditionalInformation": ""}],"description": "Ensure a log metric filter and alarm exist for AWS Organizations changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.16": {"name": "4.16","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4.
Monitoring","References": "https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-get-started.html:https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-enable.html#securityhub-enable-api:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/securityhub/enable-security-hub.html","Description": "Security Hub collects security data from across AWS accounts, services, and supported third-party partner products and helps you analyze your security trends and identify the highest priority security issues. When you enable Security Hub, it begins to consume, aggregate, organize, and prioritize findings from AWS services that you have enabled, such as Amazon GuardDuty, Amazon Inspector, and Amazon Macie. You can also enable integrations with AWS partner security products.","DefaultValue": null,"AuditProcedure": "The process to evaluate AWS Security Hub configuration per region **From Console:**1. Sign in to the AWS Management Console and open the AWS Security Hub console at https://console.aws.amazon.com/securityhub/. 2. On the top right of the console, select the target Region. 3. If presented with the Security Hub > Summary page then Security Hub is set-up for the selected region. 4. If presented with Setup Security Hub or Get Started With Security Hub - follow the online instructions. 5. Repeat steps 2 to 4 for each region.","ImpactStatement": "It is recommended AWS Security Hub be enabled in all regions. AWS Security Hub requires AWS Config to be enabled.","AssessmentStatus": "Automated","RationaleStatement": "AWS Security Hub provides you with a comprehensive view of your security state in AWS and helps you check your environment against security industry standards and best practices - enabling you to quickly assess the security posture across your AWS accounts.","RemediationProcedure": "To grant the permissions required to enable Security Hub, attach the Security Hub managed policy AWSSecurityHubFullAccess to an IAM user, group, or role.Enabling Security Hub**From Console:**1. Use the credentials of the IAM identity to sign in to the Security Hub console. 2. When you open the Security Hub console for the first time, choose Enable AWS Security Hub. 3. On the welcome page, Security standards list the security standards that Security Hub supports. 4. Choose Enable Security Hub.**From Command Line:**1. Run the enable-security-hub command. To enable the default standards, include `--enable-default-standards`. ``` aws securityhub enable-security-hub --enable-default-standards ```2. To enable the security hub without the default standards, include `--no-enable-default-standards`. ``` aws securityhub enable-security-hub --no-enable-default-standards ```","AdditionalInformation": ""}],"description": "Ensure AWS Security Hub is enabled","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.1": {"name": "2.1.1","checks": {"s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "2.1. 
Simple Storage Service (S3)","References": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/:https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-policy.html","Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.","DefaultValue": null,"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\".**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions', then Click on `Bucket Policy`. 4. Ensure that a policy is listed that matches: ``` '{\"Sid\": ,\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}' ``` `` and `` will be specific to your account5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets``` aws s3 ls ``` 2. Using the list of buckets run this command on each of them: ``` aws s3api get-bucket-policy --bucket  | grep aws:SecureTransport ``` 3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false` 4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions'. 4. Click 'Bucket Policy' 5. Add this to the existing policy filling in the required information ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 6. Save 7. Repeat for all the buckets in your AWS account that contain sensitive data.**From Console** using AWS Policy Generator:1. Repeat steps 1-4 above. 2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor 3. Select Policy Type `S3 Bucket Policy` 4. Add Statements - `Effect` = Deny - `Principal` = * - `AWS Service` = Amazon S3 - `Actions` = * - `Amazon Resource Name` =  5. Generate Policy 6. Copy the text and add it to the Bucket Policy.**From Command Line:**1. Export the bucket policy to a json file. ``` aws s3api get-bucket-policy --bucket  --query Policy --output text > policy.json ```2. 
Modify the policy.json file by adding in this statement: ``` {\"Sid\": \"<SID>\",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::<bucket_name>/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 3. Apply this modified policy back to the S3 bucket: ``` aws s3api put-bucket-policy --bucket <bucket_name> --policy file://policy.json ```","AdditionalInformation": ""}],"description": "Ensure S3 Bucket Policy is set to deny HTTP requests","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.2": {"name": "2.1.2","checks": {"s3_bucket_no_mfa_delete": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.1. Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete:https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html:https://aws.amazon.com/blogs/security/securing-access-to-aws-using-mfa-part-3/:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html","Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket, it requires the user to have two forms of authentication.","DefaultValue": null,"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket**From Console:**1. Login to the S3 console at `https://console.aws.amazon.com/s3/`2. Click the `Check` box next to the Bucket name you want to confirm3. In the window under `Properties`4. Confirm that Versioning is `Enabled`5. Confirm that MFA Delete is `Enabled`**From Command Line:**1. Run the `get-bucket-versioning` command ``` aws s3api get-bucket-versioning --bucket my-bucket ```Output example: ``` <Status>Enabled</Status> <MfaDelete>Enabled</MfaDelete> ```If the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Adding MFA delete to an S3 bucket requires additional authentication when you change the version state of your bucket or delete an object version, adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.","RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket.Note: -You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API. -You must use your 'root' account to enable MFA Delete on S3 buckets.**From Command line:**1. Run the s3api put-bucket-versioning command``` aws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa \"arn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcode\" ```","AdditionalInformation": ""}],"description": "Ensure MFA Delete is enabled on S3 buckets","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.3": {"name": "2.1.3","checks": {"macie_is_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://aws.amazon.com/macie/getting-started/:https://docs.aws.amazon.com/workspaces/latest/adminguide/data-protection.html:https://docs.aws.amazon.com/macie/latest/user/data-classification.html","Description": "Amazon S3 buckets can contain sensitive data that for security purposes should be discovered, monitored, classified and protected.
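Before moving on to Macie: as a consolidated illustration of the 2.1.1 deny-HTTP remediation above, here is a minimal shell sketch; the bucket name and Sid are hypothetical:

```bash
# Hypothetical bucket; adapt to your environment.
BUCKET="my-example-bucket"

# Write a policy that denies any S3 action made over plain HTTP.
cat > policy.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "DenyInsecureTransport",
      "Effect": "Deny",
      "Principal": "*",
      "Action": "s3:*",
      "Resource": ["arn:aws:s3:::${BUCKET}", "arn:aws:s3:::${BUCKET}/*"],
      "Condition": {"Bool": {"aws:SecureTransport": "false"}}
    }
  ]
}
EOF

# Apply the policy to the bucket.
aws s3api put-bucket-policy --bucket "$BUCKET" --policy file://policy.json
```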
Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.","DefaultValue": null,"AuditProcedure": "Perform the following steps to determine if Macie is running:**From Console:** 1. Login to the Macie console at https://console.aws.amazon.com/macie/ 2. In the left hand pane click on By job under findings. 3. Confirm that you have a Job setup for your S3 BucketsWhen you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below.If you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.","ImpactStatement": "There is a cost associated with using Amazon Macie. There is also typically a cost associated with 3rd Party tools that perform similar processes and protection.","AssessmentStatus": "Manual","RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information.Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.","RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie**From Console:**1. Log on to the Macie console at `https://console.aws.amazon.com/macie/`2. Click `Get started`.3. Click `Enable Macie`.Setup a repository for sensitive data discovery results1. In the Left pane, under Settings, click `Discovery results`.2. Make sure `Create bucket` is selected.3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number.4. Click on `Advanced`.5. Block all public access, make sure `Yes` is selected.6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket.7. Click on `Save`Create a job to discover sensitive data1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account.2. Select the `check box` for each bucket that you want Macie to analyze as part of the job3. Click `Create job`.3. Click `Quick create`.4. For the Name and description step, enter a name and, optionally, a description of the job.5. Then click `Next`.6. For the Review and create step, click `Submit`.Review your findings1. In the left pane, click `Findings`.2. To view the details of a specific finding, choose any field other than the check box for the finding.If you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.","AdditionalInformation": ""}],"description": "Ensure all data in Amazon S3 has been discovered, classified and secured when required.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.4": {"name": "2.1.4","checks": {"s3_bucket_level_public_access_block": "PASS","s3_account_level_public_access_blocks": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.1. 
Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html","Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.","DefaultValue": null,"AuditProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Ensure that block public access settings are set appropriately for this bucket 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Find the public access setting on that bucket ``` aws s3api get-public-access-block --bucket  ``` Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"BlockPublicAcls\": true,\"IgnorePublicAcls\": true,\"BlockPublicPolicy\": true,\"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.**If utilizing Block Public Access (account settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block public access (account settings)` 3. Ensure that block public access settings are set appropriately for your AWS account.**From Command Line:**To check Public access settings for this account status, run the following command, `aws s3control get-public-access-block --account-id  --region `Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"BlockPublicAcls\": true, \"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.","ImpactStatement": "When you apply Block Public Access settings to an account, the settings apply to all AWS Regions globally. The settings might not take effect in all Regions immediately or simultaneously, but they eventually propagate to all Regions.","AssessmentStatus": "Automated","RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). Amazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account.Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.","RemediationProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. 
Click on 'Edit public access settings'. 4. Click 'Block all public access' 5. Repeat for all the buckets in your AWS account that contain sensitive data.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Set the Block Public Access to true on that bucket ``` aws s3api put-public-access-block --bucket  --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\" ```**If utilizing Block Public Access (account settings)****From Console:**If the output reads `true` for the separate configuration settings then it is set on the account.1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block Public Access (account settings)` 3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account 4. Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons. 5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes.**From Command Line:**To set Block Public access settings for this account, run the following command: ``` aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id  ```","AdditionalInformation": ""}],"description": "Ensure that S3 Buckets are configured with 'Block public access (bucket settings)'","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"2.2.1": {"name": "2.2.1","checks": {"ec2_ebs_volume_encryption": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.2. Elastic Compute Cloud (EC2)","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html:https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/","Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Verify `Always encrypt new EBS volumes` displays `Enabled`. 4. Review every region in-use.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. Run``` aws --region  ec2 get-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Review every region in-use.**Note:** EBS volume encryption is configured per region.","ImpactStatement": "Losing access or removing the KMS key in use by the EBS volumes will result in no longer being able to access the volumes.","AssessmentStatus": "Automated","RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Click `Manage`. 4. Click the `Enable` checkbox. 5. Click `Update EBS encryption` 6. Repeat for every region requiring the change.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. 
Run``` aws --region  ec2 enable-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Repeat every region requiring the change.**Note:** EBS volume encryption is configured per region.","AdditionalInformation": "Default EBS volume encryption only applies to newly created EBS volumes. Existing EBS volumes are **not** converted automatically."}],"description": "Ensure EBS Volume Encryption is Enabled in all Regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.1": {"name": "2.3.1","checks": {"rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html:https://aws.amazon.com/blogs/database/selecting-the-right-encryption-options-for-amazon-rds-and-amazon-aurora-database-engines/#:~:text=With%20RDS%2Dencrypted%20resources%2C%20data,transparent%20to%20your%20database%20engine.:https://aws.amazon.com/rds/features/security/","Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/ 2. In the navigation pane, under RDS dashboard, click `Databases`. 3. Select the RDS Instance that you want to examine 4. Click `Instance Name` to see details, then click on `Configuration` tab. 5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status. 6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance. 7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region. 8. Change region from the top of the navigation bar and repeat audit for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name.``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ``` 3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance. 4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Databases are likely to hold sensitive and critical data, it is highly recommended to implement encryption in order to protect your data from unauthorized access or disclosure. With RDS encryption enabled, the data stored on the instance's underlying storage, the automated backups, read replicas, and snapshots, are all encrypted.","RemediationProcedure": "**From Console:**1. 
Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases` 3. Select the Database instance that needs to be encrypted. 4. Click on `Actions` button placed at the top right and select `Take Snapshot`. 5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`. 6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu. 7. On the Make Copy of DB Snapshot page, perform the following:- In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`. - Check `Copy Tags`, New snapshot must have the same tags as the source snapshot. - Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list.8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot. 9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance. 10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field. 11. Review the instance configuration details and click `Restore DB Instance`. 12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name. ``` aws rds create-db-snapshot --region  --db-snapshot-identifier  --db-instance-identifier  ``` 3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key. ``` aws kms list-aliases --region  ``` 4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`. ``` aws rds copy-db-snapshot --region  --source-db-snapshot-identifier  --target-db-snapshot-identifier  --copy-tags --kms-key-id  ``` 5. Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration. ``` aws rds restore-db-instance-from-db-snapshot --region  --db-instance-identifier  --db-snapshot-identifier  ``` 6. 
Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ```","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for RDS Instances","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.3.2": {"name": "2.3.2","checks": {"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Managing.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html:https://aws.amazon.com/rds/faqs/","Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that wants to examine. 4. Click on the `Maintenance and backups` panel. 5. Under the `Maintenance` section, search for the Auto Minor Version Upgrade status. - If the current status is set to `Disabled`, means the feature is not set and the minor engine upgrades released will not be applied to the selected RDS instance**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run again `describe-db-instances` command using the RDS instance identifier returned earlier to determine the Auto Minor Version Upgrade status for the selected instance: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 4. The command output should return the feature current status. If the current status is set to `true`, the feature is enabled and the minor engine upgrades will be applied to the selected RDS instance.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS RDS will occasionally deprecate minor engine versions and provide new ones for an upgrade. When the last version number within the release is replaced, the version changed is considered minor. With Auto Minor Version Upgrade feature enabled, the version upgrades will occur automatically during the specified maintenance window so your RDS instances can get the new features, bug fixes, and security patches for their database engines.","RemediationProcedure": "**From Console:**1. 
Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases`. 3. Select the RDS instance that wants to update. 4. Click on the `Modify` button placed on the top right side. 5. On the `Modify DB Instance: ` page, In the `Maintenance` section, select `Auto minor version upgrade` click on the `Yes` radio button. 6. At the bottom of the page click on `Continue`, check to Apply Immediately to apply the changes immediately, or select `Apply during the next scheduled maintenance window` to avoid any downtime. 7. Review the changes and click on `Modify DB Instance`. The instance status should change from available to modifying and back to available. Once the feature is enabled, the `Auto Minor Version Upgrade` status should change to `Yes`.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database instance names, available in the selected AWS region: ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. The command output should return each database instance identifier. 3. Run the `modify-db-instance` command to modify the selected RDS instance configuration this command will apply the changes immediately, Remove `--apply-immediately` to apply changes during the next scheduled maintenance window and avoid any downtime: ``` aws rds modify-db-instance --region  --db-instance-identifier  --auto-minor-version-upgrade --apply-immediately ``` 4. The command output should reveal the new configuration metadata for the RDS instance and check `AutoMinorVersionUpgrade` parameter value. 5. Run `describe-db-instances` command to check if the Auto Minor Version Upgrade feature has been successfully enable: ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].AutoMinorVersionUpgrade' ``` 6. The command output should return the feature current status set to `true`, the feature is `enabled` and the minor engine upgrades will be applied to the selected RDS instance.","AdditionalInformation": ""}],"description": "Ensure Auto Minor Version Upgrade feature is Enabled for RDS Instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.3": {"name": "2.3.3","checks": {"rds_instance_no_public_access": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.3. Relational Database Service (RDS)","References": "1. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html:https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html:https://aws.amazon.com/rds/faqs/","Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to anypublicly accessible RDS database instance, you must disable the database PubliclyAccessible flag and update the VPC security group associated with the instance","DefaultValue": null,"AuditProcedure": "From Console: 1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click Databases. 3. Select the RDS instance that you want to examine. 4. Click Instance Name from the dashboard, Under `Connectivity and Security. 5. 
On the Security section, check if the Publicly Accessible flag status is set to Yes; if so, follow the below-mentioned steps to check database subnet access. - In the networking section, click the subnet link available under Subnets - The link will redirect you to the VPC Subnets page. - Select the subnet listed on the page and click the Route Table tab from the dashboard bottom panel. - If the route table contains any entries with the destination CIDR block set to 0.0.0.0/0 and with an Internet Gateway attached, the selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and can be accessible from the Internet. 6. Repeat steps no. 4 and 5 to determine the type (public or private) and subnet for other RDS database instances provisioned in the current region. 7. Change the AWS region from the navigation bar and repeat the audit process for other regions. From Command Line: 1. Run describe-db-instances command to list all RDS database names, available in the selected AWS region: aws rds describe-db-instances --region <region> --query 'DBInstances[*].DBInstanceIdentifier' 2. The command output should return each database instance identifier. 3. Run again describe-db-instances command using the PubliclyAccessible parameter as query filter to reveal the database instance Publicly Accessible flag status: aws rds describe-db-instances --region <region> --db-instance-identifier <db_instance_identifier> --query 'DBInstances[*].PubliclyAccessible' 4. Check for the Publicly Accessible parameter status. If the Publicly Accessible flag is set to Yes, then the selected RDS database instance is publicly accessible and insecure; follow the below-mentioned steps to check database subnet access 5. Run again describe-db-instances command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC subnet(s) associated with the selected instance: aws rds describe-db-instances --region <region> --db-instance-identifier <db_instance_identifier> --query 'DBInstances[*].DBSubnetGroup.Subnets[]' - The command output should list the subnets available in the selected database subnet group. 6. Run describe-route-tables command using the ID of the subnet returned at the previous step to describe the routes of the VPC route table associated with the selected subnet: aws ec2 describe-route-tables --region <region> --filters 'Name=association.subnet-id,Values=<subnet_id>' --query 'RouteTables[*].Routes[]' - If the command returns the route table associated with the database instance subnet ID, check the GatewayId and DestinationCidrBlock attribute values returned in the output. If the route table contains any entries with the GatewayId value set to igw-xxxxxxxx and the DestinationCidrBlock value set to 0.0.0.0/0, the selected RDS database instance was provisioned inside a public subnet. - Or - If the command returns empty results, the route table is implicitly associated with the subnet, therefore the audit process continues with the next step 7. Run again describe-db-instances command using the RDS database instance identifier that you want to check and appropriate filtering to describe the VPC ID associated with the selected instance: aws rds describe-db-instances --region <region> --db-instance-identifier <db_instance_identifier> --query 'DBInstances[*].DBSubnetGroup.VpcId' - The command output should show the VPC ID in the selected database subnet group 8.
Now run describe-route-tables command using the ID of the VPC returned at the previous step to describe the routes of the VPC main route table implicitly associated with the selected subnet: aws ec2 describe-route-tables --region <region> --filters 'Name=vpc-id,Values=<vpc_id>' 'Name=association.main,Values=true' --query 'RouteTables[*].Routes[]' - The command output returns the VPC main route table implicitly associated with the database instance subnet ID. Check the GatewayId and DestinationCidrBlock attribute values returned in the output. If the route table contains any entries with the GatewayId value set to igw-xxxxxxxx and the DestinationCidrBlock value set to 0.0.0.0/0, the selected RDS database instance was provisioned inside a public subnet, therefore is not running within a logically isolated environment and does not adhere to AWS security best practices.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Ensure that no public-facing RDS database instances are provisioned in your AWS account and restrict unauthorized access in order to minimize security risks. When the RDS instance allows unrestricted access (0.0.0.0/0), everyone and everything on the Internet can establish a connection to your database and this can increase the opportunity for malicious activities such as brute force attacks, PostgreSQL injections, or DoS/DDoS attacks.","RemediationProcedure": "From Console: 1. Log in to the AWS management console and navigate to the RDS dashboard at https://console.aws.amazon.com/rds/. 2. Under the navigation panel, On RDS Dashboard, click Databases. 3. Select the RDS instance that you want to update. 4. Click Modify from the dashboard top menu. 5. On the Modify DB Instance panel, under the Connectivity section, click on Additional connectivity configuration and update the value for Publicly Accessible to Not publicly accessible to restrict public access. Follow the below steps to update subnet configurations: - Select the Connectivity and security tab, and click on the VPC attribute value inside the Networking section. - Select the Details tab from the VPC dashboard bottom panel and click on the Route table configuration attribute value. - On the Route table details page, select the Routes tab from the dashboard bottom panel and click on Edit routes. - On the Edit routes page, update the Destination of the Target which is set to igw-xxxxx and click on Save routes. 6. On the Modify DB Instance panel, click on Continue, and in the Scheduling of modifications section, perform one of the following actions based on your requirements: - Select Apply during the next scheduled maintenance window to apply the changes automatically during the next scheduled maintenance window. - Select Apply immediately to apply the changes right away. With this option, any pending modifications will be asynchronously applied as soon as possible, regardless of the maintenance window setting for this RDS database instance. Note that any changes available in the pending modifications queue are also applied. If any of the pending modifications require downtime, choosing this option can cause unexpected downtime for the application. 7. Repeat steps 3 to 6 for each RDS instance available in the current region. 8. Change the AWS region from the navigation bar to repeat the process for other regions. From Command Line: 1.
Run describe-db-instances command to list all RDS database name identifiers available in the selected AWS region: aws rds describe-db-instances --region <region> --query 'DBInstances[*].DBInstanceIdentifier' 2. The command output should return each database instance identifier. 3. Run modify-db-instance command to modify the selected RDS instance configuration, then use the following command to disable the Publicly Accessible flag for the selected RDS instances. This command uses the --apply-immediately flag; if you want to avoid any downtime, the --no-apply-immediately flag can be used: aws rds modify-db-instance --region <region> --db-instance-identifier <db_instance_identifier> --no-publicly-accessible --apply-immediately 4. The command output should reveal the PubliclyAccessible configuration under pending values and should get applied at the specified time. 5. Updating the Internet Gateway Destination via AWS CLI is not currently supported. To update information about the Internet Gateway, use the AWS Console procedure. 6. Repeat steps 1 to 5 for each RDS instance provisioned in the current region. 7. Change the AWS region by using the --region filter to repeat the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that public access is not given to RDS Instance","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.4.1": {"name": "2.4.1","checks": {"efs_encryption_at_rest_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.4. Elastic File System (EFS)","References": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/index.html#efs","Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).","DefaultValue": null,"AuditProcedure": "**From Console:** 1. Login to the AWS Management Console and Navigate to the `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Each item on the list has a visible Encrypted field that displays data at rest encryption status. 4. Validate that this field reads `Encrypted` for all EFS file systems in all AWS regions.**From CLI:** 1. Run describe-file-systems command using custom query filters to list the identifiers of all AWS EFS file systems currently available within the selected region: ``` aws efs describe-file-systems --region <region> --output table --query 'FileSystems[*].FileSystemId' ``` 2. The command output should return a table with the requested file system IDs. 3. Run describe-file-systems command using the ID of the file system that you want to examine as identifier and the necessary query filters: ``` aws efs describe-file-systems --region <region> --file-system-id <file_system_id> --query 'FileSystems[*].Encrypted' ``` 4. The command output should return the file system encryption status true or false.
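To run that EFS audit across every file system in a region at once, a small loop over the two commands above can help; the region value is hypothetical:

```bash
# Hypothetical region; prints each EFS file system with its at-rest encryption flag.
REGION="eu-west-1"
for fs in $(aws efs describe-file-systems --region "$REGION" \
              --query 'FileSystems[*].FileSystemId' --output text); do
  enc=$(aws efs describe-file-systems --region "$REGION" --file-system-id "$fs" \
          --query 'FileSystems[0].Encrypted' --output text)
  echo "$fs encrypted=$enc"
done
```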
If the returned value is `false`, the selected AWS EFS file system is not encrypted and if the returned value is `true`, the selected AWS EFS file system is encrypted.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Data should be encrypted at rest to reduce the risk of a data breach via direct access to the storage device.","RemediationProcedure": "**It is important to note that EFS file system data at rest encryption must be turned on when creating the file system.**If an EFS file system has been created without data at rest encryption enabled then you must create another EFS file system with the correct configuration and transfer the data.**Steps to create an EFS file system with data encrypted at rest:****From Console:** 1. Login to the AWS Management Console and Navigate to `Elastic File System (EFS)` dashboard. 2. Select `File Systems` from the left navigation panel. 3. Click `Create File System` button from the dashboard top menu to start the file system setup process. 4. On the `Configure file system access` configuration page, perform the following actions. - Choose the right VPC from the VPC dropdown list. - Within Create mount targets section, select the checkboxes for all of the Availability Zones (AZs) within the selected VPC. These will be your mount targets. - Click `Next step` to continue.5. Perform the following on the `Configure optional settings` page. - Create `tags` to describe your new file system. - Choose `performance mode` based on your requirements. - Check `Enable encryption` checkbox and choose `aws/elasticfilesystem` from Select KMS master key dropdown list to enable encryption for the new file system using the default master key provided and managed by AWS KMS. - Click `Next step` to continue.6. Review the file system configuration details on the `review and create` page and then click `Create File System` to create your new AWS EFS file system. 7. Copy the data from the old unencrypted EFS file system onto the newly create encrypted file system. 8. Remove the unencrypted file system as soon as your data migration to the newly create encrypted file system is completed. 9. Change the AWS region from the navigation bar and repeat the entire process for other aws regions.**From CLI:** 1. Run describe-file-systems command to describe the configuration information available for the selected (unencrypted) file system (see Audit section to identify the right resource): ``` aws efs describe-file-systems --region  --file-system-id  ``` 2. The command output should return the requested configuration information. 3. To provision a new AWS EFS file system, you need to generate a universally unique identifier (UUID) in order to create the token required by the create-file-system command. To create the required token, you can use a randomly generated UUID from \"https://www.uuidgenerator.net\". 4. Run create-file-system command using the unique token created at the previous step. ``` aws efs create-file-system --region  --creation-token  --performance-mode generalPurpose --encrypted ``` 5. The command output should return the new file system configuration metadata. 6. Run create-mount-target command using the newly created EFS file system ID returned at the previous step as identifier and the ID of the Availability Zone (AZ) that will represent the mount target: ``` aws efs create-mount-target --region  --file-system-id  --subnet-id  ``` 7. The command output should return the new mount target metadata. 8. 
Now you can mount your file system from an EC2 instance. 9. Copy the data from the old unencrypted EFS file system onto the newly created encrypted file system. 10. Remove the unencrypted file system as soon as your data migration to the newly created encrypted file system is completed. ``` aws efs delete-file-system --region  --file-system-id  ``` 11. Change the AWS region by updating the --region value and repeat the entire process for other AWS regions.","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for EFS file systems","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 51,"requirements_failed": 10,"requirements_manual": 1,"total_requirements": 62,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "a349f7c9-fce3-4ac4-821a-d0c974496c2b","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "nist_800_53_revision_5_aws","framework": "NIST-800-53-Revision-5","version": "","description": "The NIST 800-53 (Rev. 5) Low-Moderate-High framework represents the security controls and the associated assessment procedures that are defined in NIST SP 800-53 Revision 5 Recommended Security Controls for Federal Information Systems and Organizations. For any discrepancies that are noted in the content between this NIST SP 800-53 framework and the latest published NIST Special Publication SP 800-53 Revision 5, refer to the official published documents that are available at the NIST Computer Security Resource Center.","region": "eu-west-1","requirements": {"ac_3": {"name": "Access Enforcement (AC-3)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"ec2_instance_profile_attached": "PASS","rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Enforce approved authorizations for logical access to information and system resources in accordance with applicable access control policies.","checks_status": {"fail": 1,"pass": 7,"total": 21,"manual": 0}},"ac_4": {"name": "Information Flow Enforcement (AC-4)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_4","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Enforce approved authorizations for controlling the flow of information within the system and between connected systems based on [Assignment: 
organization-defined information flow control policies].","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"ac_6": {"name": "Least Privilege (AC-6)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_6","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Employ the principle of least privilege, allowing only authorized accesses for users (or processes acting on behalf of users) that are necessary to accomplish assigned organizational tasks.","checks_status": {"fail": 1,"pass": 6,"total": 21,"manual": 0}},"ca_7": {"name": "Continuous Monitoring (CA-7)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca_7","Section": "Assessment, Authorization, And Monitoring (CA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Continuously monitor configuration management processes. 
Determine security impact, environment and operational risks.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"cm_6": {"name": "Configuration Settings (CM-6)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_6","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization: (i) establishes mandatory configuration settings for information technology products employed within the information system; (ii) configures the security settings of information technology products to the most restrictive mode consistent with operational requirements; (iii) documents the configuration settings; and (iv) enforces the configuration settings in all components of the information system.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"ia_2": {"name": "Identification and Authentication (Organizational users) (IA-2)","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": null}],"description": "The information system uniquely identifies and authenticates organizational users (or processes acting on behalf of organizational users).","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5": {"name": "Authenticator Management (IA-5)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5","Section": "Identification and Authentication (IA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Authenticate users and devices. Automate administrative control. Enforce restrictions. 
Protect against unauthorized use.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp_2": {"name": "Media Access (MP-2)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "mp_2","Section": "Media Protection (MP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Restrict access to [Assignment: organization-defined types of digital and/or non-digital media] to [Assignment: organization-defined personnel or roles].","checks_status": {"fail": 1,"pass": 6,"total": 21,"manual": 0}},"sc_6": {"name": "Resource Availability (SC-6)","checks": {"rds_instance_multi_az": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_6","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Protect the availability of resources by allocating [Assignment: organization-defined resources] by [Selection (one or more): priority; quota; [Assignment: organization-defined controls]].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc_8": {"name": "Transmission Confidentiality And Integrity (SC-8)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Protect the [Selection (one or more): confidentiality; integrity] of transmitted information.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"ac_24": {"name": "Access Control Decisions (AC-24)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_24","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "[Selection: Establish procedures; Implement mechanisms] to ensure [Assignment: organization-defined access 
control decisions] are applied to each access request prior to access enforcement.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"au_10": {"name": "Non-Repudiation (AU-10)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_10","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Provide irrefutable evidence that an individual (or process acting on behalf of an individual) has performed [Assignment: organization-defined actions to be covered by non-repudiation].","checks_status": {"fail": 6,"pass": 2,"total": 13,"manual": 0}},"au_11": {"name": "Audit Record Retention (AU-11)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_11","Section": "Audit and Accountability (AU)","Service": "cloudwatch","SubGroup": null,"SubSection": null}],"description": "Retain audit records for [Assignment: organization-defined time period consistent with records retention policy] to provide support for after-the-fact investigations of incidents and to meet regulatory and organizational information retention requirements.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_16": {"name": "Cross-Organizational Audit Logging (AU-16)","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_16","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Employ [Assignment: organization-defined methods] for coordinating [Assignment: organization-defined audit information] among external organizations when audit information is transmitted across organizational boundaries.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cp_10": {"name": "System Recovery And Reconstitution (CP-10)","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_10","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Provide for the recovery and reconstitution of the system to a known state within [Assignment: organization-defined time period consistent with recovery time and recovery point objectives] after a disruption, compromise, or failure.","checks_status": {"fail": 3,"pass": 1,"total": 7,"manual": 0}},"pm_16": {"name": "Threat Awareness Program (PM-16)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "pm_16","Section": "Program Management (PM)","Service": "guardduty","SubGroup": null,"SubSection": null}],"description": "Implement a threat awareness program that includes a cross-organization information-sharing 
capability for threat intelligence.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"pm_31": {"name": "Continuous Monitoring Strategy (PM-31)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pm_31","Section": "Program Management (PM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Develop an organization-wide continuous monitoring strategy and implement continuous monitoring programs that include: a. Establishing the following organization-wide metrics to be monitored: [Assignment: organization-defined metrics]; b. Establishing [Assignment: organization-defined frequencies] for monitoring and [Assignment: organization-defined frequencies] for assessment of control effectiveness; c. Ongoing monitoring of organizationally-defined metrics in accordance with the continuous monitoring strategy; d. Correlation and analysis of information generated by control assessments and monitoring; e. Response actions to address results of the analysis of control assessment and monitoring information; and f. 
Reporting the security and privacy status of organizational systems to [Assignment: organization-defined personnel or roles] [Assignment: organization-defined frequency].","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"sc_12": {"name": "Cryptographic Key Establishment And Management (SC-12)","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_12","Section": "System and Communications Protection (SC)","Service": "kms","SubGroup": null,"SubSection": null}],"description": "Establish and manage cryptographic keys when cryptography is employed within the system in accordance with the following key management requirements: [Assignment: organization-defined requirements for key generation, distribution, storage, access, and destruction].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"sc_22": {"name": "Architecture And Provisioning For Name/Address Resolution Service (SC-22)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_22","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Ensure the systems that collectively provide name/address resolution service for an organization are fault-tolerant and implement internal and external role separation.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"sc_23": {"name": "Session Authenticity (SC-23)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_23","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Protect the authenticity of communications sessions.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"sc_25": {"name": "Thin Nodes (SC-25)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_25","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Employ minimal functionality and information storage on the following system components: [Assignment: organization-defined system components].","checks_status": {"fail": 1,"pass": 5,"total": 17,"manual": 0}},"sc_36": {"name": "Distributed Processing And Storage (SC-36)","checks": {"rds_instance_multi_az": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_36","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": 
null}],"description": "Distribute the following processing and storage components across multiple [Selection: physical locations; logical domains]: [Assignment: organization-defined processing and storage components].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"si_12": {"name": "Information Management and Retention (SI-12)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_12","Section": "System and Information integrity (SI)","Service": "cloudwatch","SubGroup": null,"SubSection": null}],"description": "Manage and retain information within the system and information output from the system in accordance with applicable laws, executive orders, directives, regulations, policies, standards, guidelines and operational requirements.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ac_2_1": {"name": "AC-2(1) Automated System Account Management","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_2_1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "Support the management of system accounts using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 1,"pass": 0,"total": 14,"manual": 0}},"ac_2_3": {"name": "AC-2(3) Disable Accounts","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "Disable accounts within [Assignment: organization-defined time period] when the accounts: (a) Have expired; (b) Are no longer associated with a user or individual; (c) Are in violation of organizational policy; or (d) Have been inactive for [Assignment: organization-defined time period].","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_2_4": {"name": "AC-2(4) Automated Audit Actions","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_2_4","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "Automatically audit account creation, modification, enabling, disabling, and removal actions.","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"ac_2_6": {"name": "AC-2(6) Dynamic Privilege Management","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": 
null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_2_6","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "Implement [Assignment: organization-defined dynamic privilege management capabilities].","checks_status": {"fail": 1,"pass": 6,"total": 21,"manual": 0}},"ac_2_g": {"name": "AC-2(g)","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_g","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization: g. Monitors the use of information system accounts.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"ac_2_j": {"name": "AC-2(j)","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_j","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization: j. 
Reviews accounts for compliance with account management requirements [Assignment: organization-defined frequency].","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"ac_3_1": {"name": "AC-3(1) Restricted Access To Privileged Functions","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Employ an audited override of automated access control mechanisms under [Assignment: organization-defined conditions] by [Assignment: organization-defined roles].","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"ac_3_2": {"name": "AC-3(2) Dual Authorization","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_3_2","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce dual authorization for [Assignment: organization-defined privileged commands and/or other organization-defined actions].","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ac_3_3": {"name": "AC-3(3) Mandatory Access Control","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access control policy] over the set of covered subjects and objects specified in the policy, and where the policy: (a) Is uniformly enforced across the covered subjects and objects within the system; (b) Specifies that a subject that has been granted access to information is constrained from doing any of the following; (1) Passing the information to unauthorized subjects or objects; (2) Granting its privileges to other subjects; (3) Changing one or more security attributes (specified by the policy) on subjects, objects, the system, or system components; (4) Choosing the security attributes and attribute values (specified by the policy) to be associated with newly created or modified objects; and (5) Changing the rules governing access control; and (c) Specifies that [Assignment: organization-defined subjects] may explicitly be granted [Assignment: organization-defined privileges] such that they are not limited by any defined subset (or all) of the above constraints.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_4": 
{"name": "AC-3(4) Discretionary Access Control","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_4","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined discretionary access control policy] over the set of covered subjects and objects specified in the policy, and where the policy specifies that a subject that has been granted access to information can do one or more of the following: (a) Pass the information to any other subjects or objects; (b) Grant its privileges to other subjects; (c) Change security attributes on subjects, objects, the system, or the systemโ€™s components; (d) Choose the security attributes to be associated with newly created or revised objects; or (e) Change the rules governing access control.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_7": {"name": "AC-3(7) Role-Based Access Control","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_7","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce a role-based access control policy over defined subjects and objects and control access based upon [Assignment: organization-defined roles and users authorized to assume such roles].","checks_status": {"fail": 1,"pass": 6,"total": 21,"manual": 0}},"ac_3_8": {"name": "AC-3(8) Revocation Of Access Authorizations","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": 
null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_8","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce the revocation of access authorizations resulting from changes to the security attributes of subjects and objects based on [Assignment: organization-defined rules governing the timing of revocations of access authorizations].","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_5_b": {"name": "AC-5(b)","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_5_b","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Separation Of Duties (AC-5)"}],"description": "Define system access authorizations to support separation of duties.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_6_2": {"name": "AC-6(2)","checks": {"iam_no_root_access_key": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_6_2","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Least Privilege (AC-6)"}],"description": "Require that users of system accounts (or roles) with access to [Assignment: organization-defined security functions or security-relevant information] use non-privileged accounts or roles, when accessing nonsecurity functions.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ac_6_3": {"name": "AC-6(3)","checks": {"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_6_3","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Least Privilege (AC-6)"}],"description": "Authorize network access to [Assignment: organization-defined privileged commands] only for [Assignment: organization-defined compelling operational needs] and document the rationale for such access in the security plan for the system.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ac_6_9": {"name": "AC-6(9)","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_6_9","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Least Privilege (AC-6)"}],"description": "Log the execution of privileged functions.","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"ac_7_4": {"name": "AC-7(4) Use Of Alternate Authentication Factor","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": 
null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_7_4","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Unsuccessful Logon Attempts (AC-7)"}],"description": "Prevent non-privileged users from executing privileged functions.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"au_2_b": {"name": "AU-2(b)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_2_b","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Event Logging (AU-2)"}],"description": "Coordinate the event logging function with other organizational entities requiring audit-related information to guide and inform the selection criteria for events to be logged.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3_1": {"name": "AU-3(1) Additional Audit Information","checks": {"guardduty_is_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "au_3_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Generate audit records containing the following additional information: [Assignment: organization-defined additional information].","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"au_3_a": {"name": "AU-3(a)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3_a","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Ensure that audit records contain information that establishes the following: a. What type of event occurred.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3_b": {"name": "AU-3(b)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3_b","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Ensure that audit records contain information that establishes the following: b. 
When the event occurred.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3_c": {"name": "AU-3(c)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3_c","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Ensure that audit records contain information that establishes the following: c. Where the event occurred.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3_d": {"name": "AU-3(d)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3_d","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Ensure that audit records contain information that establishes the following: d. Source of the event.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3_e": {"name": "AU-3(e)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3_e","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Ensure that audit records contain information that establishes the following: e. Outcome of the event.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_3_f": {"name": "AU-3(f)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_3_f","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Content of Audit Records (AU-3)"}],"description": "Ensure that audit records contain information that establishes the following: f. 
Identity of any individuals, subjects, or objects/entities associated with the event.","checks_status": {"fail": 5,"pass": 2,"total": 11,"manual": 0}},"au_4_1": {"name": "AU-4(1) Transfer To Alternate Storage","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_4_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Log Storage Capacity (AU-4)"}],"description": "Transfer audit logs [Assignment: organization-defined frequency] to a different system, system component, or media other than the system or system component conducting the logging.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_6_1": {"name": "AU-6(1) Automated Process Integration","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Review, Analysis And Reporting (AU-6)"}],"description": "Integrate audit record review, analysis, and reporting processes using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"au_6_3": {"name": "AU-6(3) Correlate Audit Record Repositories","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Review, Analysis And Reporting (AU-6)"}],"description": "Analyze and correlate audit records across different repositories to gain organization-wide situational awareness.","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"au_6_4": {"name": "AU-6(4) Central Review And Analysis","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_4","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Review, Analysis And Reporting (AU-6)"}],"description": "Provide and implement the capability to centrally review and analyze audit records from multiple components within the system.","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"au_6_5": 
{"name": "AU-6(5) Central Review And Analysis","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_5","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Review, Analysis And Reporting (AU-6)"}],"description": "Integrate analysis of audit records with analysis of [Selection (one or more): vulnerability scanning information; performance data; system monitoring information; [Assignment: organization-defined data/information collected from other sources]] to further enhance the ability to identify inappropriate or unusual activity.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"au_6_6": {"name": "AU-6(6) Correletion With Physical Monitoring","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_6","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Review, Analysis And Reporting (AU-6)"}],"description": "Correlate information from audit records with information obtained from monitoring physical access to further enhance the ability to identify suspicious, inappropriate, unusual, or malevolent activity.","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"au_6_9": {"name": "AU-6(9) Correletion With From Nontechnical Sources","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_6_9","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Review, Analysis And Reporting (AU-6)"}],"description": "Correlate information from nontechnical sources with audit record information to enhance organization-wide situational awareness.","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"au_7_1": {"name": "AU-7(1) Automatic Processing","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_7_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Reduction And Report Generation (AU-7)"}],"description": "Provide and implement the capability to 
process, sort, and search audit records for events of interest based on the following content: [Assignment: organization-defined fields within audit records].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_8_b": {"name": "AU-8(b)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_8_b","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Time Stamps (AU-8)"}],"description": "Record time stamps for audit records that meet [Assignment: organization-defined granularity of time measurement] and that use Coordinated Universal Time, have a fixed local time offset from Coordinated Universal Time, or that include the local time offset as part of the time stamp.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_9_2": {"name": "AU-9(2) Store On Separate Physical Systems Or Components","checks": {"s3_bucket_object_versioning": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_9_2","Section": "Audit and Accountability (AU)","Service": "s3","SubGroup": null,"SubSection": "Protection of Audit Information (AU-9)"}],"description": "Store audit records [Assignment: organization-defined frequency] in a repository that is part of a physically different system or system component than the system or component being audited.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_9_3": {"name": "AU-9(3) Cryptographic Protection","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_9_3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Protection of Audit Information (AU-9)"}],"description": "Implement cryptographic mechanisms to protect the integrity of audit information and audit tools.","checks_status": {"fail": 8,"pass": 3,"total": 17,"manual": 0}},"au_9_7": {"name": "AU-9(7) Store On Component With Different Operating Systems","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_9_7","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Protection of Audit Information (AU-9)"}],"description": "Store audit information on a component running a different operating system than the system or component being audited.","checks_status": 
{"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_9_a": {"name": "AU-9(a)","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_9_a","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Protection of Audit Information (AU-9)"}],"description": "Protect audit information and audit logging tools from unauthorized access, modification, and deletion.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ca_2_2": {"name": "CA-2(2) Specialized Assessments","checks": {"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca_2_2","Section": "Assessment, Authorization, And Monitoring (CA)","Service": "aws","SubGroup": null,"SubSection": "Control Assessments (CA-2)"}],"description": "Include as part of control assessments, [Assignment: organization-defined frequency], [Selection: announced; unannounced], [Selection (one or more): in-depth monitoring; security instrumentation; automated security test cases; vulnerability scanning; malicious user testing; insider threat assessment; performance and load testing; data leakage or data loss assessment; [Assignment: organization-defined other forms of assessment]].","checks_status": {"fail": 1,"pass": 0,"total": 5,"manual": 0}},"ca_2_d": {"name": "CA-2(d)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ca_2_d","Section": "Assessment, Authorization, And Monitoring (CA)","Service": "aws","SubGroup": null,"SubSection": "Control Assessments (CA-2)"}],"description": "Assess the controls in the system and its environment of operation [Assignment: organization-defined frequency] to determine the extent to which the controls are implemented correctly, operating as intended, and producing the desired outcome with respect to meeting established security and privacy requirements.","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"ca_7_b": {"name": "CA-7(b)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca_7_b","Section": "Assessment, Authorization, And Monitoring (CA)","Service": "aws","SubGroup": null,"SubSection": "Continuous Monitoring (CA-7)"}],"description": "Develop a system-level continuous monitoring strategy and implement continuous monitoring in accordance with 
the organization-level continuous monitoring strategy that includes: b. Establishing [Assignment: organization-defined frequencies] for monitoring and [Assignment: organization-defined frequencies] for assessment of control effectiveness.","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"ca_9_b": {"name": "CA-9(b)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca_9_b","Section": "Assessment, Authorization, And Monitoring (CA)","Service": "aws","SubGroup": null,"SubSection": "Internal System Connections (CA-9)"}],"description": "Document, for each internal connection, the interface characteristics, security and privacy requirements, and the nature of the information communicated.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"cm_2_2": {"name": "CM-2(2) Automation Support For Accuracy And Currency","checks": {"elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2_2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Baseline Configuration (CM-2)"}],"description": "Maintain the currency, completeness, accuracy, and availability of the baseline configuration of the system using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 5,"pass": 0,"total": 5,"manual": 0}},"cm_2_a": {"name": "CM-2(a)","checks": {"elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2_a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Baseline Configuration (CM-2)"}],"description": "Develop, document, and maintain under configuration control, a current baseline configuration of the system.","checks_status": {"fail": 5,"pass": 0,"total": 5,"manual": 0}},"cm_2_b": {"name": "CM-2(b)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2_b","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Baseline Configuration (CM-2)"}],"description": "Review and update the baseline configuration of the system: 1. [Assignment: organization-defined frequency]; 2. When required due to [Assignment: organization-defined circumstances]; and 3. 
When system components are installed or upgraded.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"cm_3_3": {"name": "CM-3(3) Automated Change Implementation","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_3_3","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Configuration Change Control (CM-3)"}],"description": "Implement changes to the current system baseline and deploy the updated baseline across the installed base using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"cm_3_a": {"name": "CM-3(a)","checks": {"elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_3_a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Configuration Change Control (CM-3)"}],"description": "Determine and document the types of changes to the system that are configuration-controlled.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_6_a": {"name": "CM-6(a)","checks": {"iam_root_mfa_enabled": null,"vpc_flow_logs_enabled": "FAIL","iam_no_root_access_key": null,"s3_bucket_public_access": null,"kms_cmk_rotation_enabled": null,"ec2_ebs_volume_encryption": "PASS","iam_user_accesskey_unused": null,"ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","ec2_instance_profile_attached": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","iam_password_policy_minimum_length_14": null,"s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_6_a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Configuration Settings (CM-6)"}],"description": "Establish and document configuration settings for components employed within the system that reflect the most restrictive mode consistent with operational requirements using [Assignment: organization-defined common secure configurations].","checks_status": {"fail": 8,"pass": 6,"total": 31,"manual": 0}},"cm_7_b": {"name": "CM-7(b)","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_7_b","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Least Functionality (CM-7)"}],"description": "Prohibit or restrict the use of the following functions, ports, 
protocols, software, and/or services: [Assignment: organization-defined prohibited or restricted functions, system ports, protocols, software, and/or services].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cm_8_1": {"name": "CM-8(1) Updates During Installation And Removals","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_1","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "System Component Inventory (CM-8)"}],"description": "Update the inventory of system components as part of component installations, removals, and system updates.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_8_2": {"name": "CM-8(2) Automated Maintenance","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "System Component Inventory (CM-8)"}],"description": "Maintain the currency, completeness, accuracy, and availability of the inventory of system components using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cm_8_6": {"name": "CM-8(6) Assessed Configurations And Approved Deviations","checks": {"elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_6","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "System Component Inventory (CM-8)"}],"description": "Include assessed component configurations and any approved deviations to current deployed configurations in the system component inventory.","checks_status": {"fail": 5,"pass": 0,"total": 5,"manual": 0}},"cm_8_a": {"name": "CM-8(a)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "System Component Inventory (CM-8)"}],"description": "Develop and document an inventory of system components that: 1. Accurately reflects the system; 2. Includes all components within the system; 3. Does not include duplicate accounting of components or components assigned to any other system; 4. Is at the level of granularity deemed necessary for tracking and reporting; and 5. 
Includes the following information to achieve system component accountability: [Assignment: organization-defined information deemed necessary to achieve effective system component accountability].","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_8_b": {"name": "CM-8(b)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_b","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "System Component Inventory (CM-8)"}],"description": "Review and update the system component inventory [Assignment: organization-defined frequency].","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_9_b": {"name": "CM-9(b)","checks": {"iam_root_mfa_enabled": null,"vpc_flow_logs_enabled": "FAIL","iam_no_root_access_key": null,"s3_bucket_public_access": null,"kms_cmk_rotation_enabled": null,"ec2_ebs_volume_encryption": "PASS","iam_user_accesskey_unused": null,"ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","iam_password_policy_minimum_length_14": null,"s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_9_b","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Configuration Management Plan (CM-9)"}],"description": "Develop, document, and implement a configuration management plan for the system that: b. 
Establishes a process for identifying configuration items throughout the system development life cycle and for managing the configuration of the configuration items.","checks_status": {"fail": 8,"pass": 6,"total": 31,"manual": 0}},"cp_1_2": {"name": "CP-1(2)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_1_2","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Policy And Procedures (CP-1)"}],"description": "Implement transaction recovery for systems that are transaction-based.","checks_status": {"fail": 2,"pass": 1,"total": 6,"manual": 0}},"cp_2_5": {"name": "CP-2(5) Continue Mission And Business Functions","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_5","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Contingency Plan (CP-2)"}],"description": "Plan for the continuance of [Selection: all; essential] mission and business functions with minimal or no loss of operational continuity and sustain that continuity until full system restoration at primary processing and/or storage sites.","checks_status": {"fail": 5,"pass": 1,"total": 9,"manual": 0}},"cp_2_6": {"name": "CP-2(6) Alternate Processing And Storage Sites","checks": {"rds_instance_multi_az": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_6","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Contingency Plan (CP-2)"}],"description": "Plan for the transfer of [Selection: all; essential] mission and business functions to alternate processing and/or storage sites with minimal or no loss of operational continuity and sustain that continuity through system restoration to primary processing and/or storage sites.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cp_2_a": {"name": "CP-2(a)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_a","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Contingency Plan (CP-2)"}],"description": "a. Develop a contingency plan for the system that: 1. Identifies essential mission and business functions and associated contingency requirements; 2. Provides recovery objectives, restoration priorities, and metrics; 3. Addresses contingency roles, responsibilities, assigned individuals with contact information; 4. Addresses maintaining essential mission and business functions despite a system disruption, compromise, or failure; 5. Addresses eventual, full system restoration without deterioration of the controls originally planned and implemented; 6. Addresses the sharing of contingency information; and 7. 
Is reviewed and approved by [Assignment: organization-defined personnel or roles]","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"cp_2_d": {"name": "CP-2(d)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_d","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Contingency Plan (CP-2)"}],"description": "Review the contingency plan for the system [Assignment: organization-defined frequency]","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"cp_2_e": {"name": "CP-2(e)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_e","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Contingency Plan (CP-2)"}],"description": "Update the contingency plan to address changes to the organization, system, or environment of operation and problems encountered during contingency plan implementation, execution, or testing.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"cp_6_1": {"name": "CP-6(1) Separation From Primary Site","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_6_1","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Alternate Storage Sites (CP-6)"}],"description": "Identify an alternate storage site that is sufficiently separated from the primary storage site to reduce susceptibility to the same threats.","checks_status": {"fail": 2,"pass": 1,"total": 5,"manual": 0}},"cp_6_2": {"name": "CP-6(2) Recovery Time And Recovery Point Objectives","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_6_2","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Alternate Storage Sites (CP-6)"}],"description": "Configure the alternate storage site to facilitate recovery operations in accordance with recovery time and recovery point objectives.","checks_status": {"fail": 3,"pass": 1,"total": 7,"manual": 0}},"cp_6_a": {"name": "CP-6(a)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_6_a","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Alternate Storage Sites (CP-6)"}],"description": "Establish an alternate storage site, including necessary agreements to permit the storage and retrieval of system backup information.","checks_status": {"fail": 2,"pass": 1,"total": 5,"manual": 0}},"cp_9_8": {"name": "CP-9(8) Cryptographic Protection","checks": {"s3_bucket_default_encryption": "PASS","rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_9_8","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "System Backup (CP-9)"}],"description": "Implement cryptographic 
mechanisms to prevent unauthorized disclosure and modification of [Assignment: organization-defined backup information].","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"cp_9_a": {"name": "CP-9(a)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null,"redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_9_a","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "System Backup (CP-9)"}],"description": "Conduct backups of user-level information contained in [Assignment: organization-defined system components] [Assignment: organization-defined frequency consistent with recovery time and recovery point objectives].","checks_status": {"fail": 2,"pass": 1,"total": 7,"manual": 0}},"cp_9_b": {"name": "CP-9(b)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null,"redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_9_b","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "System Backup (CP-9)"}],"description": "Conduct backups of system-level information contained in the system [Assignment: organization-defined frequency consistent with recovery time and recovery point objectives].","checks_status": {"fail": 2,"pass": 1,"total": 7,"manual": 0}},"cp_9_c": {"name": "CP-9(c)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null,"redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_9_c","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "System Backup (CP-9)"}],"description": "Conduct backups of system documentation, including security- and privacy-related documentation [Assignment: organization-defined frequency consistent with recovery time and recovery point objectives].","checks_status": {"fail": 2,"pass": 1,"total": 7,"manual": 0}},"cp_9_d": {"name": "CP-9(d)","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_9_d","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "System Backup (CP-9)"}],"description": "Protect the confidentiality, integrity, and availability of backup information.","checks_status": {"fail": 5,"pass": 3,"total": 13,"manual": 0}},"ia_2_1": {"name": "IA-2(1) Multi-Factor Authentication To Privileged Accounts","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_1","Section": "Identification and 
Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "Implement multi-factor authentication for access to privileged accounts.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia_2_2": {"name": "IA-2(2) Multi-Factor Authentication To Non-Privileged Accounts","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_2","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "Implement multi-factor authentication for access to non-privileged accounts.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia_2_6": {"name": "IA-2(6) Acces To Accounts โ€” Separate Device","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_6","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "Implement multi-factor authentication for [Selection (one or more): local; network; remote] access to [Selection (one or more): privileged accounts; non-privileged accounts] such that: (a) One of the factors is provided by a device separate from the system gaining access; and (b) The device meets [Assignment: organization-defined strength of mechanism requirements].","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia_2_8": {"name": "IA-2(8) Access To Accounts โ€” Replay Resistant","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_8","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "Implement replay-resistant authentication mechanisms for access to [Selection (one or more): privileged accounts; non-privileged accounts].","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia_4_4": {"name": "IA-4(4)","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_4_4","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identifier Management (IA-4)"}],"description": "Manage individual identifiers by uniquely identifying each individual as [Assignment: organization-defined characteristic identifying individual status].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_4_8": {"name": "IA-4(8)","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_4_8","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identifier Management (IA-4)"}],"description": "Generate pairwise pseudonymous identifiers.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_4_b": {"name": "IA-4(b)","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_4_b","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identifier 
Management (IA-4)"}],"description": "Manage system identifiers by: b. Selecting an identifier that identifies an individual, group, role, service, or device.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_4_d": {"name": "IA-4(d)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_4_d","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identifier Management (IA-4)"}],"description": "Manage system identifiers by: d. Preventing reuse of identifiers for [Assignment: organization-defined time period].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_8": {"name": "IA-5(8) Multiple System Accounts","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_8","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": null}],"description": "Implement [Assignment: organization-defined security controls] to manage the risk of compromise due to individuals having accounts on multiple systems.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_b": {"name": "IA-5(b)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_b","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "Manage system authenticators by: b. Establishing initial authenticator content for any authenticators issued by the organization.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_c": {"name": "IA-5(c)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_c","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "Manage system authenticators by: c. Ensuring that authenticators have sufficient strength of mechanism for their intended use.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_d": {"name": "IA-5(d)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_d","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "Manage system authenticators by: d. Establishing and implementing administrative procedures for initial authenticator distribution, for lost or compromised or damaged authenticators, and for revoking authenticators.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_f": {"name": "IA-5(f)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_f","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "Manage system authenticators by: f. 
Changing or refreshing authenticators [Assignment: organization-defined time period by authenticator type] or when [Assignment: organization-defined events] occur.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_h": {"name": "IA-5(h)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_h","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "Manage system authenticators by: h. Requiring individuals to take, and having devices implement, specific controls to protect authenticators.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ir_4_a": {"name": "IR-4(a)","checks": {"guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir_4_a","Section": "Incident Response (IR)","Service": "guardduty","SubGroup": null,"SubSection": "Incident Handling (IR-4)"}],"description": "Implement an incident handling capability for incidents that is consistent with the incident response plan and includes preparation, detection and analysis, containment, eradication, and recovery.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ma_4_c": {"name": "MA-4(c)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ma_4_c","Section": "Maintenance (MA)","Service": "iam","SubGroup": null,"SubSection": "Nonlocal Maintenance (MA-4)"}],"description": "Employ strong authentication in the establishment of nonlocal maintenance and diagnostic sessions.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"pe_6_2": {"name": "PE-6(2) Monitoring Physical Access","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "pe_6_2","Section": "Physical And Environmental Protection (PE)","Service": "guardduty","SubGroup": null,"SubSection": "Monitoring Physical Access (PE-6)"}],"description": "Recognize [Assignment: organization-defined classes or types of intrusions] and initiate [Assignment: organization-defined response actions] using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"pe_6_4": {"name": "PE-6(4) Monitoring Physical Access","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "pe_6_4","Section": "Physical And Environmental Protection (PE)","Service": "guardduty","SubGroup": null,"SubSection": "Monitoring Physical Access (PE-6)"}],"description": "Monitor physical access to the system in addition to the physical access monitoring of the facility at [Assignment: organization-defined physical spaces containing one or more components of the system].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_1_a": {"name": "RA-1(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_1_a","Section": "Risk Assessment (RA)","Service": "guardduty","SubGroup": null,"SubSection": "Policy And Procedures (RA-1)"}],"description": "Establish and maintain a cyber threat hunting capability to: 1. Search for indicators of compromise in organizational systems; and 2. 
Detect, track, and disrupt threats that evade existing controls.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_3_4": {"name": "RA-3(4) Predictive Cyber Analytics","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_3_4","Section": "Risk Assessment (RA)","Service": "guardduty","SubGroup": null,"SubSection": "Risk Assessment (RA-3)"}],"description": "Employ the following advanced automation and analytics capabilities to predict and identify risks to [Assignment: organization-defined systems or system components]: [Assignment: organization-defined advanced automation and analytics capabilities].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_5_4": {"name": "RA-5(4) Discoverable Information","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_5_4","Section": "Risk Assessment (RA)","Service": "guardduty","SubGroup": null,"SubSection": "Vulnerability Monitoring And Scanning (RA-5)"}],"description": "Determine information about the system that is discoverable and take [Assignment: organization-defined corrective actions].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_5_a": {"name": "RA-5(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_5_a","Section": "Risk Assessment (RA)","Service": "guardduty","SubGroup": null,"SubSection": "Vulnerability Monitoring And Scanning (RA-5)"}],"description": "Monitor and scan for vulnerabilities in the system and hosted applications [Assignment: organization-defined frequency and/or randomly in accordance with organization-defined process] and when new vulnerabilities potentially affecting the system are identified and reported.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sa_1_1": {"name": "SA-1(1)","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa_1_1","Section": "System and Services Acquisition (SA)","Service": "cloudtrail","SubGroup": null,"SubSection": "Policy And Procedures (SA-1)"}],"description": "Require the developer of the system, system component, or system service to enable integrity verification of software and firmware components.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sa_9_6": {"name": "SA-9(6) Organization-Controlled Cryptographic Keys","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sa_9_6","Section": "System and Services Acquisition (SA)","Service": "kms","SubGroup": null,"SubSection": "External System Services (SA-9)"}],"description": "Maintain exclusive control of cryptographic keys for encrypted material stored or transmitted through an external system.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"sc_5_1": {"name": "SC-5(1) Restrict Ability To Attack Other Systems","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_5_1","Section": "System and Communications Protection (SC)","Service": "guardduty","SubGroup": null,"SubSection": "Denial Of Service Protection (SC-5)"}],"description": "Restrict the ability of individuals to launch the following denial-of-service attacks against other systems: [Assignment: organization-defined denial-of-service attacks].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sc_5_2": {"name": "SC-5(2) Capacity, Bandwidth, And Redundancy","checks": 
{"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","redshift_cluster_automated_snapshot": null,"redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_5_2","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Denial Of Service Protection (SC-5)"}],"description": "Manage capacity, bandwidth, or other redundancy to limit the effects of information flooding denial-of-service attacks.","checks_status": {"fail": 5,"pass": 1,"total": 10,"manual": 0}},"sc_5_a": {"name": "SC-5(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_5_a","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": null,"SubSection": "Denial Of Service Protection (SC-5)"}],"description": "[Selection: Protect against; Limit] the effects of the following types of denial-of-service events: [Assignment: organization-defined types of denial-of-service events].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sc_5_b": {"name": "SC-5(b)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_5_b","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": null,"SubSection": "Denial Of Service Protection (SC-5)"}],"description": "Employ the following controls to achieve the denial-of-service objective: [Assignment: organization-defined controls by type of denial-of-service event].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sc_7_2": {"name": "SC-7(2) Public Access","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_2","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Provide the capability to dynamically isolate [Assignment: organization-defined system components] from other system components.","checks_status": {"fail": 1,"pass": 5,"total": 13,"manual": 0}},"sc_7_3": {"name": "SC-7(3) Access Points","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_3","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": 
null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Limit the number of external network connections to the system.","checks_status": {"fail": 1,"pass": 5,"total": 13,"manual": 0}},"sc_7_5": {"name": "SC-7(5) Deny By Default โ€” Allow By Exception","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_5","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Deny network communications traffic by default and allow network communications traffic by exception [Selection (one or more): at managed interfaces; for [Assignment: organization-defined systems]].","checks_status": {"fail": 5,"pass": 0,"total": 6,"manual": 0}},"sc_7_7": {"name": "SC-7(7) Split Tunneling For Remote Devices","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_7","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prevent split tunneling for remote devices connecting to organizational systems unless the split tunnel is securely provisioned using [Assignment: organization-defined safeguards].","checks_status": {"fail": 3,"pass": 5,"total": 16,"manual": 0}},"sc_7_a": {"name": "SC-7(a)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_a","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Monitor and control communications at the external managed interfaces to the system and at key internal managed interfaces within the system.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"sc_7_b": {"name": "SC-7(b)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": 
null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_b","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Implement subnetworks for publicly accessible system components that are [Selection: physically; logically] separated from internal organizational networks.","checks_status": {"fail": 1,"pass": 5,"total": 13,"manual": 0}},"sc_7_c": {"name": "SC-7(c)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_c","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Connect to external networks or systems only through managed interfaces consisting of boundary protection devices arranged in accordance with an organizational security and privacy architecture.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"sc_8_1": {"name": "SC-8(1) Cryptographic Protection","checks": {"elb_ssl_listeners": "FAIL","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8_1","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Confidentiality And Integrity (SC-8)"}],"description": "Implement cryptographic mechanisms to [Selection (one or more): prevent unauthorized disclosure of information; detect changes to information] during transmission.","checks_status": {"fail": 3,"pass": 1,"total": 5,"manual": 0}},"sc_8_2": {"name": "SC-8(2) Pre- And Post-Transmission Handling","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8_2","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Confidentiality And Integrity (SC-8)"}],"description": "Maintain the [Selection (one or more): confidentiality; integrity] of information during preparation for transmission and during reception.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"sc_8_3": {"name": "SC-8(3) Cryptographic Protection For Message 
Externals","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8_3","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Confidentiality And Integrity (SC-8)"}],"description": "Implement cryptographic mechanisms to protect message externals unless otherwise protected by [Assignment: organization-defined alternative physical controls].","checks_status": {"fail": 8,"pass": 3,"total": 18,"manual": 0}},"sc_8_4": {"name": "SC-8(4) Conceal Or Ramdomize Communications","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8_4","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Confidentiality And Integrity (SC-8)"}],"description": "Implement cryptographic mechanisms to conceal or randomize communication patterns unless otherwise protected by [Assignment: organization-defined alternative physical controls].","checks_status": {"fail": 8,"pass": 3,"total": 18,"manual": 0}},"sc_8_5": {"name": "SC-8(5) Protected Distribution System","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_8_5","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Confidentiality And Integrity (SC-8)"}],"description": "Implement [Assignment: organization-defined protected distribution system] to [Selection (one or more): prevent unauthorized disclosure of information; detect changes to information] during transmission.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"si_2_2": {"name": "SI-2(2) Automated Flaw Remediation Status","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_2_2","Section": "System 
and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "Determine if system components have applicable security-relevant software and firmware updates installed using [Assignment: organization-defined automated mechanisms] [Assignment: organization-defined frequency].","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"si_2_5": {"name": "SI-2(5) Automatic Software And Firmware Updated","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_2_5","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "Install [Assignment: organization-defined security-relevant software and firmware updates] automatically to [Assignment: organization-defined system components].","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"si_2_a": {"name": "SI-2(a)","checks": {"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_2_a","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "Identify, report, and correct system flaws.","checks_status": {"fail": 1,"pass": 0,"total": 5,"manual": 0}},"si_2_c": {"name": "SI-2(c)","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_2_c","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "Install security-relevant software and firmware updates within [Assignment: organization-defined time period] of the release of the updates.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"si_2_d": {"name": "SI-2(d)","checks": {"ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_2_d","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "Incorporate flaw remediation into the organizational configuration management process.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"si_4_1": {"name": "SI-4(1) System-Wide Intrusion Detection System","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_1","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Connect and configure individual intrusion detection tools into a system-wide intrusion detection system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_2": {"name": "SI-4(2) Automated Tools For Real-Time Analysis","checks": {"guardduty_is_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": 
"FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_2","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Implement the following additional monitoring of privileged users: [Assignment: organization-defined additional monitoring]. Employ automated tools and mechanisms to support near real-time analysis of events.","checks_status": {"fail": 3,"pass": 2,"total": 9,"manual": 0}},"si_4_3": {"name": "SI-4(3) Automated Tools And Mechanism Integration","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_3","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Employ automated tools and mechanisms to integrate intrusion detection tools and mechanisms into access control and flow control mechanisms.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_a": {"name": "SI-4(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_a","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Monitor the system to detect: 1. Attacks and indicators of potential attacks in accordance with the following monitoring objectives: [Assignment: organization-defined monitoring objectives]; and 2. Unauthorized local, network, and remote connections.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_b": {"name": "SI-4(b)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_b","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Identify unauthorized use of the system through the following techniques and methods: [Assignment: organization-defined techniques and methods].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_c": {"name": "SI-4(c)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_c","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "c. Invoke internal monitoring capabilities or deploy monitoring devices: 1. Strategically within the system to collect organization-determined essential information; and 2. 
At ad hoc locations within the system to track specific types of transactions of interest to the organization.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_d": {"name": "SI-4(d)","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_d","Section": "System and Information integrity (SI)","Service": "cloudtrail","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Analyze detected events and anomalies.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"si_5_1": {"name": "SI-5(1) Automated Alerts And Advisories","checks": {"guardduty_is_enabled": "PASS","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_5_1","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Security Alerts, Advisories, And Directives (SI-5)"}],"description": "Broadcast security alert and advisory information throughout the organization using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 0,"pass": 1,"total": 5,"manual": 0}},"si_5_b": {"name": "SI-5(b)","checks": {"guardduty_is_enabled": "PASS","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_5_b","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Security Alerts, Advisories, And Directives (SI-5)"}],"description": "Generate internal security alerts, advisories, and directives as deemed necessary.","checks_status": {"fail": 0,"pass": 1,"total": 5,"manual": 0}},"si_7_1": {"name": "SI-7(1) Integrity Checks","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7_1","Section": "System and Information integrity (SI)","Service": "cloudtrail","SubGroup": null,"SubSection": "Software, Firmware, and Information Integrity (SI-7)"}],"description": "Perform an integrity check of [Assignment: organization-defined software, firmware, and information] [Selection (one or more): at startup; at [Assignment: organization-defined transitional states or security-relevant events]; [Assignment: organization-defined frequency]].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"si_7_3": {"name": "SI-7(3) Centrally Managed Integrity Tools","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7_3","Section": "System and Information integrity (SI)","Service": "cloudtrail","SubGroup": null,"SubSection": "Software, Firmware, and Information Integrity (SI-7)"}],"description": "Employ centrally managed integrity verification tools.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"si_7_7": {"name": "SI-7(7) Integration Of Detection And Response","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7_7","Section": "System and Information integrity (SI)","Service": "cloudtrail","SubGroup": null,"SubSection": "Software, 
Firmware, and Information Integrity (SI-7)"}],"description": "Incorporate the detection of the following unauthorized changes into the organizational incident response capability: [Assignment: organization-defined security-relevant changes to the system].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"si_7_8": {"name": "SI-7(8) Auditing Capability For Significant Events","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7_8","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Software, Firmware, and Information Integrity (SI-7)"}],"description": "Upon detection of a potential integrity violation, provide the capability to audit the event and initiate the following actions: [Selection (one or more): generate an audit record; alert current user; alert [Assignment: organization-defined personnel or roles]; [Assignment: organization-defined other actions]].","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"si_7_a": {"name": "SI-7(a)","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_7_a","Section": "System and Information integrity (SI)","Service": "cloudtrail","SubGroup": null,"SubSection": "Software, Firmware, and Information Integrity (SI-7)"}],"description": "Employ integrity verification tools to detect unauthorized changes to the following software, firmware, and information: [Assignment: organization-defined software, firmware, and information].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ac_16_b": {"name": "AC-16(b)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_16_b","Section": "Access Control (AC)","Service": "cloudwatch","SubGroup": null,"SubSection": "Security And Privacy Attributes (AC-16)"}],"description": "Ensure that the attribute associations are made and retained with the information.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ac_17_1": {"name": "AC-17(1) Monitoring And Control","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "Employ automated mechanisms to monitor and control remote 
access methods.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"ac_17_2": {"name": "AC-17(2) Protection Of Confidentiality And Integrity Using Encryption","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_2","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "Implement cryptographic mechanisms to protect the confidentiality and integrity of remote access sessions.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ac_17_9": {"name": "AC-17(9) Disconnect Or Disable Access","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_9","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "Provide the capability to disconnect or disable remote access to the system within [Assignment: organization-defined time period].","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"ac_17_b": {"name": "AC-17(b)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_b","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "Authorize each type of remote access to the system prior to allowing such connections.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"ac_24_1": {"name": "AC-24(1)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_24_1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Control Decisions (AC-24)"}],"description": "Transmit [Assignment: organization-defined access authorization information] using [Assignment: organization-defined controls] to [Assignment: organization-defined systems] that enforce access control 
decisions.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"ac_3_10": {"name": "AC-3(10) Audited Override Of Access Control Mechanisms","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_10","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Employ an audited override of automated access mechanisms under [Assignment: organization-defined conditions] by [Assignment: organization-defined roles].","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"ac_3_13": {"name": "AC-3(13) Attribute-Based Access Control","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_13","Section": "Access Control (AC)","Service": "guarduty","SubGroup": null,"SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce attribute-based access control policy over defined subjects and objects and control access based upon [Assignment: organization-defined attributes to assume access permissions].","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_4_21": {"name": "AC-4(21) Physical Or Logical Separation Of Infomation Flows","checks": {"ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"apigateway_restapi_waf_acl_attached": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_4_21","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Information Flow Enforcement (AC-4)"}],"description": "Separate information flows logically or physically using [Assignment: organization-defined mechanisms and/or techniques] to accomplish [Assignment: organization-defined required separations by types of information].","checks_status": {"fail": 5,"pass": 5,"total": 18,"manual": 0}},"ac_4_22": {"name": "AC-4(22) Access Only","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": 
"FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_4_22","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Information Flow Enforcement (AC-4)"}],"description": "Provide access from a single device to computing platforms, applications, or data residing in multiple different security domains, while preventing information flow between the different security domains.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"ac_4_26": {"name": "AC-4(26) Audit Filtering Actions","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_4_26","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Information Flow Enforcement (AC-4)"}],"description": "When transferring information between different security domains, record and audit content filtering actions and results for the information being filtered.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"ac_4_28": {"name": "AC-4(28) Linear Filter Pipelines","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_4_28","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Information Flow Enforcement (AC-4)"}],"description": "When transferring information between different security domains, implement a linear content filter pipeline that is enforced with discretionary and mandatory access controls.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_6_10": {"name": "AC-6(10)","checks": {"iam_no_root_access_key": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_6_10","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Least Privilege (AC-6)"}],"description": "Prevent non-privileged users from executing privileged functions.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"au_11_1": {"name": "AU-11(1) Long-Term Retrieval Capability","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_11_1","Section": "Audit and Accountability (AU)","Service": "cloudwatch","SubGroup": null,"SubSection": "Audit Record Retention 
(AU-11)"}],"description": "Employ [Assignment: organization-defined measures] to ensure that long-term audit records generated by the system can be retrieved.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au_12_1": {"name": "AU-12(1) System-Wide And Time-Correlated Audit Trial","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12_1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Generation (AU-12)"}],"description": "Compile audit records from [Assignment: organization-defined system components] into a system-wide (logical or physical) audit trail that is time-correlated to within [Assignment: organization-defined level of tolerance for the relationship between time stamps of individual records in the audit trail].","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"au_12_2": {"name": "AU-12(2) Standardized Formats","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12_2","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Generation (AU-12)"}],"description": "Produce a system-wide (logical or physical) audit trail composed of audit records in a standardized format.","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"au_12_3": {"name": "AU-12(3) Changes By Authorized Individuals","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12_3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Generation 
(AU-12)"}],"description": "Provide and implement the capability for [Assignment: organization-defined individuals or roles] to change the logging to be performed on [Assignment: organization-defined system components] based on [Assignment: organization-defined selectable event criteria] within [Assignment: organization-defined time thresholds].","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"au_12_4": {"name": "AU-12(4) Query Parameter Audits Of Personally Identifiable Information","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12_4","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Generation (AU-12)"}],"description": "Provide and implement the capability for auditing the parameters of user query events for data sets containing personally identifiable information.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_12_a": {"name": "AU-12(a)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12_a","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Generation (AU-12)"}],"description": "Provide audit record generation capability for the event types the system is capable of auditing as defined in AU-2a on [Assignment: organization-defined system components].","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_12_c": {"name": "AU-12(c)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_12_c","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Record Generation (AU-12)"}],"description": "Generate audit records for the event types defined in AU-2c that include the audit record content defined in AU-3.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_14_3": {"name": "AU-14(3) Remote Viewing And Listening","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": 
"PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_14_3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Session Audit (AU-14)"}],"description": "Provide and implement the capability for authorized users to remotely view and hear content related to an established user session in real time.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au_14_a": {"name": "AU-14(a)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_14_a","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Session Audit (AU-14)"}],"description": "Provide and implement the capability for [Assignment: organization-defined users or roles] to [Selection (one or more): record; view; hear; log] the content of a user session under [Assignment: organization-defined circumstances].","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"au_14_b": {"name": "AU-14(b)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au_14_b","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Session Audit (AU-14)"}],"description": "Develop, integrate, and use session auditing activities in consultation with legal counsel and in accordance with applicable laws, executive orders, directives, regulations, policies, standards, and guidelines.","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"cm_12_b": {"name": "CM-12(b)","checks": 
{"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "cm_12_b","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Information Location (CM-12)"}],"description": "Identify and document the users who have access to the system and system components where the information is processed and stored.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"cp_10_2": {"name": "CP-10(2) Transaction Recovery","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_10_2","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "System Recovery And Reconstitution (CP-10)"}],"description": "Implement transaction recovery for systems that are transaction-based.","checks_status": {"fail": 2,"pass": 1,"total": 6,"manual": 0}},"pm_11_b": {"name": "PM-11(b)","checks": {"s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pm_11_b","Section": "Program Management (PM)","Service": "aws","SubGroup": null,"SubSection": "Mission And Business Process Defination (PM-11)"}],"description": "Determine information protection and personally identifiable information processing needs arising from the defined mission and business processes.","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"pm_14_b": {"name": "PM-14(b)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pm_14_b","Section": "Program Management (PM)","Service": "aws","SubGroup": null,"SubSection": "Testing, Training, And Monitoring (PM-14)"}],"description": "Review testing, training, and monitoring plans for consistency with the organizational risk management strategy and organization-wide priorities for risk response actions.","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"pm_17_b": {"name": "PM-17(b)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_object_versioning": "FAIL","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pm_17_b","Section": "Program Management (PM)","Service": "aws","SubGroup": null,"SubSection": 
"Protecting Controlled Unclassified Information On External Systems (PM-17)"}],"description": "Review and update the policy and procedures [Assignment: organization-defined frequency].","checks_status": {"fail": 5,"pass": 0,"total": 6,"manual": 0}},"pm_21_b": {"name": "PM-21(b)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pm_21_b","Section": "Program Management (PM)","Service": "cloudwatch","SubGroup": null,"SubSection": "Accounting Of Disclosures (PM-21)"}],"description": "Retain the accounting of disclosures for the length of the time the personally identifiable information is maintained or five years after the disclosure is made, whichever is longer.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"ra_10_a": {"name": "RA-10(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_10_a","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": null,"SubSection": "Threat Hunting (RA-10)"}],"description": "Establish and maintain a cyber threat hunting capability to: 1. Search for indicators of compromise in organizational systems; and 2. Detect, track, and disrupt threats that evade existings.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sa_10_1": {"name": "SA-10(1) Software And Firmware Integrity Verification","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa_10_1","Section": "System and Services Acquisition (SA)","Service": "kms","SubGroup": null,"SubSection": "Developer Configuration Management (SA-10)"}],"description": "Require the developer of the system, system component, or system service to enable integrity verification of software and firmware components.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc_12_2": {"name": "SC-12(2) Symmetric Keys","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_12_2","Section": "System and Communications Protection (SC)","Service": "kms","SubGroup": null,"SubSection": "Cryptographic Key Establishment And Management (SC-12)"}],"description": "Produce, control, and distribute symmetric cryptographic keys using [Selection: NIST FIPS-validated; NSA-approved] key management technology and processes.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"sc_12_6": {"name": "SC-12(6) Physical Control Of Keys","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_12_6","Section": "System and Communications Protection (SC)","Service": "kms","SubGroup": null,"SubSection": "Cryptographic Key Establishment And Management (SC-12)"}],"description": "Maintain physical control of cryptographic keys when stored information is encrypted by external service providers.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"sc_13_a": {"name": "SC-13(a)","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": 
"FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_13_a","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Cryptographic Protection (SC-13)"}],"description": "Determine the [Assignment: organization-defined cryptographic uses].","checks_status": {"fail": 8,"pass": 3,"total": 18,"manual": 0}},"sc_16_1": {"name": "SC-16(1) Integrity Verification","checks": {"s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_16_1","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Transmission Of Security And Privacy Attributes (SC-16)"}],"description": "Verify the integrity of transmitted security and privacy attributes.","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"sc_23_3": {"name": "SC-23(3) Unique System-Generated Session Identifiers","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_23_3","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Session Authenticity (SC-23)"}],"description": "Generate a unique session identifier for each session with [Assignment: organization-defined randomness requirements] and recognize only session identifiers that are system-generated.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"sc_23_5": {"name": "SC-23(5) Allowed Certificate Authorities","checks": {"elb_ssl_listeners": "FAIL","elbv2_insecure_ssl_ciphers": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_23_5","Section": "System and Communications Protection (SC)","Service": "elb","SubGroup": null,"SubSection": "Session Authenticity (SC-23)"}],"description": "Only allow the use of [Assignment: organization-defined certificate authorities] for verification of the establishment of protected sessions.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"sc_28_1": {"name": "SC-28(1) Cryptographic Protection","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": 
null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_28_1","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Protection Of Information At Rest (SC-28)"}],"description": "Implement cryptographic mechanisms to prevent unauthorized disclosure and modification of the following information at rest on [Assignment: organization-defined system components or media]: [Assignment: organization-defined information].","checks_status": {"fail": 5,"pass": 3,"total": 14,"manual": 0}},"sc_28_2": {"name": "SC-28(2) Offline Storage","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_28_2","Section": "System and Communications Protection (SC)","Service": "cloudwatch","SubGroup": null,"SubSection": "Protection Of Information At Rest (SC-28)"}],"description": "Remove the following information from online storage and store offline in a secure location: [Assignment: organization-defined information].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc_43_b": {"name": "SC-43(b)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_43_b","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": null,"SubSection": "Usage Restrictions (SC-43)"}],"description": "Authorize, monitor, and control the use of such components within the system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sc_7_11": {"name": "SC-7(11) Restrict Incoming communications Traffic","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_11","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Only allow incoming communications from [Assignment: organization-defined authorized sources] to be routed to [Assignment: organization-defined authorized destinations].","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"sc_7_12": {"name": "SC-7(12) Host-Based Protection","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": 
"FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_12","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Implement [Assignment: organization-defined host-based boundary protection mechanisms] at [Assignment: organization-defined system components].","checks_status": {"fail": 3,"pass": 7,"total": 17,"manual": 0}},"sc_7_16": {"name": "SC-7(16) Prevent Discovery Of System Components","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_16","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prevent the discovery of specific system components that represent a managed interface.","checks_status": {"fail": 3,"pass": 7,"total": 17,"manual": 0}},"sc_7_20": {"name": "SC-7(20) Prevent Discovery Of System Components","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_20","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prevent the discovery of specific system components that represent a managed interface.","checks_status": {"fail": 1,"pass": 5,"total": 13,"manual": 0}},"sc_7_21": {"name": "SC-7(21) Isolation Of System Components","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": 
[{"Type": null,"ItemId": "sc_7_21","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Employ boundary protection mechanisms to isolate [Assignment: organization-defined system components] supporting [Assignment: organization-defined missions and/or business functions].","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"sc_7_25": {"name": "SC-7(25) Unclassified National Security System Connections","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_25","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prohibit the direct connection of [Assignment: organization-defined unclassified national security system] to an external network without the use of [Assignment: organization-defined boundary protection device].","checks_status": {"fail": 3,"pass": 5,"total": 15,"manual": 0}},"sc_7_26": {"name": "SC-7(26) Classified National Security System Connections","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_26","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prohibit the direct connection of a classified national security system to an external network without the use of [Assignment: organization-defined boundary protection device].","checks_status": {"fail": 3,"pass": 5,"total": 15,"manual": 0}},"sc_7_27": {"name": "SC-7(27) Unclassified Non-National Security System Connections","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": 
"PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_27","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prohibit the direct connection of [Assignment: organization-defined unclassified non-national security system] to an external network without the use of [Assignment: organization-defined boundary protection device].","checks_status": {"fail": 3,"pass": 5,"total": 15,"manual": 0}},"sc_7_28": {"name": "SC-7(28) Connections To Public Networks","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_28","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "Prohibit the direct connection of [Assignment: organization-defined system] to a public network.","checks_status": {"fail": 3,"pass": 5,"total": 15,"manual": 0}},"si_13_5": {"name": "SI-13(5) Failover Capability","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL","redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_13_5","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Predictable Failure Prevention (SI-13)"}],"description": "Provide [Selection: real-time; near real-time] [Assignment: organization-defined failover capability] for the system.","checks_status": {"fail": 4,"pass": 1,"total": 8,"manual": 0}},"si_19_4": {"name": "SI-19(4) Removal, Masking, Encryption, Hashing, Or Replacement Of Direct Identifiers","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_19_4","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "De-Identification (SI-19)"}],"description": "Remove, mask, encrypt, hash, or replace direct identifiers in a dataset.","checks_status": {"fail": 4,"pass": 3,"total": 13,"manual": 0}},"si_4_10": {"name": "SI-4(10) Visibility Of Encrypted Communications","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": 
"si_4_10","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Make provisions so that [Assignment: organization-defined encrypted communications traffic] is visible to [Assignment: organization-defined system monitoring tools and mechanisms].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_12": {"name": "SI-4(12) Automated Organization-Generated Alerts","checks": {"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_12","Section": "System and Information integrity (SI)","Service": "cloudwatch","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Alert [Assignment: organization-defined personnel or roles] using [Assignment: organization-defined automated mechanisms] when the following indications of inappropriate or unusual activities with security or privacy implications occur: [Assignment: organization-defined activities that trigger alerts].","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"si_4_14": {"name": "SI-4(14) Wireless Intrusion Detection","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_14","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Employ a wireless intrusion detection system to identify rogue wireless devices and to detect attack attempts and potential compromises or breaches to the system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_17": {"name": "SI-4(17) Integrated Situational Awareness","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_17","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Correlate information from monitoring physical, cyber, and supply chain activities to achieve integrated, organization-wide situational awareness.","checks_status": {"fail": 7,"pass": 2,"total": 13,"manual": 0}},"si_4_20": {"name": "SI-4(20) Privileged Users","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_4_20","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Implement the following additional monitoring of privileged users: 
[Assignment: organization-defined additional monitoring].","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"si_4_23": {"name": "SI-4(23) Host-Based Devices","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_23","Section": "System and Information integrity (SI)","Service": "guardduty","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Implement the following host-based monitoring mechanisms at [Assignment: organization-defined system components]: [Assignment: organization-defined host-based monitoring mechanisms].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_25": {"name": "SI-4(25) Optimize Network Traffic Analysis","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_25","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "System Monitoring (SI-4)"}],"description": "Provide visibility into network traffic at external and key internal system interfaces to optimize the effectiveness of monitoring devices.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ac_17_10": {"name": "AC-17(10) Authenticate Remote Commands","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_10","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "Provide the capability to disconnect or disable remote access to the system within [Assignment: organization-defined time period].","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"ac_2_3_a": {"name": "AC-2(3)(a)","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_3_a","Section": "Access Control (AC)","Service": "iam","SubGroup": "AC-2(3) Disable Accounts","SubSection": "Account Management (AC-2)"}],"description": "Support the management of system accounts using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_2_3_b": {"name": "AC-2(3)(b)","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_3_b","Section": "Access Control (AC)","Service": "iam","SubGroup": "AC-2(3) Disable Accounts","SubSection": "Account Management (AC-2)"}],"description": "Disable accounts within [Assignment: organization-defined time period] when the accounts: (b) Are no longer associated with a user or individual.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_2_3_c": {"name": "AC-2(3)(c)","checks": 
{"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_3_c","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-2(3) Disable Accounts","SubSection": "Account Management (AC-2)"}],"description": "Disable accounts within [Assignment: organization-defined time period] when the accounts: (c) Are in violation of organizational policy.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_2_3_d": {"name": "AC-2(3)(d)","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_3_d","Section": "Access Control (AC)","Service": "iam","SubGroup": "AC-2(3) Disable Accounts","SubSection": "Account Management (AC-2)"}],"description": "Disable accounts within [Assignment: organization-defined time period] when the accounts: (d) Have been inactive for [Assignment: organization-defined time period].","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac_2_d_1": {"name": "AC-2(d)(1)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_d_1","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "d. Specify: 1. Authorized users of the system;personnel termination and transfer processes.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ac_2_i_2": {"name": "AC-2(i)(2)","checks": {"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_ยก_2","Section": "Access Control (AC)","Service": "iam","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "i. Authorize access to the system based on: 2. 
Intended system usage.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ac_3_3_a": {"name": "AC-3(3)(a)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_a","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and objects specified in the policy, and where the policy: (a) Is uniformly enforced across the covered subjects and objects within the system.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_3_c": {"name": "AC-3(3)(c)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_c","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and objects specified in the policy, and where the policy: (c) Specifies that [Assignment: organization-defined subjects] may explicitly be granted [Assignment: organization-defined privileges] such that they are not limited by any defined subset (or all) of the above constraints.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_4_a": {"name": "AC-3(4)(a)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_4_a","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(4) Discretionary Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: 
organization-defined discretionary access policy] over the set of covered subjects and objects specified in the policy, and where the policy specifies that a subject that has been granted access to information can do one or more of the following: (a) Pass the information to any other subjects or objects.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_4_b": {"name": "AC-3(4)(b)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_4_b","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(4) Discretionary Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined discretionary access policy] over the set of covered subjects and objects specified in the policy, and where the policy specifies that a subject that has been granted access to information can do one or more of the following: (b) Grant its privileges to other subjects.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_4_c": {"name": "AC-3(4)(c)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_4_c","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(4) Discretionary Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined discretionary access policy] over the set of covered subjects and objects specified in the policy, and where the policy specifies that a subject that has been granted access to information can do one or more of the following: (c) Change security attributes on subjects, objects, the system, or the system’s components.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_4_d": {"name": "AC-3(4)(d)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": 
null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_4_d","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(4) Discretionary Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined discretionary access policy] over the set of covered subjects and objects specified in the policy, and where the policy specifies that a subject that has been granted access to information can do one or more of the following: (d) Choose the security attributes to be associated with newly created or revised objects.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_4_e": {"name": "AC-3(4)(e)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_4_e","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(4) Discretionary Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined discretionary access policy] over the set of covered subjects and objects specified in the policy, and where the policy specifies that a subject that has been granted access to information can do one or more of the following: (e) Change the rules governing access.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_7_4_a": {"name": "AC-7(4)(a)","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_7_4_a","Section": "Access Control (AC)","Service": "iam","SubGroup": "AC-7(4) Use Of Alternate Authentication Factor","SubSection": "Unsuccessful Logon Attempts (AC-7)"}],"description": "Allow the use of [Assignment: organization-defined authentication factors] that are different from the primary authentication factors after the number of organization-defined consecutive invalid logon attempts have been exceeded.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"ca_7_4_c": {"name": "CA-7(4)(c)","checks": {"elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca_7_4_c","Section": "Assessment, Authorization, And Monitoring (CA)","Service": "aws","SubGroup": "CA-7(4) Risk Monitoring","SubSection": "Continuous Monitoring (CA-7)"}],"description": "Ensure risk monitoring is an integral part of the continuous monitoring strategy that includes the following: (c) Change monitoring.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_2_b_1": {"name": "CM-2(b)(1)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": 
null,"ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2_b_1","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-2(b)","SubSection": "Baseline Configuration (CM-2)"}],"description": "Review and update the baseline configuration of the system: 1. [Assignment: organization-defined frequency].","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"cm_2_b_2": {"name": "CM-2(b)(2)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2_b_2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-2(b)","SubSection": "Baseline Configuration (CM-2)"}],"description": "Review and update the baseline configuration of the system: 2. When required due to [Assignment: organization-defined circumstances].","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"cm_2_b_3": {"name": "CM-2(b)(3)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"ec2_instance_older_than_specific_days": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_2_b_3","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-2(b)","SubSection": "Baseline Configuration (CM-2)"}],"description": "Review and update the baseline configuration of the system: 3 When system components are installed or upgraded.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"cm_5_1_a": {"name": "CM-5(1)(a)","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","ec2_instance_profile_attached": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_5_1_a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-5(1) Automated Access Enforcement And Audit Records","SubSection": "Access Restrictions For Change (CM-5)"}],"description": "Enforce access restrictions using [Assignment: organization-defined automated mechanisms].","checks_status": {"fail": 1,"pass": 2,"total": 15,"manual": 0}},"cm_5_1_b": {"name": "CM-5(1)(b)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_5_1_b","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-5(1) Automated Access Enforcement And Audit Records","SubSection": "Access Restrictions For Change (CM-5)"}],"description": 
"Automatically generate audit records of the enforcement actions.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"cm_8_3_a": {"name": "CM-8(3)(a)","checks": {"guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_3_a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-8(3) Automated Unauthorized Component Detection","SubSection": "System Component Inventory (CM-8)"}],"description": "Detect the presence of unauthorized hardware, software, and firmware components within the system using [Assignment: organization-defined automated mechanisms] [Assignment: organization-defined frequency].","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"cm_8_a_1": {"name": "CM-8(a)(1)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_a_1","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-8(a)","SubSection": "System Component Inventory (CM-8)"}],"description": "Develop and document an inventory of system components that: 1. Accurately reflects the system.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_8_a_2": {"name": "CM-8(a)(2)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_a_2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-8(a)","SubSection": "System Component Inventory (CM-8)"}],"description": "Develop and document an inventory of system components that: 2. Includes all components within the system.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_8_a_3": {"name": "CM-8(a)(3)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_a_3","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-8(a)","SubSection": "System Component Inventory (CM-8)"}],"description": "Develop and document an inventory of system components that: 3. Does not include duplicate accounting of components or components assigned to any other system.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_8_a_4": {"name": "CM-8(a)(4)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_a_4","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-8(a)","SubSection": "System Component Inventory (CM-8)"}],"description": "Develop and document an inventory of system components that: 4. Is at the level of granularity deemed necessary for tracking and reporting.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm_8_a_5": {"name": "CM-8(a)(5)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm_8_a_5","Section": "Configuration Management (CM)","Service": "aws","SubGroup": "CM-8(a)","SubSection": "System Component Inventory (CM-8)"}],"description": "Develop and document an inventory of system components that: 5. 
Includes the following information to achieve system component accountability: [Assignment: organization-defined information deemed necessary to achieve effective system component accountability].","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cp_1_a_2": {"name": "CP-1(a)(2)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_1_a_2","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": "CP-1(a)","SubSection": "Policy And Procedures (CP-1)"}],"description": "a. Develop, document, and disseminate to [Assignment: organization-defined personnel or roles]: 2. Procedures to facilitate the implementation of the contingency planning policy and the associated contingency planning controls.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"cp_2_a_6": {"name": "CP-2(a)(6)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_a_6","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": "CP-2(a)","SubSection": "Contingency Plan (CP-2)"}],"description": "Develop a contingency plan for the system that: 6. Addresses the sharing of contingency information.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"cp_2_a_7": {"name": "CP-2(a)(7)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_2_a_7","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": "CP-2(a)","SubSection": "Contingency Plan (CP-2)"}],"description": "Develop a contingency plan for the system that: 7. 
Is reviewed and approved by [Assignment: organization-defined personnel or roles].","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ia_2_6_a": {"name": "IA-2(6)(a)","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_2_6_a","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-2(6) Access To Accounts — Separate Device","SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "Implement multi-factor authentication for [Selection (one or more): local; network; remote] access to [Selection (one or more): privileged accounts; non-privileged accounts] such that: (a) One of the factors is provided by a device separate from the system gaining access.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia_3_3_b": {"name": "IA-3(3)(b)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ia_3_3_b","Section": "Identification and Authentication (IA)","Service": "aws","SubGroup": "IA-3(3) Dynamic Address Allocation","SubSection": "Device Identification And Authentication (IA-3)"}],"description": "Audit lease information when assigned to a device.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"ia_5_1_c": {"name": "IA-5(1)(c)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ia_5_1_c","Section": "Identification and Authentication (IA)","Service": "aws","SubGroup": "IA-5(1) Password-Based Authentication","SubSection": "Authenticator Management (IA-5)"}],"description": "For password-based authentication: (c) Transmit passwords only over cryptographically-protected channels.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ia_5_1_f": {"name": "IA-5(1)(f)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_1_f","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-5(1) Password-Based Authentication","SubSection": "Authenticator Management (IA-5)"}],"description": "For password-based authentication: (f) Allow user selection of long passwords and passphrases, including spaces and all printable characters.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_1_g": {"name": "IA-5(1)(g)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_1_g","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-5(1) Password-Based Authentication","SubSection": "Authenticator Management (IA-5)"}],"description": "For password-based authentication: (g) Employ automated tools to assist the user in selecting strong password authenticators.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_1_h": {"name": 
"IA-5(1)(h)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_1_h","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-5(1) Password-Based Authentication","SubSection": "Authenticator Management (IA-5)"}],"description": "For password-based authentication: (h) Enforce the following composition and complexity rules: [Assignment: organization-defined composition and complexity rules].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_8_2_b": {"name": "IA-8(2)(b)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_8_2_b","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-8(2) Acceptance Of External Authenticators","SubSection": "Identification And Authentication (Non-Organizational Users) (IA-8)"}],"description": "Document and maintain a list of accepted external authenticators.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ma_4_1_a": {"name": "MA-4(1)(a)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ma_4_1_a","Section": "Maintenance (MA)","Service": "aws","SubGroup": "MA-4(1) Logging And Review","SubSection": "Nonlocal Maintenance (MA-4)"}],"description": "Log [Assignment: organization-defined audit events] for nonlocal maintenance and diagnostic sessions.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"ra_1_a_1": {"name": "RA-1(a)(1)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_1_a_1","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": "RA-1(a)","SubSection": "Policy And Procedures (RA-1)"}],"description": "Establish and maintain a cyber threat hunting capability to: 1. Search for indicators of compromise in organizational systems.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_1_a_2": {"name": "RA-1(a)(2)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_1_a_2","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": "RA-1(a)","SubSection": "Policy And Procedures (RA-1)"}],"description": "a. Establish and maintain a cyber threat hunting capability to: 2. Detect, track, and disrupt threats that evade existing controls.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_3_a_1": {"name": "RA-3(a)(1)","checks": {"guardduty_is_enabled": "PASS","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ra_3_a_1","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": null,"SubSection": "Risk Assessment (RA-3)"}],"description": "a. Conduct a risk assessment, including: 1. 
Identifying threats to and vulnerabilities in the system.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"sc_5_3_a": {"name": "SC-5(3)(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_5_3_a","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": "SC-5(3) Detection And Monitoring","SubSection": "Denial Of Service Protection (SC-5)"}],"description": "Employ the following monitoring tools to detect indicators of denial-of-service attacks against, or launched from, the system: [Assignment: organization-defined monitoring tools].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sc_5_3_b": {"name": "SC-5(3)(b)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc_5_3_b","Section": "System and Communications Protection (SC)","Service": "guarduty","SubGroup": "SC-5(3) Detection And Monitoring","SubSection": "Denial Of Service Protection (SC-5)"}],"description": "Monitor the following system resources to determine if sufficient resources exist to prevent effective denial-of-service attacks: [Assignment: organization-defined system resources].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sc_7_4_b": {"name": "SC-7(4)(b)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_4_b","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": "SC-7(4) External Telecommunications Services","SubSection": "Boundary Protection (SC-7)"}],"description": "Establish a traffic flow policy for each managed interface.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"sc_7_4_g": {"name": "SC-7(4)(g)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_4_g","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": "SC-7(4) External Telecommunications Services","SubSection": "Boundary Protection (SC-7)"}],"description": "Publish information to enable remote networks to detect unauthorized control plane traffic from internal networks.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"sc_7_9_a": {"name": "SC-7(9)(a)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_9_a","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": "SC-7(9) Restrict Threatening Outgoing Communications Traffic","SubSection": "Boundary Protection (SC-7)"}],"description": "Detect and deny outgoing communications traffic posing a threat to external systems.","checks_status": {"fail": 
1,"pass": 5,"total": 13,"manual": 0}},"sc_7_9_b": {"name": "SC-7(9)(b)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_9_b","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": "SC-7(9) Restrict Threatening Outgoing Communications Traffic","SubSection": "Boundary Protection (SC-7)"}],"description": "Audit the identity of internal users associated with denied communications.","checks_status": {"fail": 5,"pass": 2,"total": 11,"manual": 0}},"si_1_1_c": {"name": "SI-1(1)(c)","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_1_1_c","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Policy And Procedures (SI-1)"}],"description": "Audit the use of the manual override capability.","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"si_1_a_2": {"name": "SI-1(a)(2)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_object_versioning": "FAIL","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_1_a_2","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Policy And Procedures (SI-1)"}],"description": "a. Develop, document, and disseminate to [Assignment: organization-defined personnel or roles]: 2. 
Procedures to facilitate the implementation of the system and information integrity policy and the associated system and information integrity controls;.","checks_status": {"fail": 5,"pass": 0,"total": 6,"manual": 0}},"si_3_8_a": {"name": "SI-3(8)(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_3_8_a","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": "SI-3(8) Detect Unauthorized Commands","SubSection": "Malicious Code Protection (SI-3)"}],"description": "Detect the following unauthorized operating system commands through the kernel application programming interface on [Assignment: organization-defined system hardware components]: [Assignment: organization-defined unauthorized operating system commands].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_3_8_b": {"name": "SI-3(8)(b)","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_3_8_b","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": "SI-3(8) Detect Unauthorized Commands","SubSection": "Malicious Code Protection (SI-3)"}],"description": "[Selection (one or more): issue a warning; audit the command execution; prevent the execution of the command].","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"si_3_c_2": {"name": "SI-3(c)(2)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_3_c_2","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Malicious Code Protection (SI-3)"}],"description": "c. Configure malicious code protection mechanisms to: 2. 
[Selection (one or more): block malicious code; quarantine malicious code; take [Assignment: organization-defined action]]; and send alert to [Assignment: organization-defined personnel or roles] in response to malicious code detection.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"si_4_4_a": {"name": "SI-4(4)(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_4_a","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": "SI-4(4) Inbound and Outbound Communications Traffic","SubSection": "System Monitoring (SI-4)"}],"description": "Determine criteria for unusual or unauthorized activities or conditions for inbound and outbound communications traffic.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_4_b": {"name": "SI-4(4)(b)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_4_b","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": "SI-4(4) Inbound and Outbound Communications Traffic","SubSection": "System Monitoring (SI-4)"}],"description": "Monitor inbound and outbound communications traffic [Assignment: organization-defined frequency] for [Assignment: organization-defined unusual or unauthorized activities or conditions].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_a_1": {"name": "SI-4(a)(1)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_a_1","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": "SI-4(a)","SubSection": "System Monitoring (SI-4)"}],"description": "a. Monitor the system to detect: 1. Attacks and indicators of potential attacks in accordance with the following monitoring objectives: [Assignment: organization-defined monitoring objectives]; and 2. Unauthorized local, network, and remote connections.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si_4_a_2": {"name": "SI-4(a)(2)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_a_2","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": "SI-4(a)","SubSection": "System Monitoring (SI-4)"}],"description": "a. Monitor the system to detect: 2. 
Unauthorized local, network, and remote connections.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ac_17_4_a": {"name": "AC-17(4)(a)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_17_4_a","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-17(4) Privileged Commands And Access","SubSection": "Remote Access (AC-17)"}],"description": "Authorize the execution of privileged commands and access to security-relevant information via remote access only in a format that provides assessable evidence and for the following needs: [Assignment: organization-defined needs];","checks_status": {"fail": 3,"pass": 5,"total": 16,"manual": 0}},"ac_2_12_a": {"name": "AC-2(12)(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_2_12_a","Section": "Access Control (AC)","Service": "guarduty","SubGroup": "AC-2(12) Account Monitoring","SubSection": "Account Management (AC-2)"}],"description": "Monitor system accounts for [Assignment: organization-defined atypical usage].","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ac_3_12_a": {"name": "AC-3(12)(a)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_12_a","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(12) Assert And Enforce Application Access","SubSection": "Access Enforcement (AC-3)"}],"description": "Require applications to assert, as part of the installation process, the access needed to the following system applications and functions: [Assignment: organization-defined system applications and functions].","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_12_b": {"name": "AC-3(12)(b)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac_3_12_b","Section": "Access Control (AC)","Service": "guarduty","SubGroup": "AC-3(12) Assert And Enforce Application Access","SubSection": "Access Enforcement (AC-3)"}],"description": "Provide an enforcement mechanism to prevent unauthorized access;","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ac_3_15_a": {"name": "AC-3(15)(a)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": 
null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_15_a","Section": "Access Control (AC)","Service": "guarduty","SubGroup": "AC-3(15) Discretionary And Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access control policy] over the set of covered subjects and objects specified in the policy.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_15_b": {"name": "AC-3(15)(b)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_15_b","Section": "Access Control (AC)","Service": "guarduty","SubGroup": "AC-3(15) Discretionary And Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined discretionary access control policy] over the set of covered subjects and objects specified in the policy.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ia_5_18_a": {"name": "IA-5(18)(a)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_18_a","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-5(18) Password Managers","SubSection": "Authenticator Management (IA-5)"}],"description": "Employ [Assignment: organization-defined password managers] to generate and manage passwords.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia_5_18_b": {"name": "IA-5(18)(b)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia_5_18_b","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": "IA-5(18) Password Managers","SubSection": "Authenticator Management (IA-5)"}],"description": "Protect the passwords using [Assignment: organization-defined controls].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"pm_14_a_1": {"name": "PM-14(a)(1)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": 
null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "pm_14_a_1","Section": "Program Management (PM)","Service": "aws","SubGroup": null,"SubSection": "Testing, Training, And Monitoring (PM-14)"}],"description": "a. Implement a process for ensuring that organizational plans for conducting security and privacy testing, training, and monitoring activities associated with organizational systems: 1. Are developed and maintained.","checks_status": {"fail": 8,"pass": 4,"total": 20,"manual": 0}},"ra_10_a_1": {"name": "RA-10(a)(1)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_10_a_1","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": "RA-10(a)","SubSection": "Threat Hunting (RA-10)"}],"description": "Establish and maintain a cyber threat hunting capability to: 1. Search for indicators of compromise in organizational systems; and 2. Detect, track, and disrupt threats that evade existings.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ra_10_a_2": {"name": "RA-10(a)(2)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ra_10_a_2","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": "RA-10(a)","SubSection": "Threat Hunting (RA-10)"}],"description": "a. Establish and maintain a cyber threat hunting capability to: 2. Detect, track, and disrupt threats that evade existings.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sa_15_a_4": {"name": "SA-15(a)(4)","checks": {"elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa_15_a_4","Section": "System and Services Acquisition (SA)","Service": "aws","SubGroup": null,"SubSection": "Development Process, Standards, And Tools (SA-15)"}],"description": "a. Require the developer of the system, system component, or system service to follow a documented development process that: 4. 
Documents, manages, and ensures the integrity of changes to the process and/or tools used in development.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"sc_36_1_a": {"name": "SC-36(1)(a)","checks": {"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_36_1_a","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Distributed Processing And Storage (SC-36)"}],"description": "Employ polling techniques to identify potential faults, errors, or compromises to the following processing and storage components: [Assignment: organization-defined distributed processing and storage components].","checks_status": {"fail": 1,"pass": 0,"total": 5,"manual": 0}},"sc_7_24_b": {"name": "SC-7(24)(b)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc_7_24_b","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": "SC-7(24) Personally Identifiable Information","SubSection": "Boundary Protection (SC-7)"}],"description": "For systems that process personally identifiable information: (b) Monitor for permitted processing at the external interfaces to the system and at key internal boundaries within the system.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"si_10_1_c": {"name": "SI-10(1)(c)","checks": {"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si_10_1_c","Section": "System and Information integrity (SI)","Service": "aws","SubGroup": "SI-10(1) Manual Override Capability","SubSection": "Information Input Validation (SI-10)"}],"description": "Audit the use of the manual override capability.","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"si_4_13_a": {"name": "SI-4(13)(a)","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si_4_13_a","Section": "System and Information integrity (SI)","Service": "guarduty","SubGroup": "SI-4(13) Analyze Traffic And Event Patterns","SubSection": "System Monitoring (SI-4)"}],"description": "Analyze communications traffic and event patterns for the system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ac_3_3_b_1": {"name": "AC-3(3)(b)(1)","checks": 
{"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_b_1","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and objects specified in the policy, and where the policy: (b) Specifies that a subject that has been granted access to information is constrained from doing any of the following; (1) Passing the information to unauthorized subjects or objects.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_3_b_2": {"name": "AC-3(3)(b)(2)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_b_2","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and objects specified in the policy, and where the policy: (b) Specifies that a subject that has been granted access to information is constrained from doing any of the following; (2) Granting its privileges to other subjects.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_3_b_3": {"name": "AC-3(3)(b)(3)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_b_3","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and 
objects specified in the policy, and where the policy: (b) Specifies that a subject that has been granted access to information is constrained from doing any of the following; (3) Changing one or more security attributes (specified by the policy) on subjects, objects, the system, or system components.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_3_b_4": {"name": "AC-3(3)(b)(4)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_b_4","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and objects specified in the policy, and where the policy: (b) Specifies that a subject that has been granted access to information is constrained from doing any of the following; (4) Choosing the security attributes and attribute values (specified by the policy) to be associated with newly created or modified objects.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"ac_3_3_b_5": {"name": "AC-3(3)(b)(5)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac_3_3_b_5","Section": "Access Control (AC)","Service": "aws","SubGroup": "AC-3(3) Mandatory Access Control","SubSection": "Access Enforcement (AC-3)"}],"description": "Enforce [Assignment: organization-defined mandatory access policy] over the set of covered subjects and objects specified in the policy, and where the policy: (b) Specifies that a subject that has been granted access to information is constrained from doing any of the following; (5) Changing the rules governing access.","checks_status": {"fail": 1,"pass": 1,"total": 15,"manual": 0}},"cp_1_a_1_b": {"name": "CP-1(a)(1)(b)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp_1_a_1_b","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": "CP-1(a)","SubSection": "Policy And Procedures (CP-1)"}],"description": "a. Develop, document, and disseminate to [Assignment: organization-defined personnel or roles]: 1. 
[Selection (one or more): Organization-level; Mission/business process-level; System-level] contingency planning policy that: (b) Is consistent with applicable laws, executive orders, directives, regulations, policies, standards, and guidelines.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}}},"requirements_passed": 86,"requirements_failed": 202,"requirements_manual": 0,"total_requirements": 288,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "c7fda251-1b8b-4668-be6e-6929da58d6af","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "rbi_cyber_security_framework_aws","framework": "RBI-Cyber-Security-Framework","version": "","description": "The Reserve Bank had prescribed a set of baseline cyber security controls for primary (Urban) cooperative banks (UCBs) in October 2018. On further examination, it has been decided to prescribe a comprehensive cyber security framework for the UCBs, as a graded approach, based on their digital depth and interconnectedness with the payment systems landscape, digital products offered by them and assessment of cyber security risk. The framework would mandate implementation of progressively stronger security measures based on the nature, variety and scale of digital product offerings of banks.","region": "eu-west-1","requirements": {"annex_i_6": {"name": "Annex I (6)","checks": {"ssm_managed_compliant_patching": "FAIL","guardduty_no_high_severity_findings": "FAIL","redshift_cluster_automatic_upgrades": null,"rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "annex_i_6","Section": null,"Service": "aws","SubGroup": null,"SubSection": null}],"description": "Put in place systems and processes to identify, track, manage and monitor the status of patches to servers, operating system and application software running at the systems used by the UCB officials (end-users). Implement and update antivirus protection for all servers and applicable end points preferably through a centralised system.","checks_status": {"fail": 2,"pass": 1,"total": 6,"manual": 0}},"annex_i_12": {"name": "Annex I (12)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "annex_i_12","Section": null,"Service": "aws","SubGroup": null,"SubSection": null}],"description": "Take periodic back up of the important data and store this data ‘off line’ (i.e., transferring important files to a storage device that can be detached from a computer/system after copying all the files).","checks_status": {"fail": 2,"pass": 1,"total": 5,"manual": 0}},"annex_i_1_1": {"name": "Annex I (1.1)","checks": {"ec2_instance_managed_by_ssm": "FAIL","organizations_account_part_of_organizations": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "annex_i_1_1","Section": null,"Service": "aws","SubGroup": null,"SubSection": null}],"description": "UCBs should maintain an up-to-date business IT Asset Inventory Register containing the following fields, as a minimum: a) Details of the IT Asset (viz., hardware/software/network devices, key personnel, services, etc.), b. Details of systems where customer data are stored, c. Associated business applications, if any, d. 
Criticality of the IT asset (For example, High/Medium/Low).","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"annex_i_1_3": {"name": "Annex I (1.3)","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","ec2_instance_public_ip": "FAIL","backup_vaults_encrypted": "PASS","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"kms_cmk_rotation_enabled": null,"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","elbv2_insecure_ssl_ciphers": "PASS","rds_snapshots_public_access": "PASS","ssm_documents_set_as_public": "PASS","s3_bucket_default_encryption": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","vpc_subnet_no_public_ip_by_default": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"sns_topics_kms_encryption_at_rest_enabled": "FAIL","awslambda_function_not_publicly_accessible": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "annex_i_1_3","Section": null,"Service": "aws","SubGroup": null,"SubSection": null}],"description": "Appropriately manage and provide protection within and outside UCB/network, keeping in mind how the data/information is stored, transmitted, processed, accessed and put to use within/outside the UCB's network, and level of risk they are exposed to depending on the sensitivity of the data/information.","checks_status": {"fail": 12,"pass": 15,"total": 40,"manual": 0}},"annex_i_5_1": {"name": "Annex I (5.1)","checks": {"elbv2_waf_acl_attached": "FAIL","apigateway_restapi_waf_acl_attached": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "annex_i_5_1","Section": null,"Service": "aws","SubGroup": null,"SubSection": null}],"description": "The firewall configurations should be set to the highest security level and evaluation of critical device (such as firewall, network switches, security devices, etc.) 
configurations should be done periodically.","checks_status": {"fail": 4,"pass": 1,"total": 5,"manual": 0}},"annex_i_7_1": {"name": "Annex I (7.1)","checks": {"iam_no_root_access_key": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "annex_i_7_1","Section": null,"Service": "iam","SubGroup": null,"SubSection": null}],"description": "Disallow administrative rights on end-user workstations/PCs/laptops and provide access rights on a 'need to know' and 'need to do' basis.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"annex_i_7_2": {"name": "Annex I (7.2)","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "annex_i_7_2","Section": null,"Service": "iam","SubGroup": null,"SubSection": null}],"description": "Passwords should be set as complex and lengthy and users should not use same passwords for all the applications/systems/devices.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"annex_i_7_3": {"name": "Annex I (7.3)","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "annex_i_7_3","Section": null,"Service": "vpc","SubGroup": null,"SubSection": null}],"description": "Remote Desktop Protocol (RDP) which allows others to access the computer remotely over a network or over the internet should be always disabled and should be enabled only with the approval of the authorised officer of the UCB. Logs for such remote access shall be enabled and monitored for suspicious activities.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"annex_i_7_4": {"name": "Annex I (7.4)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_audit_logging_enabled": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "annex_i_7_4","Section": null,"Service": "aws","SubGroup": null,"SubSection": null}],"description": "Implement appropriate (e.g. 
centralised) systems and controls to allow, manage, log and monitor privileged/super user/administrative access to critical systems (servers/databases, applications, network devices etc.)","checks_status": {"fail": 7,"pass": 3,"total": 15,"manual": 0}}},"requirements_passed": 3,"requirements_failed": 6,"requirements_manual": 0,"total_requirements": 9,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "c9352bc9-2107-40a5-8dc6-c67817863253","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "fedramp_moderate_revision_4_aws","framework": "FedRamp-Moderate-Revision-4","version": "","description": "The Federal Risk and Authorization Management Program (FedRAMP) was established in 2011. It provides a cost-effective, risk-based approach for the adoption and use of cloud services by the U.S. federal government. FedRAMP empowers federal agencies to use modern cloud technologies, with an emphasis on the security and protection of federal information.","region": "eu-west-1","requirements": {"ac-3": {"name": "Access Enforcement (AC-3)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system enforces approved authorizations for logical access to information and system resources in accordance with applicable access control policies.","checks_status": {"fail": 1,"pass": 6,"total": 21,"manual": 0}},"ac-4": {"name": "Information Flow Enforcement (AC-4)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"acm_certificates_expiration_check": "PASS","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-4","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system enforces approved authorizations for controlling the flow of information within the system and between interconnected systems based on 
organization-defined information flow control policies.","checks_status": {"fail": 3,"pass": 7,"total": 16,"manual": 0}},"ac-6": {"name": "Least Privilege (AC-6)","checks": {"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-6","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization employs the principle of least privilege, allowing only authorized accesses for users (or processes acting on behalf of users) which are necessary to accomplish assigned tasks in accordance with organizational missions and business functions.","checks_status": {"fail": 1,"pass": 6,"total": 20,"manual": 0}},"au-3": {"name": "Content of Audit Records (AU-3)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system generates audit records containing information that establishes what type of event occurred, when the event occurred, where the event occurred, the source of the event, the outcome of the event, and the identity of any individuals or subjects associated with the event.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au-9": {"name": "Protection of Audit Information (AU-9)","checks": {"cloudtrail_kms_encryption_enabled": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-9","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects audit information and audit tools from unauthorized access, modification, and deletion.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"cm-2": {"name": "Baseline Configuration (CM-2)","checks": {"ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"ec2_instance_managed_by_ssm": "FAIL","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": 
null,"ssm_managed_compliant_patching": "FAIL","apigateway_restapi_waf_acl_attached": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm-2","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization develops, documents, and maintains under configuration control, a current baseline configuration of the information system.","checks_status": {"fail": 8,"pass": 6,"total": 22,"manual": 0}},"ia-2": {"name": "Identification and Authentication (Organizational users) (IA-2)","checks": {"iam_no_root_access_key": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-2","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": null}],"description": "The information system uniquely identifies and authenticates organizational users (or processes acting on behalf of organizational users).","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"ra-5": {"name": "Vulnerability Scanning (RA-5)","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ra-5","Section": "Risk Assessment (RA)","Service": "guarduty","SubGroup": null,"SubSection": null}],"description": "Scan for system vulnerabilities. 
Share vulnerability information and security controls that eliminate vulnerabilities.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"sc-2": {"name": "Application Partitioning (SC-2)","checks": {"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc-2","Section": "System and Communications Protection (SC)","Service": "iam","SubGroup": null,"SubSection": null}],"description": "The information system separates user functionality (including user interface services) from information system management functionality.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"sc-4": {"name": "Information In Shared Resources (SC-4)","checks": {"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-4","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system prevents unauthorized and unintended information transfer via shared system resources.","checks_status": {"fail": 2,"pass": 5,"total": 14,"manual": 0}},"sc-5": {"name": "Denial Of Service Protection (SC-5)","checks": {"guardduty_is_enabled": "PASS","rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"rds_instance_deletion_protection": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-5","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects against or limits the effects of the following types of denial of service attacks: [Assignment: organization-defined types of denial of service attacks or references to sources for such information] by employing [Assignment: organization-defined security safeguards].","checks_status": {"fail": 4,"pass": 1,"total": 6,"manual": 0}},"sc-7": {"name": "Boundary Protection (SC-7)","checks": {"elb_ssl_listeners": "FAIL","ec2_instance_public_ip": "FAIL","elbv2_waf_acl_attached": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": 
"PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-7","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system: a. Monitors and controls communications at the external boundary of the system and at key internal boundaries within the system; b. Implements subnetworks for publicly accessible system components that are [Selection: physically; logically] separated from internal organizational networks; and c. Connects to external networks or information systems only through managed interfaces consisting of boundary protection devices arranged in accordance with an organizational security architecture.","checks_status": {"fail": 6,"pass": 6,"total": 21,"manual": 0}},"sc-8": {"name": "Transmission Integrity (SC-8)","checks": {"elb_ssl_listeners": "FAIL","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-8","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects the confidentiality AND integrity of transmitted information.","checks_status": {"fail": 3,"pass": 1,"total": 5,"manual": 0}},"si-7": {"name": "Software, Firmware, and Information Integrity (SI-7)","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si-7","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization employs integrity verification tools to detect unauthorized changes to [Assignment: organization-defined software, firmware, and information].","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"au-11": {"name": "Audit Record Retention (AU-11)","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-11","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization retains audit records for at least 90 days to provide support for after-the-fact investigations of security incidents and to meet regulatory and organizational information retention requirements.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cp-10": {"name": "Information System Recovery And Reconstitution (CP-10)","checks": {"rds_instance_multi_az": "FAIL","efs_have_backup_enabled": "FAIL","elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp-10","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization provides for the recovery and reconstitution of the information system to a known state after a disruption, compromise, or failure.","checks_status": {"fail": 4,"pass": 1,"total": 9,"manual": 0}},"sa-10": {"name": "Developer Configuration Management (SA-10)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": 
"FAIL","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa-10","Section": "System and Services Acquisition (SA)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization requires the developer of the information system, system component, or information system service to: a. Perform configuration management during system, component, or service [Selection (one or more): design; development; implementation; operation]; b. Document, manage, and control the integrity of changes to [Assignment: organization-defined configuration items under configuration management]; c. Implement only organization-approved changes to the system, component, or service; d. Document approved changes to the system, component, or service and the potential security impacts of such changes; and e. Track security flaws and flaw resolution within the system, component, or service and report findings to [Assignment: organization-defined personnel].","checks_status": {"fail": 2,"pass": 2,"total": 4,"manual": 0}},"sc-12": {"name": "Cryptographic Key Establishment And Management (SC-12)","checks": {"kms_cmk_rotation_enabled": null,"acm_certificates_expiration_check": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sc-12","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization establishes and manages cryptographic keys for required cryptography employed within the information system in accordance with [Assignment: organization-defined requirements for key generation, distribution, storage, access, and destruction].","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"sc-13": {"name": "Use of Cryptography (SC-13)","checks": {"s3_bucket_default_encryption": "PASS","sns_topics_kms_encryption_at_rest_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-13","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system implements FIPS-validated or NSA-approved cryptography in accordance with applicable federal laws, Executive Orders, directives, policies, regulations, and standards.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"sc-23": {"name": "Session Authenticity (SC-23)","checks": {"elb_ssl_listeners": "FAIL","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-23","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects the authenticity of communications sessions.","checks_status": {"fail": 3,"pass": 0,"total": 4,"manual": 0}},"sc-28": {"name": "Protection of Information at Rest (SC-28)","checks": {"ec2_ebs_volume_encryption": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null},"status": 
"FAIL","attributes": [{"Type": null,"ItemId": "sc-28","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The information system protects the confidentiality AND integrity of [Assignment: organization-defined information at rest].","checks_status": {"fail": 5,"pass": 2,"total": 13,"manual": 0}},"si-12": {"name": "Information Handling and Retention (SI-12)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si-12","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": null}],"description": "The organization handles and retains information within the information system and information output from the system in accordance with applicable federal laws, Executive Orders, directives, policies, regulations, standards, and operational requirements.","checks_status": {"fail": 3,"pass": 1,"total": 8,"manual": 0}},"ac-2-1": {"name": "AC-2(1) Automated System Account Management","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-2-1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization employs automated mechanisms to support the management of information system accounts.","checks_status": {"fail": 0,"pass": 2,"total": 15,"manual": 0}},"ac-2-3": {"name": "AC-2-3","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-2-3","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The information system automatically disables inactive accounts after 90 days for user accounts.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"ac-2-4": {"name": "AC-2(4) Automated Audit Actions","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-2-4","Section": "Access Control 
(AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The information system automatically audits account creation, modification, enabling, disabling, and removal actions, and notifies [Assignment: organization-defined personnel or roles].","checks_status": {"fail": 3,"pass": 3,"total": 14,"manual": 0}},"ac-2-f": {"name": "AC-2(f)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-2-f","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization: f. Creates, enables, modifies, disables, and removes information system accounts in accordance with [Assignment: organization-defined procedures or conditions].","checks_status": {"fail": 0,"pass": 0,"total": 13,"manual": 0}},"ac-2-g": {"name": "AC-2(g)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-2-g","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization: g. Monitors the use of information system accounts.","checks_status": {"fail": 3,"pass": 4,"total": 12,"manual": 0}},"ac-2-j": {"name": "AC-2(j)","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-2-j","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization: j. 
Reviews accounts for compliance with account management requirements [Assignment: organization-defined frequency].","checks_status": {"fail": 0,"pass": 0,"total": 12,"manual": 0}},"ac-5-c": {"name": "AC-5(c)","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-5-c","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Separation Of Duties (AC-5)"}],"description": "The organization: c. Defines information system access authorizations to support separation of duties.","checks_status": {"fail": 0,"pass": 0,"total": 8,"manual": 0}},"au-7-1": {"name": "AU-7(1) Automatic Processing","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-7-1","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Reduction And Report Generation (AU-7)"}],"description": "The information system provides the capability to process audit records for events of interest based on [Assignment: organization-defined audit fields within audit records].","checks_status": {"fail": 1,"pass": 0,"total": 5,"manual": 0}},"au-9-2": {"name": "AU-9(2) Audit Backup On Separate Physical Systems / Components","checks": {"s3_bucket_object_versioning": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-9-2","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Protection of Audit Information (AU-9)"}],"description": "The information system backs up audit records at least weekly onto a physically different system or system component than the system or component being audited.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"cm-7-a": {"name": "CM-7(a)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm-7-a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Least Functionality (CM-7)"}],"description": "The organization: a. 
Configures the information system to provide only essential capabilities.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cm-8-1": {"name": "CM-8(1)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm-8-1","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Information System Component Inventory (CM-8)"}],"description": "The organization updates the inventory of information system components as an integral part of component installations, removals, and information system updates.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"cp-9-b": {"name": "CP-9(b)","checks": {"efs_have_backup_enabled": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"redshift_cluster_automated_snapshot": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cp-9-b","Section": "Contingency Planning (CP)","Service": "aws","SubGroup": null,"SubSection": "Information System Backup (CP-9)"}],"description": "The organization: b. Conducts backups of system-level information contained in the information system (daily incremental; weekly full).","checks_status": {"fail": 2,"pass": 1,"total": 7,"manual": 0}},"ia-2-1": {"name": "IA-2(1) Network Access To Privileged Accounts","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-2-1","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Identification and Authentication (Organizational users) (IA-2)"}],"description": "The information system implements multi-factor authentication for network access to privileged accounts.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ia-5-4": {"name": "IA-5(4) Automated Support For Password Strength Determination","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-5-4","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "The organization employs automated tools to determine if password authenticators are sufficiently strong to satisfy [Assignment: organization-defined requirements].","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"ia-5-7": {"name": "IA-5(7) No Embedded Unencrypted Static Authenticators","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-5-7","Section": "Identification and Authentication (IA)","Service": "codebuild","SubGroup": null,"SubSection": "Authenticator Management (IA-5)"}],"description": "The organization ensures that unencrypted static authenticators are not embedded in applications or access scripts or stored on function keys.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"ir-4-1": {"name": "IR-4(1) Automated Incident Handling Processes","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": 
"ir-4-1","Section": "Incident Response (IR)","Service": "aws","SubGroup": null,"SubSection": "Incident Handling (IR-4)"}],"description": "The organization employs automated mechanisms to support the incident handling process.","checks_status": {"fail": 1,"pass": 2,"total": 7,"manual": 0}},"ir-6-1": {"name": "IR-6(1) Automated Reporting","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir-6-1","Section": "Incident Response (IR)","Service": "aws","SubGroup": null,"SubSection": "Incident Reporting (IR-6)"}],"description": "The organization employs automated mechanisms to assist in the reporting of security incidents.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"ir-7-1": {"name": "IR-7(1) Automation Support For Availability Of Information / Support","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ir-7-1","Section": "Incident Response (IR)","Service": "aws","SubGroup": null,"SubSection": "Incident Response Assistance (IR-7)"}],"description": "The organization employs automated mechanisms to increase the availability of incident response-related information and support.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"sa-3-a": {"name": "SA-3(a)","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sa-3-a","Section": "System and Services Acquisition (SA)","Service": "aws","SubGroup": null,"SubSection": "System Development Life Cycle (SA-3)"}],"description": "The organization: a. Manages the information system using [Assignment: organization-defined system development life cycle] that incorporates information security considerations.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sc-7-3": {"name": "SC-7(3) Access Points","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-7-3","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": null,"SubSection": "Boundary Protection (SC-7)"}],"description": "The organization limits the number of external network connections to the information system.","checks_status": {"fail": 3,"pass": 6,"total": 17,"manual": 0}},"sc-8-1": {"name": "SC-8(1) Cryptographic Or Alternate Physical Protection","checks": {"elb_ssl_listeners": "FAIL","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_secure_transport_policy": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sc-8-1","Section": "System and Communications Protection (SC)","Service": "aws","SubGroup": 
null,"SubSection": "Transmission Integrity (SC-8)"}],"description": "The information system implements cryptographic mechanisms to [Selection (one or more): prevent unauthorized disclosure of information; detect changes to information] during transmission unless otherwise protected by [Assignment: organization-defined alternative physical safeguards].","checks_status": {"fail": 3,"pass": 1,"total": 5,"manual": 0}},"si-2-2": {"name": "Automated Flaw Remediation Status (SI-2(2))","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si-2-2","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Flaw Remediation (SI-2)"}],"description": "The organization employs automated mechanisms at least monthly to determine the state of information system components with regard to flaw remediation.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"si-4-1": {"name": "SI-4(1) System-Wide Intrusion Detection System","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "si-4-1","Section": "System and Information Integrity (SI)","Service": "guarduty","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization connects and configures individual intrusion detection tools into an information system-wide intrusion detection system.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"si-4-2": {"name": "SI-4(2) Automated Tools For Real-Time Analysis","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si-4-2","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization employs automated tools to support near real-time analysis of events.","checks_status": {"fail": 0,"pass": 4,"total": 12,"manual": 0}},"si-4-4": {"name": "SI-4(4) Inbound and Outbound Communications Traffic","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si-4-4","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The information system monitors inbound and outbound communications traffic continuously for unusual or unauthorized activities or conditions.","checks_status": {"fail": 0,"pass": 4,"total": 12,"manual": 0}},"si-4-5": 
{"name": "SI-4(5) System-Generated Alerts","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si-4-5","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The information system alerts organization-defined personnel or roles when the following indications of compromise or potential compromise occur: [Assignment: organization-defined compromise indicators].","checks_status": {"fail": 0,"pass": 4,"total": 12,"manual": 0}},"si-7-1": {"name": "SI-7(1) Integrity Checks","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si-7-1","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Software, Firmware, and Information Integrity (SI-7)"}],"description": "The information system performs an integrity check of security relevant events at least monthly.","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"ac-17-1": {"name": "AC-17(1) Automated Monitoring/Control","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-17-1","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "The information system monitors and controls remote access methods.","checks_status": {"fail": 3,"pass": 8,"total": 19,"manual": 0}},"ac-17-2": {"name": "AC-17(2) Protection Of Confidentiality/Integrity Using Encryption","checks": {"elb_ssl_listeners": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-17-2","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Remote Access (AC-17)"}],"description": "The information system implements cryptographic mechanisms to protect the confidentiality and integrity of remote access sessions.","checks_status": {"fail": 2,"pass": 1,"total": 3,"manual": 0}},"ac-21-b": {"name": "AC-21(b)","checks": {"ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": 
null,"rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ac-21-b","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Information Sharing (AC-21)"}],"description": "The organization: b. Employs [Assignment: organization-defined automated mechanisms or manual processes] to assist users in making information sharing/collaboration decisions.","checks_status": {"fail": 3,"pass": 4,"total": 15,"manual": 0}},"ac-6-10": {"name": "AC-6(10) Prohibit Non-Privileged Users From Executing Privileged Functions","checks": {"iam_no_root_access_key": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-6-10","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Least Privilege (AC-6)"}],"description": "The information system prevents non-privileged users from executing privileged functions to include disabling, circumventing, or altering implemented security safeguards/countermeasures.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"si-4-16": {"name": "SI-4(16) Correlate Monitoring Information","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "si-4-16","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization correlates information from monitoring tools employed throughout the information system.","checks_status": {"fail": 0,"pass": 3,"total": 7,"manual": 0}},"au-2-a-d": {"name": "AU-2(a)(d)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-2-a-d","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Events (AU-2)"}],"description": "The organization: a. Determines that the information system is capable of auditing the following events: Successful and unsuccessful account logon events, account management events, object access, policy change, privilege functions, process tracking, and system events. For Web applications: all administrator activity, authentication checks, authorization checks, data deletions, data access, data changes, and permission changes. d. 
Determines that the following events are to be audited within the information system: [organization-defined subset of the auditable events defined in AU-2 a to be audited continually for each identified event].","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"au-6-1-3": {"name": "AU-6(1)(3)","checks": {"elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-6-1-3","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Review, Analysis And Reporting (AU-6)"}],"description": "(1) The organization employs automated mechanisms to integrate audit review, analysis, and reporting processes to support organizational processes for investigation and response to suspicious activities. (3) The organization analyzes and correlates audit records across different repositories to gain organization-wide situational awareness.","checks_status": {"fail": 7,"pass": 4,"total": 19,"manual": 0}},"ca-7-a-b": {"name": "CA-7(a)(b)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ec2_instance_imdsv2_enabled": "PASS","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"rds_instance_enhanced_monitoring_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ca-7-a-b","Section": "Security Assessment And Authorization (CA)","Service": "aws","SubGroup": null,"SubSection": "Continuous Monitoring (CA-7)"}],"description": "The organization develops a continuous monitoring strategy and implements a continuous monitoring program that includes: a. Establishment of [Assignment: organization-defined metrics] to be monitored; b. Establishment of [Assignment: organization-defined frequencies] for monitoring and [Assignment: organization-defined frequencies] for assessments supporting such monitoring.","checks_status": {"fail": 1,"pass": 4,"total": 13,"manual": 0}},"cm-8-3-a": {"name": "CM-8(3)(a)","checks": {"guardduty_is_enabled": "PASS","ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cm-8-3-a","Section": "Configuration Management (CM)","Service": "aws","SubGroup": null,"SubSection": "Information System Component Inventory (CM-8)"}],"description": "The organization: a. 
Employs automated mechanisms continuously, using automated mechanisms with a maximum five-minute delay in detection, to detect the presence of unauthorized hardware, software, and firmware components within the information system","checks_status": {"fail": 2,"pass": 1,"total": 4,"manual": 0}},"ia-2-1-2": {"name": "IA-2(1)(2)","checks": {"iam_root_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-2-1-2","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "IA-2(1) Network Access To Privileged Accounts"}],"description": "(1) The information system implements multifactor authentication for network access to privileged accounts. (2) The information system implements multifactor authentication for network access to non-privileged accounts.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"ac-2-12-a": {"name": "AC-2(12)(a)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ac-2-12-a","Section": "Access Control (AC)","Service": "aws","SubGroup": null,"SubSection": "Account Management (AC-2)"}],"description": "The organization: a. Monitors information system accounts for [Assignment: organization-defined atypical use].","checks_status": {"fail": 0,"pass": 2,"total": 2,"manual": 0}},"au-12-a-c": {"name": "AU-12(a)(c)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "au-12-a-c","Section": "Audit and Accountability (AU)","Service": "aws","SubGroup": null,"SubSection": "Audit Generation (AU-12)"}],"description": "The information system: a. Provides audit record generation capability for the auditable events defined in AU-2 a. at all information system and network components where audit capability is deployed/available c. Generates audit records for the events defined in AU-2 d. with the content defined in AU-3.","checks_status": {"fail": 6,"pass": 2,"total": 12,"manual": 0}},"si-4-a-b-c": {"name": "SI-4(a)(b)(c)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_waf_acl_attached": "FAIL","ec2_instance_imdsv2_enabled": "PASS","apigateway_restapi_waf_acl_attached": "FAIL","guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "si-4-a-b-c","Section": "System and Information Integrity (SI)","Service": "aws","SubGroup": null,"SubSection": "Information System Monitoring (SI-4)"}],"description": "The organization: a. Monitors the information system to detect: 1. Attacks and indicators of potential attacks in accordance with [Assignment: organization-defined monitoring objectives]; and 2. 
Unauthorized local, network, and remote connections; b. Identifies unauthorized use of the information system through [Assignment: organization-defined techniques and methods]; c. Deploys monitoring devices: (i) strategically within the information system to collect organization-determined essential information; and (ii) at ad hoc locations within the system to track specific types of transactions of interest to the organization.","checks_status": {"fail": 4,"pass": 3,"total": 11,"manual": 0}},"ia-5-1-a-d-e": {"name": "IA-5(1)(a)(d)(e)","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "ia-5-1-a-d-e","Section": "Identification and Authentication (IA)","Service": "iam","SubGroup": null,"SubSection": "IA-5(1) Password-Based Authentication"}],"description": "The information system, for password-based authentication: a. Enforces minimum password complexity of [Assignment: organization-defined requirements for case sensitivity, number of characters, mix of upper-case letters, lower-case letters, numbers, and special characters, including minimum requirements for each type]; d. Enforces password minimum and maximum lifetime restrictions of [Assignment: organization-defined numbers for lifetime minimum, lifetime maximum]; e. Prohibits password reuse for 24 generations.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 20,"requirements_failed": 43,"requirements_manual": 1,"total_requirements": 64,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "d7024b7f-64c1-4d70-8f9d-7844c9194c42","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "gdpr_aws","framework": "GDPR","version": "","description": "The General Data Protection Regulation (GDPR) is a new European privacy law that became enforceable on May 25, 2018. The GDPR replaces the EU Data Protection Directive, also known as Directive 95/46/EC. It's intended to harmonize data protection laws throughout the European Union (EU). 
It does this by applying a single data protection law that's binding throughout each EU member state.","region": "eu-west-1","requirements": {"article_25": {"name": "Article 25 Data protection by design and by default","checks": {"iam_root_mfa_enabled": null,"vpc_flow_logs_enabled": "FAIL","iam_no_root_access_key": null,"iam_support_role_created": null,"kms_cmk_rotation_enabled": null,"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","config_recorder_all_regions_enabled": null,"iam_user_mfa_enabled_console_access": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","iam_password_policy_minimum_length_14": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_log_metric_filter_policy_changes": null,"iam_inline_policy_no_administrative_privileges": null,"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL","cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS","cloudwatch_log_metric_filter_authentication_failures": null,"iam_aws_attached_policy_no_administrative_privileges": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"iam_customer_attached_policy_no_administrative_privileges": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "article_25","Section": "Article 25 Data protection by design and by default","Service": "aws","SubGroup": null,"SubSection": null}],"description": "To obtain the latest version of the official guide, please visit https://gdpr-info.eu/art-25-gdpr/. Taking into account the state of the art, the cost of implementation and the nature, scope, context and purposes of processing as well as the risks of varying likelihood and severity for rights and freedoms of natural persons posed by the processing, the controller shall, both at the time of the determination of the means for processing and at the time of the processing itself, implement appropriate technical and organisational measures, such as pseudonymisation, which are designed to implement data-protection principles, such as data minimisation, in an effective manner and to integrate the necessary safeguards into the processing in order to meet the requirements of this Regulation and protect the rights of data subjects. The controller shall implement appropriate technical and organisational measures for ensuring that, by default, only personal data which are necessary for each specific purpose of the processing are processed. That obligation applies to the amount of personal data collected, the extent of their processing, the period of their storage and their accessibility. 
In particular, such measures shall ensure that by default personal data are not made accessible without the individual's intervention to an indefinite number of natural persons. An approved certification mechanism pursuant to Article 42 may be used as an element to demonstrate compliance with the requirements set out in paragraphs 1 and 2 of this Article.","checks_status": {"fail": 5,"pass": 2,"total": 42,"manual": 0}},"article_30": {"name": "Article 30 Records of processing activities","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","kms_cmk_rotation_enabled": null,"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "article_30","Section": "Article 30 Records of processing activities","Service": "aws","SubGroup": null,"SubSection": null}],"description": " To obtain the latest version of the official guide, please visit https://www.privacy-regulation.eu/en/article-30-records-of-processing-activities-GDPR.htm. Each controller and, where applicable, the controller's representative, shall maintain a record of processing activities under its responsibility. That record shall contain all of the following information like the name and contact details of the controller and where applicable, the joint controller, the controller's representative and the data protection officer, the purposes of the processing etc. Each processor and where applicable, the processor's representative shall maintain a record of all categories of processing activities carried out on behalf of a controller, containing the name and contact details of the processor or processors and of each controller on behalf of which the processor is acting, and, where applicable of the controller's or the processor's representative, and the data protection officer, where applicable, transfers of personal data to a third country or an international organisation, including the identification of that third country or international organisation and, in the case of transfers referred to in the second subparagraph of Article 49(1), the documentation of suitable safeguards. The records referred to in paragraphs 1 and 2 shall be in writing, including in electronic form. The controller or the processor and, where applicable, the controller's or the processor's representative, shall make the record available to the supervisory authority on request. 
The obligations referred to in paragraphs 1 and 2 shall not apply to an enterprise or an organisation employing fewer than 250 persons unless the processing it carries out is likely to result in a risk to the rights and freedoms of data subjects, the processing is not occasional, or the processing includes special categories of data as referred to in Article 9(1) or personal data relating to criminal convictions and offences referred to in Article 10.","checks_status": {"fail": 5,"pass": 1,"total": 12,"manual": 0}},"article_32": {"name": "Article 32 Security of processing","checks": {"elb_ssl_listeners": "FAIL","ec2_ebs_volume_encryption": "PASS","rds_instance_backup_enabled": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"acm_certificates_expiration_check": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","redshift_cluster_automated_snapshot": null,"cloudfront_distributions_https_enabled": null,"cloudtrail_log_file_validation_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "article_32","Section": "Article 32 Security of processing","Service": "aws","SubGroup": null,"SubSection": null}],"description": " To obtain the latest version of the official guide, please visit https://gdpr-info.eu/art-32-gdpr/. Taking into account the state of the art, the costs of implementation and the nature, scope, context and purposes of processing as well as the risk of varying likelihood and severity for the rights and freedoms of natural persons, the controller and the processor shall implement appropriate technical and organisational measures to ensure a level of security appropriate to the risk, including inter alia as appropriate. In assessing the appropriate level of security account shall be taken in particular of the risks that are presented by processing, in particular from accidental or unlawful destruction, loss, alteration, unauthorised disclosure of, or access to personal data transmitted, stored or otherwise processed. Adherence to an approved code of conduct as referred to in Article 40 or an approved certification mechanism as referred to in Article 42 may be used as an element by which to demonstrate compliance with the requirements set out in paragraph 1 of this Article. 
The controller and processor shall take steps to ensure that any natural person acting under the authority of the controller or the processor who has access to personal data does not process them except on instructions from the controller, unless he or she is required to do so by Union or Member State law.","checks_status": {"fail": 9,"pass": 4,"total": 25,"manual": 0}}},"requirements_passed": 0,"requirements_failed": 3,"requirements_manual": 0,"total_requirements": 3,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "df010cc1-e468-42d1-8b7c-37d614adf364","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "aws_well_architected_framework_security_pillar_aws","framework": "AWS-Well-Architected-Framework-Security-Pillar","version": "","description": "Best Practices for AWS Well-Architected Framework Security Pillar. The focus of this framework is the security pillar of the AWS Well-Architected Framework. It provides guidance to help you apply best practices and current recommendations in the design, delivery, and maintenance of secure AWS workloads.","region": "eu-west-1","requirements": {"SEC01-BP01": {"name": "SEC01-BP01","checks": {"organizations_account_part_of_organizations": null},"status": "PASS","attributes": [{"Name": "SEC01-BP01 Separate workloads using accounts","Section": "Security foundations","SubSection": "AWS account management and separation","Description": "Establish common guardrails and isolation between environments (such as production, development, and test) and workloads through a multi-account strategy. Account-level separation is strongly recommended, as it provides a strong isolation boundary for security, billing, and access.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_multi_accounts.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_multi_accounts","WellArchitectedQuestionId": "securely-operate"}],"description": "Establish common guardrails and isolation between environments (such as production, development, and test) and workloads through a multi-account strategy. Account-level separation is strongly recommended, as it provides a strong isolation boundary for security, billing, and access.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"SEC01-BP02": {"name": "SEC01-BP02","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Name": "SEC01-BP02 Secure account root user and properties","Section": "Security foundations","SubSection": "AWS account management and separation","Description": "The root user is the most privileged user in an AWS account, with full administrative access to all resources within the account, and in some cases cannot be constrained by security policies. 
Deactivating programmatic access to the root user, establishing appropriate controls for the root user, and avoiding routine use of the root user helps reduce the risk of inadvertent exposure of the root credentials and subsequent compromise of the cloud environment.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_aws_account.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_aws_account","WellArchitectedQuestionId": "securely-operate"}],"description": "The root user is the most privileged user in an AWS account, with full administrative access to all resources within the account, and in some cases cannot be constrained by security policies. Deactivating programmatic access to the root user, establishing appropriate controls for the root user, and avoiding routine use of the root user helps reduce the risk of inadvertent exposure of the root credentials and subsequent compromise of the cloud environment.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"SEC01-BP03": {"name": "SEC01-BP03","checks": {},"status": "PASS","attributes": [{"Name": "SEC01-BP03 Identify and validate control objectives","Section": "Security foundations","SubSection": "Operating your workloads securely","Description": "Based on your compliance requirements and risks identified from your threat model, derive and validate the control objectives and controls that you need to apply to your workload. Ongoing validation of control objectives and controls help you measure the effectiveness of risk mitigation.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_control_objectives.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_control_objectives","WellArchitectedQuestionId": "securely-operate"}],"description": "Based on your compliance requirements and risks identified from your threat model, derive and validate the control objectives and controls that you need to apply to your workload. Ongoing validation of control objectives and controls help you measure the effectiveness of risk mitigation.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC01-BP04": {"name": "SEC01-BP04","checks": {},"status": "PASS","attributes": [{"Name": "SEC01-BP04 Keep up-to-date with security threats","Section": "Security foundations","SubSection": "Operating your workloads securely","Description": "To help you define and implement appropriate controls, recognize attack vectors by staying up to date with the latest security threats. Consume AWS Managed Services to make it easier to receive notification of unexpected or unusual behavior in your AWS accounts. Investigate using AWS Partner tools or third-party threat information feeds as part of your security information flow. 
The Common Vulnerabilities and Exposures (CVE) List contains publicly disclosed cyber security vulnerabilities that you can use to stay up to date.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_updated_threats.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_updated_threats","WellArchitectedQuestionId": "securely-operate"}],"description": "To help you define and implement appropriate controls, recognize attack vectors by staying up to date with the latest security threats. Consume AWS Managed Services to make it easier to receive notification of unexpected or unusual behavior in your AWS accounts. Investigate using AWS Partner tools or third-party threat information feeds as part of your security information flow. The Common Vulnerabilities and Exposures (CVE) List contains publicly disclosed cyber security vulnerabilities that you can use to stay up to date.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC01-BP05": {"name": "SEC01-BP05","checks": {},"status": "PASS","attributes": [{"Name": "SEC01-BP05 Keep up-to-date with security recommendations","Section": "Security foundations","SubSection": "Operating your workloads securely","Description": "Stay up-to-date with both AWS and industry security recommendations to evolve the security posture of your workload. AWS Security Bulletins contain important information about security and privacy notifications.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_updated_recommendations.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_updated_recommendations","WellArchitectedQuestionId": "securely-operate"}],"description": "Stay up-to-date with both AWS and industry security recommendations to evolve the security posture of your workload. AWS Security Bulletins contain important information about security and privacy notifications.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC01-BP06": {"name": "SEC01-BP06","checks": {"ec2_instance_managed_by_ssm": "FAIL","ecr_repositories_scan_images_on_push_enabled": "FAIL","ecr_repositories_scan_vulnerabilities_in_latest_image": null},"status": "FAIL","attributes": [{"Name": "SEC01-BP06 Automate testing and validation of security controls in pipelines","Section": "Security foundations","SubSection": "Operating your workloads securely","Description": "Establish secure baselines and templates for security mechanisms that are tested and validated as part of your build, pipelines, and processes. Use tools and automation to test and validate all security controls continuously. For example, scan items such as machine images and infrastructure-as-code templates for security vulnerabilities, irregularities, and drift from an established baseline at each stage. AWS CloudFormation Guard can help you verify that CloudFormation templates are safe, save you time, and reduce the risk of configuration error. Reducing the number of security misconfigurations introduced into a production environment is critical: the more quality control and reduction of defects you can perform in the build process, the better. Design continuous integration and continuous deployment (CI/CD) pipelines to test for security issues whenever possible. 
CI/CD pipelines offer the opportunity to enhance security at each stage of build and delivery. CI/CD security tooling must also be kept updated to mitigate evolving threats. Track changes to your workload configuration to help with compliance auditing, change management, and investigations that may apply to you. You can use AWS Config to record and evaluate your AWS and third-party resources. It allows you to continuously audit and assess the overall compliance with rules and conformance packs, which are collections of rules with remediation actions. Change tracking should include planned changes, which are part of your organization's change control process (sometimes referred to as MACD: Move, Add, Change, Delete), unplanned changes, and unexpected changes, such as incidents. Changes might occur on the infrastructure, but they might also be related to other categories, such as changes in code repositories, machine images and application inventory changes, process and policy changes, or documentation changes.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_test_validate_pipeline.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_test_validate_pipeline","WellArchitectedQuestionId": "securely-operate"}],"description": "Establish secure baselines and templates for security mechanisms that are tested and validated as part of your build, pipelines, and processes. Use tools and automation to test and validate all security controls continuously. For example, scan items such as machine images and infrastructure-as-code templates for security vulnerabilities, irregularities, and drift from an established baseline at each stage. AWS CloudFormation Guard can help you verify that CloudFormation templates are safe, save you time, and reduce the risk of configuration error. Reducing the number of security misconfigurations introduced into a production environment is critical: the more quality control and reduction of defects you can perform in the build process, the better. Design continuous integration and continuous deployment (CI/CD) pipelines to test for security issues whenever possible. CI/CD pipelines offer the opportunity to enhance security at each stage of build and delivery. CI/CD security tooling must also be kept updated to mitigate evolving threats. Track changes to your workload configuration to help with compliance auditing, change management, and investigations that may apply to you. You can use AWS Config to record and evaluate your AWS and third-party resources. It allows you to continuously audit and assess the overall compliance with rules and conformance packs, which are collections of rules with remediation actions. Change tracking should include planned changes, which are part of your organization's change control process (sometimes referred to as MACD: Move, Add, Change, Delete), unplanned changes, and unexpected changes, such as incidents. 
Changes might occur on the infrastructure, but they might also be related to other categories, such as changes in code repositories, machine images and application inventory changes, process and policy changes, or documentation changes.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"SEC01-BP07": {"name": "SEC01-BP07","checks": {"wellarchitected_workload_no_high_or_medium_risks": "FAIL"},"status": "FAIL","attributes": [{"Name": "SEC01-BP07 Identify threats and prioritize mitigations using a threat model","Section": "Security foundations","SubSection": "Operating your workloads securely","Description": "Perform threat modeling to identify and maintain an up-to-date register of potential threats and associated mitigations for your workload. Prioritize your threats and adapt your security control mitigations to prevent, detect, and respond. Revisit and maintain this in the context of your workload, and the evolving security landscape.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_threat_model.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_threat_model","WellArchitectedQuestionId": "securely-operate"}],"description": "Perform threat modeling to identify and maintain an up-to-date register of potential threats and associated mitigations for your workload. Prioritize your threats and adapt your security control mitigations to prevent, detect, and respond. Revisit and maintain this in the context of your workload, and the evolving security landscape.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"SEC01-BP08": {"name": "SEC01-BP08","checks": {},"status": "PASS","attributes": [{"Name": "SEC01-BP08 Evaluate and implement new security services and features regularly","Section": "Security foundations","SubSection": "Operating your workloads securely","Description": "Evaluate and implement security services and features from AWS and AWS Partners that allow you to evolve the security posture of your workload. The AWS Security Blog highlights new AWS services and features, implementation guides, and general security guidance. What's New with AWS? is a great way to stay up to date with all new AWS features, services, and announcements.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_securely_operate_implement_services_features.html#implementation-guidance.","WellArchitectedPracticeId": "sec_securely_operate_implement_services_features","WellArchitectedQuestionId": "securely-operate"}],"description": "Evaluate and implement security services and features from AWS and AWS Partners that allow you to evolve the security posture of your workload. The AWS Security Blog highlights new AWS services and features, implementation guides, and general security guidance. What's New with AWS? 
is a great way to stay up to date with all new AWS features, services, and announcements.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC02-BP01": {"name": "SEC02-BP01","checks": {"iam_avoid_root_usage": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_user_hardware_mfa_enabled": null,"iam_user_two_active_access_key": null,"iam_user_mfa_enabled_console_access": null,"iam_user_no_setup_initial_access_key": null,"iam_password_policy_minimum_length_14": null,"directoryservice_supported_mfa_radius_enabled": null,"directoryservice_radius_server_security_protocol": null,"sagemaker_notebook_instance_root_access_disabled": null,"opensearch_service_domains_use_cognito_authentication_for_kibana": null},"status": "PASS","attributes": [{"Name": "SEC02-BP01 Use strong sign-in mechanisms","Section": "Identity and access management","SubSection": "Identity management","Description": "Sign-ins (authentication using sign-in credentials) can present risks when not using mechanisms like multi-factor authentication (MFA), especially in situations where sign-in credentials have been inadvertently disclosed or are easily guessed. Use strong sign-in mechanisms to reduce these risks by requiring MFA and strong password policies.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_identities_enforce_mechanisms.html#implementation-guidance.","WellArchitectedPracticeId": "sec_identities_enforce_mechanisms","WellArchitectedQuestionId": "identities"}],"description": "Sign-ins (authentication using sign-in credentials) can present risks when not using mechanisms like multi-factor authentication (MFA), especially in situations where sign-in credentials have been inadvertently disclosed or are easily guessed. 
Use strong sign-in mechanisms to reduce these risks by requiring MFA and strong password policies.","checks_status": {"fail": 0,"pass": 0,"total": 15,"manual": 0}},"SEC02-BP02": {"name": "SEC02-BP02","checks": {"iam_rotate_access_key_90_days": null},"status": "PASS","attributes": [{"Name": "SEC02-BP02 Use temporary credentials","Section": "Identity and access management","SubSection": "Identity management","Description": "When doing any type of authentication, it's best to use temporary credentials instead of long-term credentials to reduce or eliminate risks, such as credentials being inadvertently disclosed, shared, or stolen.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_identities_unique.html#implementation-guidance.","WellArchitectedPracticeId": "sec_identities_unique","WellArchitectedQuestionId": "identities"}],"description": "When doing any type of authentication, it's best to use temporary credentials instead of long-term credentials to reduce or eliminate risks, such as credentials being inadvertently disclosed, shared, or stolen.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"SEC02-BP03": {"name": "SEC02-BP03","checks": {"ssm_document_secrets": "PASS","ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","awslambda_function_no_secrets_in_code": "PASS","cloudformation_stack_outputs_find_secrets": "PASS","awslambda_function_no_secrets_in_variables": "PASS","ecs_task_definitions_no_environment_secrets": "PASS","autoscaling_find_secrets_ec2_launch_configuration": "PASS"},"status": "PASS","attributes": [{"Name": "SEC02-BP03 Store and use secrets securely","Section": "Identity and access management","SubSection": "Identity management","Description": "A workload requires an automated capability to prove its identity to databases, resources, and third-party services. This is accomplished using secret access credentials, such as API access keys, passwords, and OAuth tokens. Using a purpose-built service to store, manage, and rotate these credentials helps reduce the likelihood that those credentials become compromised.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_identities_secrets.html#implementation-guidance.","WellArchitectedPracticeId": "sec_identities_secrets","WellArchitectedQuestionId": "identities"}],"description": "A workload requires an automated capability to prove its identity to databases, resources, and third-party services. This is accomplished using secret access credentials, such as API access keys, passwords, and OAuth tokens. Using a purpose-built service to store, manage, and rotate these credentials helps reduce the likelihood that those credentials become compromised.","checks_status": {"fail": 0,"pass": 8,"total": 8,"manual": 0}},"SEC02-BP04": {"name": "SEC02-BP04","checks": {"iam_role_cross_service_confused_deputy_prevention": null},"status": "PASS","attributes": [{"Name": "SEC02-BP04 Rely on a centralized identity provider","Section": "Identity and access management","SubSection": "Identity management","Description": "For workforce identities, rely on an identity provider that enables you to manage identities in a centralized place. This makes it easier to manage access across multiple applications and services, because you are creating, managing, and revoking access from a single location. 
For example, if someone leaves your organization, you can revoke access for all applications and services (including AWS) from one location. This reduces the need for multiple credentials and provides an opportunity to integrate with existing human resources (HR) processes. For federation with individual AWS accounts, you can use centralized identities for AWS with a SAML 2.0-based provider with AWS Identity and Access Management. You can use any provider, whether hosted by you in AWS, external to AWS, or supplied by an AWS Partner, that is compatible with the SAML 2.0 protocol. You can use federation between your AWS account and your chosen provider to grant a user or application access to call AWS API operations by using a SAML assertion to get temporary security credentials. Web-based single sign-on is also supported, allowing users to sign in to the AWS Management Console from your sign-in website. For federation to multiple accounts in your AWS Organizations, you can configure your identity source in AWS IAM Identity Center (successor to AWS Single Sign-On) (IAM Identity Center), and specify where your users and groups are stored. Once configured, your identity provider is your source of truth, and information can be synchronized using the System for Cross-domain Identity Management (SCIM) v2.0 protocol. You can then look up users or groups and grant them IAM Identity Center access to AWS accounts, cloud applications, or both. IAM Identity Center integrates with AWS Organizations, which enables you to configure your identity provider once and then grant access to existing and new accounts managed in your organization. IAM Identity Center provides you with a default store, which you can use to manage your users and groups. If you choose to use the IAM Identity Center store, create your users and groups and assign their level of access to your AWS accounts and applications, keeping in mind the best practice of least privilege. Alternatively, you can choose to Connect to Your External Identity Provider using SAML 2.0, or Connect to Your Microsoft AD Directory using AWS Directory Service. Once configured, you can sign into the AWS Management Console, or the AWS mobile app, by authenticating through your central identity provider. For managing end-users or consumers of your workloads, such as a mobile app, you can use Amazon Cognito. It provides authentication, authorization, and user management for your web and mobile apps. Your users can sign in directly with sign-in credentials, or through a third party, such as Amazon, Apple, Facebook, or Google.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_identities_identity_provider.html#implementation-guidance.","WellArchitectedPracticeId": "sec_identities_identity_provider","WellArchitectedQuestionId": "identities"}],"description": "For workforce identities, rely on an identity provider that enables you to manage identities in a centralized place. This makes it easier to manage access across multiple applications and services, because you are creating, managing, and revoking access from a single location. For example, if someone leaves your organization, you can revoke access for all applications and services (including AWS) from one location. This reduces the need for multiple credentials and provides an opportunity to integrate with existing human resources (HR) processes. 
For federation with individual AWS accounts, you can use centralized identities for AWS with a SAML 2.0-based provider with AWS Identity and Access Management. You can use any provider, whether hosted by you in AWS, external to AWS, or supplied by an AWS Partner, that is compatible with the SAML 2.0 protocol. You can use federation between your AWS account and your chosen provider to grant a user or application access to call AWS API operations by using a SAML assertion to get temporary security credentials. Web-based single sign-on is also supported, allowing users to sign in to the AWS Management Console from your sign-in website. For federation to multiple accounts in your AWS Organizations, you can configure your identity source in AWS IAM Identity Center (successor to AWS Single Sign-On) (IAM Identity Center), and specify where your users and groups are stored. Once configured, your identity provider is your source of truth, and information can be synchronized using the System for Cross-domain Identity Management (SCIM) v2.0 protocol. You can then look up users or groups and grant them IAM Identity Center access to AWS accounts, cloud applications, or both. IAM Identity Center integrates with AWS Organizations, which enables you to configure your identity provider once and then grant access to existing and new accounts managed in your organization. IAM Identity Center provides you with a default store, which you can use to manage your users and groups. If you choose to use the IAM Identity Center store, create your users and groups and assign their level of access to your AWS accounts and applications, keeping in mind the best practice of least privilege. Alternatively, you can choose to Connect to Your External Identity Provider using SAML 2.0, or Connect to Your Microsoft AD Directory using AWS Directory Service. Once configured, you can sign into the AWS Management Console, or the AWS mobile app, by authenticating through your central identity provider. For managing end-users or consumers of your workloads, such as a mobile app, you can use Amazon Cognito. It provides authentication, authorization, and user management for your web and mobile apps. Your users can sign in directly with sign-in credentials, or through a third party, such as Amazon, Apple, Facebook, or Google.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"SEC02-BP05": {"name": "SEC02-BP05","checks": {"kms_cmk_rotation_enabled": null,"iam_rotate_access_key_90_days": null,"secretsmanager_automatic_rotation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Name": "SEC02-BP05 Audit and rotate credentials periodically","Section": "Identity and access management","SubSection": "Identity management","Description": "When you cannot rely on temporary credentials and require long-term credentials, audit credentials to ensure that the defined controls, for example multi-factor authentication (MFA), are enforced, rotated regularly, and have the appropriate access level. Periodic validation, preferably through an automated tool, is necessary to verify that the correct controls are enforced. For human identities, you should require users to change their passwords periodically and retire access keys in favor of temporary credentials. As you are moving from users to centralized identities, you can generate a credential report to audit your users. We also recommend that you enforce MFA settings in your identity provider. You can set up AWS Config Rules to monitor these settings. 
For machine identities, you should rely on temporary credentials using IAM roles. For situations where this is not possible, frequent auditing and rotating access keys are necessary.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_identities_audit.html#implementation-guidance.","WellArchitectedPracticeId": "sec_identities_audit","WellArchitectedQuestionId": "identities"}],"description": "When you cannot rely on temporary credentials and require long-term credentials, audit credentials to ensure that the defined controls, for example multi-factor authentication (MFA), are enforced, rotated regularly, and have the appropriate access level. Periodic validation, preferably through an automated tool, is necessary to verify that the correct controls are enforced. For human identities, you should require users to change their passwords periodically and retire access keys in favor of temporary credentials. As you are moving from users to centralized identities, you can generate a credential report to audit your users. We also recommend that you enforce MFA settings in your identity provider. You can set up AWS Config Rules to monitor these settings. For machine identities, you should rely on temporary credentials using IAM roles. For situations where this is not possible, frequent auditing and rotating access keys are necessary.","checks_status": {"fail": 1,"pass": 0,"total": 3,"manual": 0}},"SEC02-BP06": {"name": "SEC02-BP06","checks": {"iam_policy_allows_privilege_escalation": null,"iam_policy_attached_only_to_group_or_roles": null},"status": "PASS","attributes": [{"Name": "SEC02-BP06 Leverage user groups and attributes","Section": "Identity and access management","SubSection": "Identity management","Description": "As the number of users you manage grows, you will need to determine ways to organize them so that you can manage them at scale. Place users with common security requirements in groups defined by your identity provider, and put mechanisms in place to ensure that user attributes that may be used for access control (for example, department or location) are correct and updated. Use these groups and attributes to control access, rather than individual users. This allows you to manage access centrally by changing a user's group membership or attributes once with a permission set, rather than updating many individual policies when a user's access needs change. You can use AWS IAM Identity Center (successor to AWS Single Sign-On) (IAM Identity Center) to manage user groups and attributes. IAM Identity Center supports most commonly used attributes whether they are entered manually during user creation or automatically provisioned using a synchronization engine, such as defined in the System for Cross-Domain Identity Management (SCIM) specification.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_identities_groups_attributes.html#implementation-guidance.","WellArchitectedPracticeId": "sec_identities_groups_attributes","WellArchitectedQuestionId": "identities"}],"description": "As the number of users you manage grows, you will need to determine ways to organize them so that you can manage them at scale. 
Place users with common security requirements in groups defined by your identity provider, and put mechanisms in place to ensure that user attributes that may be used for access control (for example, department or location) are correct and updated. Use these groups and attributes to control access, rather than individual users. This allows you to manage access centrally by changing a user's group membership or attributes once with a permission set, rather than updating many individual policies when a user's access needs change. You can use AWS IAM Identity Center (successor to AWS Single Sign-On) (IAM Identity Center) to manage user groups and attributes. IAM Identity Center supports most commonly used attributes whether they are entered manually during user creation or automatically provisioned using a synchronization engine, such as defined in the System for Cross-Domain Identity Management (SCIM) specification.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"SEC03-BP01": {"name": "SEC03-BP01","checks": {"ec2_instance_imdsv2_enabled": "PASS","ec2_instance_profile_attached": "PASS","cloudwatch_cross_account_sharing_disabled": null},"status": "PASS","attributes": [{"Name": "SEC03-BP01 Define access requirements","Section": "Identity and access management","SubSection": "Permissions management","Description": "Each component or resource of your workload needs to be accessed by administrators, end users, or other components. Have a clear definition of who or what should have access to each component, and choose the appropriate identity type and method of authentication and authorization.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_define.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_define","WellArchitectedQuestionId": "permissions"}],"description": "Each component or resource of your workload needs to be accessed by administrators, end users, or other components. Have a clear definition of who or what should have access to each component, and choose the appropriate identity type and method of authentication and authorization.","checks_status": {"fail": 0,"pass": 2,"total": 3,"manual": 0}},"SEC03-BP02": {"name": "SEC03-BP02","checks": {"ec2_instance_profile_attached": "PASS","iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"opensearch_service_domains_internal_user_database_enabled": null,"iam_customer_unattached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Name": "SEC03-BP02 Grant least privilege access","Section": "Identity and access management","SubSection": "Permissions management","Description": "Grant only the access that identities require by allowing access to specific actions on specific AWS resources under specific conditions. Rely on groups and identity attributes to dynamically set permissions at scale, rather than defining permissions for individual users. For example, you can allow a group of developers access to manage only resources for their project. 
This way, when a developer is removed from the group, access for the developer is revoked everywhere that group was used for access control, without requiring any changes to the access policies.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_least_privileges.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_least_privileges","WellArchitectedQuestionId": "permissions"}],"description": "Grant only the access that identities require by allowing access to specific actions on specific AWS resources under specific conditions. Rely on groups and identity attributes to dynamically set permissions at scale, rather than defining permissions for individual users. For example, you can allow a group of developers access to manage only resources for their project. This way, when a developer is removed from the group, access for the developer is revoked everywhere that group was used for access control, without requiring any changes to the access policies.","checks_status": {"fail": 0,"pass": 1,"total": 6,"manual": 0}},"SEC03-BP03": {"name": "SEC03-BP03","checks": {"account_maintain_current_contact_details": null,"account_security_contact_information_is_registered": null,"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Name": "SEC03-BP03 Establish emergency access process","Section": "Identity and access management","SubSection": "Permissions management","Description": "Establish a process that allows emergency access to your workload in the unlikely event of an automated process or pipeline issue. This will help you rely on least privilege access, but ensure users can obtain the right level of access when they require it. For example, establish a process for administrators to verify and approve their request, such as an emergency AWS cross-account role for access, or a specific process for administrators to follow to validate and approve an emergency request.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_emergency_process.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_emergency_process","WellArchitectedQuestionId": "permissions"}],"description": "Establish a process that allows emergency access to your workload in the unlikely event of an automated process or pipeline issue. This will help you rely on least privilege access, but ensure users can obtain the right level of access when they require it. For example, establish a process for administrators to verify and approve their request, such as an emergency AWS cross-account role for access, or a specific process for administrators to follow to validate and approve an emergency request.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"SEC03-BP04": {"name": "SEC03-BP04","checks": {"iam_customer_attached_policy_no_administrative_privileges": null,"iam_customer_unattached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Name": "SEC03-BP04 Reduce permissions continuously","Section": "Identity and access management","SubSection": "Permissions management","Description": "As your teams determine what access is required, remove unneeded permissions and establish review processes to achieve least privilege permissions. 
Continually monitor and remove unused identities and permissions for both human and machine access.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_continuous_reduction.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_continuous_reduction","WellArchitectedQuestionId": "permissions"}],"description": "As your teams determine what access is required, remove unneeded permissions and establish review processes to achieve least privilege permissions. Continually monitor and remove unused identities and permissions for both human and machine access.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"SEC03-BP05": {"name": "SEC03-BP05","checks": {"organizations_account_part_of_organizations": null},"status": "PASS","attributes": [{"Name": "SEC03-BP05 Define permission guardrails for your organization","Section": "Identity and access management","SubSection": "Permissions management","Description": "Establish common controls that restrict access to all identities in your organization. For example, you can restrict access to specific AWS Regions, or prevent your operators from deleting common resources, such as an IAM role used for your central security team.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_define_guardrails.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_define_guardrails","WellArchitectedQuestionId": "permissions"}],"description": "Establish common controls that restrict access to all identities in your organization. For example, you can restrict access to specific AWS Regions, or prevent your operators from deleting common resources, such as an IAM role used for your central security team.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"SEC03-BP06": {"name": "SEC03-BP06","checks": {"ec2_elastic_ip_unassigned": "FAIL","elbv2_listeners_underneath": "PASS","codebuild_project_older_90_days": "FAIL","appstream_fleet_maximum_session_duration": null,"ecr_repositories_lifecycle_policy_enabled": "FAIL","appstream_fleet_session_disconnect_timeout": null,"appstream_fleet_session_idle_disconnect_timeout": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "FAIL","attributes": [{"Name": "SEC03-BP06 Manage access based on lifecycle","Section": "Identity and access management","SubSection": "Permissions management","Description": "Integrate access controls with operator and application lifecycle and your centralized federation provider. For example, remove a user's access when they leave the organization or change roles. As you manage workloads using separate accounts, there will be cases where you need to share resources between those accounts. We recommend that you share resources using AWS Resource Access Manager (AWS RAM). This service enables you to easily and securely share AWS resources within your AWS Organizations and Organizational Units. Using AWS RAM, access to shared resources is automatically granted or revoked as accounts are moved in and out of the Organization or Organization Unit with which they are shared. 
This helps ensure that resources are only shared with the accounts that you intend.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_lifecycle.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_lifecycle","WellArchitectedQuestionId": "permissions"}],"description": "Integrate access controls with operator and application lifecycle and your centralized federation provider. For example, remove a user's access when they leave the organization or change roles. As you manage workloads using separate accounts, there will be cases where you need to share resources between those accounts. We recommend that you share resources using AWS Resource Access Manager (AWS RAM). This service enables you to easily and securely share AWS resources within your AWS Organizations and Organizational Units. Using AWS RAM, access to shared resources is automatically granted or revoked as accounts are moved in and out of the Organization or Organization Unit with which they are shared. This helps ensure that resources are only shared with the accounts that you intend.","checks_status": {"fail": 4,"pass": 1,"total": 9,"manual": 0}},"SEC03-BP07": {"name": "SEC03-BP07","checks": {"ec2_ami_public": null,"elb_internet_facing": "FAIL","elbv2_internet_facing": "PASS","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"apigateway_restapi_public": "FAIL","efs_not_publicly_accessible": "FAIL","rds_snapshots_public_access": "PASS","ssm_documents_set_as_public": "PASS","awslambda_function_url_public": null,"rds_instance_no_public_access": "PASS","emr_cluster_publicly_accesible": null,"redshift_cluster_public_access": null,"kms_key_not_publicly_accessible": null,"awslambda_function_url_cors_policy": null,"sns_topics_not_publicly_accessible": "PASS","sqs_queues_not_publicly_accessible": "PASS","eks_cluster_not_publicly_accessible": null,"glacier_vaults_policy_public_access": null,"s3_bucket_policy_public_write_access": "PASS","emr_cluster_master_nodes_no_public_ip": null,"s3_account_level_public_access_blocks": null,"ecr_repositories_not_publicly_accessible": "PASS","emr_cluster_account_public_block_enabled": "PASS","awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","appstream_fleet_default_internet_access_disabled": null,"opensearch_service_domains_not_publicly_accessible": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS","codeartifact_packages_external_public_publishing_disabled": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Name": "SEC03-BP07 Analyze public and cross-account access","Section": "Identity and access management","SubSection": "Permissions management","Description": "Continuously monitor findings that highlight public and cross-account access. Reduce public access and cross-account access to only resources that require this type of access.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_analyze_cross_account.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_analyze_cross_account","WellArchitectedQuestionId": "permissions"}],"description": "Continuously monitor findings that highlight public and cross-account access. 
Reduce public access and cross-account access to only resources that require this type of access.","checks_status": {"fail": 4,"pass": 13,"total": 35,"manual": 0}},"SEC03-BP08": {"name": "SEC03-BP08","checks": {"ssm_document_secrets": "PASS","awslambda_function_not_publicly_accessible": "PASS","codebuild_project_user_controlled_buildspec": "PASS","opensearch_service_domains_not_publicly_accessible": null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "PASS","attributes": [{"Name": "SEC03-BP08 Share resources securely within your organization","Section": "Identity and access management","SubSection": "Permissions management","Description": "Govern the consumption of shared resources across accounts or within your AWS Organizations. Monitor shared resources and review shared resource access.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_permissions_share_securely.html#implementation-guidance.","WellArchitectedPracticeId": "sec_permissions_share_securely","WellArchitectedQuestionId": "permissions"}],"description": "Govern the consumption of shared resources across accounts or within your AWS Organizations. Monitor shared resources and review shared resource access.","checks_status": {"fail": 0,"pass": 3,"total": 5,"manual": 0}},"SEC04-BP01": {"name": "SEC04-BP01","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"apigatewayv2_api_access_logging_enabled": "FAIL","s3_bucket_server_access_logging_enabled": "FAIL","cloudfront_distributions_logging_enabled": null,"rds_instance_integration_cloudwatch_logs": "FAIL","acm_certificates_transparency_logs_enabled": "PASS","eks_control_plane_logging_all_types_enabled": null,"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL","opensearch_service_domains_audit_logging_enabled": null,"directoryservice_directory_log_forwarding_enabled": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"route53_public_hosted_zones_cloudwatch_logging_enabled": null,"awslambda_function_invoke_api_operations_cloudtrail_logging_enabled": "PASS"},"status": "FAIL","attributes": [{"Name": "SEC04-BP01 Configure service and application logging","Section": "Detection","SubSection": "Detection","Description": "Retain security event logs from services and applications. This is a fundamental principle of security for audit, investigations, and operational use cases, and a common security requirement driven by governance, risk, and compliance (GRC) standards, policies, and procedures.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_detect_investigate_events_app_service_logging.html#implementation-guidance.","WellArchitectedPracticeId": "sec_detect_investigate_events_app_service_logging","WellArchitectedQuestionId": "detect-investigate-events"}],"description": "Retain security event logs from services and applications. 
This is a fundamental principle of security for audit, investigations, and operational use cases, and a common security requirement driven by governance, risk, and compliance (GRC) standards, policies, and procedures.","checks_status": {"fail": 8,"pass": 3,"total": 21,"manual": 0}},"SEC04-BP02": {"name": "SEC04-BP02","checks": {"vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS","config_recorder_all_regions_enabled": null},"status": "FAIL","attributes": [{"Name": "SEC04-BP02 Analyze logs, findings, and metrics centrally","Section": "Detection","SubSection": "Detection","Description": "Security operations teams rely on the collection of logs and the use of search tools to discover potential events of interest, which might indicate unauthorized activity or unintentional change. However, simply analyzing collected data and manually processing information is insufficient to keep up with the volume of information flowing from complex architectures. Analysis and reporting alone don't facilitate the assignment of the right resources to work an event in a timely fashion. A best practice for building a mature security operations team is to deeply integrate the flow of security events and findings into a notification and workflow system such as a ticketing system, a bug or issue system, or other security information and event management (SIEM) system. This takes the workflow out of email and static reports, and allows you to route, escalate, and manage events or findings. Many organizations are also integrating security alerts into their chat or collaboration, and developer productivity platforms. For organizations embarking on automation, an API-driven, low-latency ticketing system offers considerable flexibility when planning what to automate first. This best practice applies not only to security events generated from log messages depicting user activity or network events, but also from changes detected in the infrastructure itself. The ability to detect change, determine whether a change was appropriate, and then route that information to the correct remediation workflow is essential in maintaining and validating a secure architecture, in the context of changes where the nature of their undesirability is sufficiently subtle that their execution cannot currently be prevented with a combination of AWS Identity and Access Management (IAM) and AWS Organizations configuration. Amazon GuardDuty and AWS Security Hub provide aggregation, deduplication, and analysis mechanisms for log records that are also made available to you via other AWS services. GuardDuty ingests, aggregates, and analyzes information from sources such as AWS CloudTrail management and data events, VPC DNS logs, and VPC Flow Logs. Security Hub can ingest, aggregate, and analyze output from GuardDuty, AWS Config, Amazon Inspector, Amazon Macie, AWS Firewall Manager, and a significant number of third-party security products available in the AWS Marketplace, and if built accordingly, your own code. 
Both GuardDuty and Security Hub have an Administrator-Member model that can aggregate findings and insights across multiple accounts, and Security Hub is often used by customers who have an on-premises SIEM as an AWS-side log and alert preprocessor and aggregator from which they can then ingest Amazon EventBridge through an AWS Lambda-based processor and forwarder.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_detect_investigate_events_analyze_all.html#implementation-guidance.","WellArchitectedPracticeId": "sec_detect_investigate_events_analyze_all","WellArchitectedQuestionId": "detect-investigate-events"}],"description": "Security operations teams rely on the collection of logs and the use of search tools to discover potential events of interest, which might indicate unauthorized activity or unintentional change. However, simply analyzing collected data and manually processing information is insufficient to keep up with the volume of information flowing from complex architectures. Analysis and reporting alone don't facilitate the assignment of the right resources to work an event in a timely fashion. A best practice for building a mature security operations team is to deeply integrate the flow of security events and findings into a notification and workflow system such as a ticketing system, a bug or issue system, or other security information and event management (SIEM) system. This takes the workflow out of email and static reports, and allows you to route, escalate, and manage events or findings. Many organizations are also integrating security alerts into their chat or collaboration, and developer productivity platforms. For organizations embarking on automation, an API-driven, low-latency ticketing system offers considerable flexibility when planning what to automate first. This best practice applies not only to security events generated from log messages depicting user activity or network events, but also from changes detected in the infrastructure itself. The ability to detect change, determine whether a change was appropriate, and then route that information to the correct remediation workflow is essential in maintaining and validating a secure architecture, in the context of changes where the nature of their undesirability is sufficiently subtle that their execution cannot currently be prevented with a combination of AWS Identity and Access Management (IAM) and AWS Organizations configuration. Amazon GuardDuty and AWS Security Hub provide aggregation, deduplication, and analysis mechanisms for log records that are also made available to you via other AWS services. GuardDuty ingests, aggregates, and analyzes information from sources such as AWS CloudTrail management and data events, VPC DNS logs, and VPC Flow Logs. Security Hub can ingest, aggregate, and analyze output from GuardDuty, AWS Config, Amazon Inspector, Amazon Macie, AWS Firewall Manager, and a significant number of third-party security products available in the AWS Marketplace, and if built accordingly, your own code. 
Both GuardDuty and Security Hub have an Administrator-Member model that can aggregate findings and insights across multiple accounts, and Security Hub is often used by customers who have an on-premises SIEM as an AWS-side log and alert preprocessor and aggregator from which they can then ingest Amazon EventBridge through an AWS Lambda-based processor and forwarder.","checks_status": {"fail": 1,"pass": 1,"total": 3,"manual": 0}},"SEC04-BP03": {"name": "SEC04-BP03","checks": {"elb_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","cloudtrail_multi_region_enabled": "PASS"},"status": "FAIL","attributes": [{"Name": "SEC04-BP03 Automate response to events","Section": "Detection","SubSection": "Detection","Description": "Using automation to investigate and remediate events reduces human effort and error, and enables you to scale investigation capabilities. Regular reviews will help you tune automation tools, and continuously iterate. In AWS, investigating events of interest and information on potentially unexpected changes into an automated workflow can be achieved using Amazon EventBridge. This service provides a scalable rules engine designed to broker both native AWS event formats (such as AWS CloudTrail events), as well as custom events you can generate from your application. Amazon GuardDuty also allows you to route events to a workflow system for those building incident response systems (AWS Step Functions), or to a central Security Account, or to a bucket for further analysis. Detecting change and routing this information to the correct workflow can also be accomplished using AWS Config Rules and Conformance Packs. AWS Config detects changes to in-scope services (though with higher latency than EventBridge) and generates events that can be parsed using AWS Config Rules for rollback, enforcement of compliance policy, and forwarding of information to systems, such as change management platforms and operational ticketing systems. As well as writing your own Lambda functions to respond to AWS Config events, you can also take advantage of the AWS Config Rules Development Kit, and a library of open source AWS Config Rules. Conformance packs are a collection of AWS Config Rules and remediation actions you deploy as a single entity authored as a YAML template. A sample conformance pack template is available for the Well-Architected Security Pillar.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_detect_investigate_events_auto_response.html#implementation-guidance.","WellArchitectedPracticeId": "sec_detect_investigate_events_auto_response","WellArchitectedQuestionId": "detect-investigate-events"}],"description": "Using automation to investigate and remediate events reduces human effort and error, and enables you to scale investigation capabilities. Regular reviews will help you tune automation tools, and continuously iterate. In AWS, investigating events of interest and information on potentially unexpected changes into an automated workflow can be achieved using Amazon EventBridge. This service provides a scalable rules engine designed to broker both native AWS event formats (such as AWS CloudTrail events), as well as custom events you can generate from your application. Amazon GuardDuty also allows you to route events to a workflow system for those building incident response systems (AWS Step Functions), or to a central Security Account, or to a bucket for further analysis. 
Detecting change and routing this information to the correct workflow can also be accomplished using AWS Config Rules and Conformance Packs. AWS Config detects changes to in-scope services (though with higher latency than EventBridge) and generates events that can be parsed using AWS Config Rules for rollback, enforcement of compliance policy, and forwarding of information to systems, such as change management platforms and operational ticketing systems. As well as writing your own Lambda functions to respond to AWS Config events, you can also take advantage of the AWS Config Rules Development Kit, and a library of open source AWS Config Rules. Conformance packs are a collection of AWS Config Rules and remediation actions you deploy as a single entity authored as a YAML template. A sample conformance pack template is available for the Well-Architected Security Pillar.","checks_status": {"fail": 2,"pass": 1,"total": 3,"manual": 0}},"SEC04-BP04": {"name": "SEC04-BP04","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"directoryservice_directory_monitor_notifications": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Name": "SEC04-BP04 Implement actionable security events","Section": "Detection","SubSection": "Detection","Description": "Create alerts that are sent to and can be actioned by your team. Ensure that alerts include relevant information for the team to take action. For each detective mechanism you have, you should also have a process, in the form of a runbook or playbook, to investigate. For example, when you enable Amazon GuardDuty, it generates different findings. You should have a runbook entry for each finding type, for example, if a trojan is discovered, your runbook has simple instructions that instruct someone to investigate and remediate.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_detect_investigate_events_actionable_events.html#implementation-guidance.","WellArchitectedPracticeId": "sec_detect_investigate_events_actionable_events","WellArchitectedQuestionId": "detect-investigate-events"}],"description": "Create alerts that are sent to and can be actioned by your team. Ensure that alerts include relevant information for the team to take action. For each detective mechanism you have, you should also have a process, in the form of a runbook or playbook, to investigate. 
For example, when you enable Amazon GuardDuty, it generates different findings. You should have a runbook entry for each finding type, for example, if a trojan is discovered, your runbook has simple instructions that instruct someone to investigate and remediate.","checks_status": {"fail": 1,"pass": 3,"total": 20,"manual": 0}},"SEC05-BP01": {"name": "SEC05-BP01","checks": {"cloudfront_distributions_using_waf": null,"apigateway_restapi_waf_acl_attached": "FAIL","eks_cluster_not_publicly_accessible": null,"sagemaker_models_vpc_settings_configured": null,"vpc_endpoint_connections_trust_boundaries": "FAIL","awslambda_function_not_publicly_accessible": "PASS","sagemaker_models_network_isolation_enabled": null,"sagemaker_training_jobs_vpc_settings_configured": null,"sagemaker_training_jobs_network_isolation_enabled": null,"opensearch_service_domains_not_publicly_accessible": null,"sagemaker_notebook_instance_vpc_settings_configured": null,"vpc_endpoint_services_allowed_principals_trust_boundaries": null},"status": "FAIL","attributes": [{"Name": "SEC05-BP01 Create network layers","Section": "Infrastructure protection","SubSection": "Protecting networks","Description": "Group components that share reachability requirements into layers. For example, a database cluster in a virtual private cloud (VPC) with no need for internet access should be placed in subnets with no route to or from the internet. In a serverless workload operating without a VPC, similar layering and segmentation with microservices can achieve the same goal. Components such as Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon Relational Database Service (Amazon RDS) database clusters, and AWS Lambda functions that share reachability requirements can be segmented into layers formed by subnets. For example, an Amazon RDS database cluster in a VPC with no need for internet access should be placed in subnets with no route to or from the internet. This layered approach for the controls mitigates the impact of a single layer misconfiguration, which could allow unintended access. For Lambda, you can run your functions in your VPC to take advantage of VPC-based controls. For network connectivity that can include thousands of VPCs, AWS accounts, and on-premises networks, you should use AWS Transit Gateway. It acts as a hub that controls how traffic is routed among all the connected networks, which act like spokes. Traffic between an Amazon Virtual Private Cloud and AWS Transit Gateway remains on the AWS private network, which reduces external threat vectors such as distributed denial of service (DDoS) attacks and common exploits, such as SQL injection, cross-site scripting, cross-site request forgery, or abuse of broken authentication code. AWS Transit Gateway inter-region peering also encrypts inter-region traffic with no single point of failure or bandwidth bottleneck.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_network_protection_create_layers.html#implementation-guidance.","WellArchitectedPracticeId": "sec_network_protection_create_layers","WellArchitectedQuestionId": "network-protection"}],"description": "Group components that share reachability requirements into layers. For example, a database cluster in a virtual private cloud (VPC) with no need for internet access should be placed in subnets with no route to or from the internet. 
In a serverless workload operating without a VPC, similar layering and segmentation with microservices can achieve the same goal. Components such as Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon Relational Database Service (Amazon RDS) database clusters, and AWS Lambda functions that share reachability requirements can be segmented into layers formed by subnets. For example, an Amazon RDS database cluster in a VPC with no need for internet access should be placed in subnets with no route to or from the internet. This layered approach for the controls mitigates the impact of a single layer misconfiguration, which could allow unintended access. For Lambda, you can run your functions in your VPC to take advantage of VPC-based controls. For network connectivity that can include thousands of VPCs, AWS accounts, and on-premises networks, you should use AWS Transit Gateway. It acts as a hub that controls how traffic is routed among all the connected networks, which act like spokes. Traffic between an Amazon Virtual Private Cloud and AWS Transit Gateway remains on the AWS private network, which reduces external threat vectors such as distributed denial of service (DDoS) attacks and common exploits, such as SQL injection, cross-site scripting, cross-site request forgery, or abuse of broken authentication code. AWS Transit Gateway inter-region peering also encrypts inter-region traffic with no single point of failure or bandwidth bottleneck.","checks_status": {"fail": 2,"pass": 1,"total": 12,"manual": 0}},"SEC05-BP02": {"name": "SEC05-BP02","checks": {"ec2_ebs_public_snapshot": "PASS","s3_bucket_no_mfa_delete": "FAIL","s3_bucket_acl_prohibited": "FAIL","apigatewayv2_api_authorizers_enabled": "FAIL","apigateway_restapi_authorizers_enabled": "PASS","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Name": "SEC05-BP02 Control traffic at all layers","Section": "Infrastructure protection","SubSection": "Protecting networks","Description": "When architecting your network topology, you should examine the connectivity requirements of each component. For example, if a component requires internet accessibility (inbound and outbound), connectivity to VPCs, edge services, and external data centers. A VPC allows you to define your network topology that spans an AWS Region with a private IPv4 address range that you set, or an IPv6 address range AWS selects. You should apply multiple controls with a defense in depth approach for both inbound and outbound traffic, including the use of security groups (stateful inspection firewall), Network ACLs, subnets, and route tables. Within a VPC, you can create subnets in an Availability Zone. Each subnet can have an associated route table that defines routing rules for managing the paths that traffic takes within the subnet. You can define an internet routable subnet by having a route that goes to an internet or NAT gateway attached to the VPC, or through another VPC. When an instance, Amazon Relational Database Service (Amazon RDS) database, or other service is launched within a VPC, it has its own security group per network interface. 
For example, instances within a database tier security group only accept traffic from instances within the application tier, by reference to the security groups applied to the instances involved. Unless you are using non-TCP protocols, it shouldn't be necessary to have an Amazon Elastic Compute Cloud (Amazon EC2) instance directly accessible by the internet (even with ports restricted by security groups) without a load balancer, or CloudFront. This helps protect it from unintended access through an operating system or application issue. A subnet can also have a network ACL attached to it, which acts as a stateless firewall. You should configure the network ACL to narrow the scope of traffic allowed between layers; note that you need to define both inbound and outbound rules. Some AWS services require components to access the internet for making API calls, where AWS API endpoints are located. Other AWS services use VPC endpoints within your Amazon VPCs. Many AWS services, including Amazon S3 and Amazon DynamoDB, support VPC endpoints, and this technology has been generalized in AWS PrivateLink. We recommend you use this approach to access AWS services, third-party services, and your own services hosted in other VPCs securely. All network traffic on AWS PrivateLink stays on the global AWS backbone and never traverses the internet. Connectivity can only be initiated by the consumer of the service, and not by the provider of the service. Using AWS PrivateLink for external service access allows you to create air-gapped VPCs with no internet access and helps protect your VPCs from external threat vectors. Third-party services can use AWS PrivateLink to allow their customers to connect to the services from their VPCs over private IP addresses. For VPC assets that need to make outbound connections to the internet, these can be made outbound only (one-way) through an AWS managed NAT gateway, outbound only internet gateway, or web proxies that you create and manage.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_network_protection_layered.html#implementation-guidance.","WellArchitectedPracticeId": "sec_network_protection_layered","WellArchitectedQuestionId": "network-protection"}],"description": "When architecting your network topology, you should examine the connectivity requirements of each component. For example, if a component requires internet accessibility (inbound and outbound), connectivity to VPCs, edge services, and external data centers. A VPC allows you to define your network topology that spans an AWS Region with a private IPv4 address range that you set, or an IPv6 address range AWS selects. You should apply multiple controls with a defense in depth approach for both inbound and outbound traffic, including the use of security groups (stateful inspection firewall), Network ACLs, subnets, and route tables. Within a VPC, you can create subnets in an Availability Zone. Each subnet can have an associated route table that defines routing rules for managing the paths that traffic takes within the subnet. You can define an internet routable subnet by having a route that goes to an internet or NAT gateway attached to the VPC, or through another VPC. When an instance, Amazon Relational Database Service (Amazon RDS) database, or other service is launched within a VPC, it has its own security group per network interface. 
This firewall is outside the operating system layer and can be used to define rules for allowed inbound and outbound traffic. You can also define relationships between security groups. For example, instances within a database tier security group only accept traffic from instances within the application tier, by reference to the security groups applied to the instances involved. Unless you are using non-TCP protocols, it shouldn't be necessary to have an Amazon Elastic Compute Cloud (Amazon EC2) instance directly accessible by the internet (even with ports restricted by security groups) without a load balancer, or CloudFront. This helps protect it from unintended access through an operating system or application issue. A subnet can also have a network ACL attached to it, which acts as a stateless firewall. You should configure the network ACL to narrow the scope of traffic allowed between layers; note that you need to define both inbound and outbound rules. Some AWS services require components to access the internet for making API calls, where AWS API endpoints are located. Other AWS services use VPC endpoints within your Amazon VPCs. Many AWS services, including Amazon S3 and Amazon DynamoDB, support VPC endpoints, and this technology has been generalized in AWS PrivateLink. We recommend you use this approach to access AWS services, third-party services, and your own services hosted in other VPCs securely. All network traffic on AWS PrivateLink stays on the global AWS backbone and never traverses the internet. Connectivity can only be initiated by the consumer of the service, and not by the provider of the service. Using AWS PrivateLink for external service access allows you to create air-gapped VPCs with no internet access and helps protect your VPCs from external threat vectors. Third-party services can use AWS PrivateLink to allow their customers to connect to the services from their VPCs over private IP addresses. 
For VPC assets that need to make outbound connections to the internet, these can be made outbound only (one-way) through an AWS managed NAT gateway, outbound only internet gateway, or web proxies that you create and manage.","checks_status": {"fail": 4,"pass": 2,"total": 7,"manual": 0}},"SEC05-BP03": {"name": "SEC05-BP03","checks": {"elbv2_waf_acl_attached": "FAIL","ec2_securitygroup_not_used": "FAIL","elbv2_desync_mitigation_mode": "FAIL","ec2_securitygroup_from_launch_wizard": "FAIL","route53_domains_transferlock_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","route53_domains_privacy_protection_enabled": null,"ec2_securitygroup_with_many_ingress_egress_rules": "PASS","shield_advanced_protection_in_global_accelerators": null,"shield_advanced_protection_in_route53_hosted_zones": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","shield_advanced_protection_in_internet_facing_load_balancers": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Name": "SEC05-BP03 Automate network protections","Section": "Infrastructure protection","SubSection": "Protecting networks","Description": "Automate protection mechanisms to provide a self-defending network based on threat intelligence and anomaly detection. For example, intrusion detection and prevention tools that can adapt to current threats and reduce their impact. 
A web application firewall is an example of where you can automate network protection, for example, by using the AWS WAF Security Automations solution (https://github.com/awslabs/aws-waf-security-automations) to automatically block requests originating from IP addresses associated with known threat actors.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_network_protection_auto_protect.html#implementation-guidance.","WellArchitectedPracticeId": "sec_network_protection_auto_protect","WellArchitectedQuestionId": "network-protection"}],"description": "Automate protection mechanisms to provide a self-defending network based on threat intelligence and anomaly detection. For example, intrusion detection and prevention tools that can adapt to current threats and reduce their impact. A web application firewall is an example of where you can automate network protection, for example, by using the AWS WAF Security Automations solution (https://github.com/awslabs/aws-waf-security-automations) to automatically block requests originating from IP addresses associated with known threat actors.","checks_status": {"fail": 8,"pass": 16,"total": 33,"manual": 0}},"SEC05-BP04": {"name": "SEC05-BP04","checks": {"guardduty_is_enabled": "PASS","vpc_flow_logs_enabled": "FAIL","apigateway_restapi_authorizers_enabled": "PASS"},"status": "FAIL","attributes": [{"Name": "SEC05-BP04 Implement inspection and protection","Section": "Infrastructure protection","SubSection": "Protecting networks","Description": "Inspect and filter your traffic at each layer. You can inspect your VPC configurations for potential unintended access using VPC Network Access Analyzer. You can specify your network access requirements and identify potential network paths that do not meet them. For components transacting over HTTP-based protocols, a web application firewall can help protect from common attacks. AWS WAF is a web application firewall that lets you monitor and block HTTP(s) requests that match your configurable rules that are forwarded to an Amazon API Gateway API, Amazon CloudFront, or an Application Load Balancer. To get started with AWS WAF, you can use AWS Managed Rules in combination with your own, or use existing partner integrations. For managing AWS WAF, AWS Shield Advanced protections, and Amazon VPC security groups across AWS Organizations, you can use AWS Firewall Manager. It allows you to centrally configure and manage firewall rules across your accounts and applications, making it easier to scale enforcement of common rules. It also enables you to rapidly respond to attacks, using AWS Shield Advanced, or solutions that can automatically block unwanted requests to your web applications. Firewall Manager also works with AWS Network Firewall. AWS Network Firewall is a managed service that uses a rules engine to give you fine-grained control over both stateful and stateless network traffic. It supports the Suricata compatible open source intrusion prevention system (IPS) specifications for rules to help protect your workload.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_network_protection_inspection.html#implementation-guidance.","WellArchitectedPracticeId": "sec_network_protection_inspection","WellArchitectedQuestionId": "network-protection"}],"description": "Inspect and filter your traffic at each layer. 
You can inspect your VPC configurations for potential unintended access using VPC Network Access Analyzer. You can specify your network access requirements and identify potential network paths that do not meet them. For components transacting over HTTP-based protocols, a web application firewall can help protect from common attacks. AWS WAF is a web application firewall that lets you monitor and block HTTP(s) requests that match your configurable rules that are forwarded to an Amazon API Gateway API, Amazon CloudFront, or an Application Load Balancer. To get started with AWS WAF, you can use AWS Managed Rules in combination with your own, or use existing partner integrations. For managing AWS WAF, AWS Shield Advanced protections, and Amazon VPC security groups across AWS Organizations, you can use AWS Firewall Manager. It allows you to centrally configure and manage firewall rules across your accounts and applications, making it easier to scale enforcement of common rules. It also enables you to rapidly respond to attacks, using AWS Shield Advanced, or solutions that can automatically block unwanted requests to your web applications. Firewall Manager also works with AWS Network Firewall. AWS Network Firewall is a managed service that uses a rules engine to give you fine-grained control over both stateful and stateless network traffic. It supports the Suricata compatible open source intrusion prevention system (IPS) specifications for rules to help protect your workload.","checks_status": {"fail": 1,"pass": 2,"total": 3,"manual": 0}},"SEC06-BP01": {"name": "SEC06-BP01","checks": {"ec2_instance_imdsv2_enabled": "PASS","ssm_managed_compliant_patching": "FAIL","redshift_cluster_automatic_upgrades": null,"cloudtrail_log_file_validation_enabled": "FAIL","rds_instance_minor_version_upgrade_enabled": "PASS","ec2_instance_internet_facing_with_instance_profile": "FAIL","opensearch_service_domains_updated_to_the_latest_service_software_version": null},"status": "FAIL","attributes": [{"Name": "SEC06-BP01 Perform vulnerability management","Section": "Infrastructure protection","SubSection": "Protecting compute","Description": "Frequently scan and patch for vulnerabilities in your code, dependencies, and in your infrastructure to help protect against new threats. Starting with the configuration of your compute infrastructure, you can automate creating and updating resources using AWS CloudFormation. CloudFormation allows you to create templates written in YAML or JSON, either using AWS examples or by writing your own. This allows you to create secure-by-default infrastructure templates that you can verify with CloudFormation Guard, to save you time and reduce the risk of configuration error. You can build your infrastructure and deploy your applications using continuous delivery, for example with AWS CodePipeline, to automate the building, testing, and release. You are responsible for patch management for your AWS resources, including Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon Machine Images (AMIs), and many other compute resources. For Amazon EC2 instances, AWS Systems Manager Patch Manager automates the process of patching managed instances with both security related and other types of updates. You can use Patch Manager to apply patches for both operating systems and applications. (On Windows Server, application support is limited to updates for Microsoft applications.) 
You can use Patch Manager to install Service Packs on Windows instances and perform minor version upgrades on Linux instances. You can patch fleets of Amazon EC2 instances or your on-premises servers and virtual machines (VMs) by operating system type. This includes supported versions of Windows Server, Amazon Linux, Amazon Linux 2, CentOS, Debian Server, Oracle Linux, Red Hat Enterprise Linux (RHEL), SUSE Linux Enterprise Server (SLES), and Ubuntu Server. You can scan instances to see only a report of missing patches, or you can scan and automatically install all missing patches.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_compute_vulnerability_management.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_compute_vulnerability_management","WellArchitectedQuestionId": "protect-compute"}],"description": "Frequently scan and patch for vulnerabilities in your code, dependencies, and in your infrastructure to help protect against new threats. Starting with the configuration of your compute infrastructure, you can automate creating and updating resources using AWS CloudFormation. CloudFormation allows you to create templates written in YAML or JSON, either using AWS examples or by writing your own. This allows you to create secure-by-default infrastructure templates that you can verify with CloudFormation Guard, to save you time and reduce the risk of configuration error. You can build your infrastructure and deploy your applications using continuous delivery, for example with AWS CodePipeline, to automate the building, testing, and release. You are responsible for patch management for your AWS resources, including Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon Machine Images (AMIs), and many other compute resources. For Amazon EC2 instances, AWS Systems Manager Patch Manager automates the process of patching managed instances with both security related and other types of updates. You can use Patch Manager to apply patches for both operating systems and applications. (On Windows Server, application support is limited to updates for Microsoft applications.) You can use Patch Manager to install Service Packs on Windows instances and perform minor version upgrades on Linux instances. You can patch fleets of Amazon EC2 instances or your on-premises servers and virtual machines (VMs) by operating system type. This includes supported versions of Windows Server, Amazon Linux, Amazon Linux 2, CentOS, Debian Server, Oracle Linux, Red Hat Enterprise Linux (RHEL), SUSE Linux Enterprise Server (SLES), and Ubuntu Server. You can scan instances to see only a report of missing patches, or you can scan and automatically install all missing patches.","checks_status": {"fail": 3,"pass": 2,"total": 7,"manual": 0}},"SEC06-BP02": {"name": "SEC06-BP02","checks": {"awslambda_function_not_publicly_accessible": "PASS","ecr_repositories_scan_images_on_push_enabled": "FAIL"},"status": "FAIL","attributes": [{"Name": "SEC06-BP02 Reduce attack surface","Section": "Infrastructure protection","SubSection": "Protecting compute","Description": "Reduce your exposure to unintended access by hardening operating systems and minimizing the components, libraries, and externally consumable services in use. 
Start by reducing unused components, whether they are operating system packages or applications, for Amazon Elastic Compute Cloud (Amazon EC2)-based workloads, or external software modules in your code, for all workloads. You can find many hardening and security configuration guides for common operating systems and server software. For example, you can start with the Center for Internet Security and iterate. In Amazon EC2, you can create your own Amazon Machine Images (AMIs), which you have patched and hardened, to help you meet the specific security requirements for your organization. The patches and other security controls you apply on the AMI are effective at the point in time in which they were created; they are not dynamic unless you modify them after launching, for example, with AWS Systems Manager. You can simplify the process of building secure AMIs with EC2 Image Builder. EC2 Image Builder significantly reduces the effort required to create and maintain golden images without writing and maintaining automation. When software updates become available, Image Builder automatically produces a new image without requiring users to manually initiate image builds. EC2 Image Builder allows you to easily validate the functionality and security of your images before using them in production with AWS-provided tests and your own tests. You can also apply AWS-provided security settings to further secure your images to meet internal security criteria. For example, you can produce images that conform to the Security Technical Implementation Guide (STIG) standard using AWS-provided templates. Using third-party static code analysis tools, you can identify common security issues such as unchecked function input bounds, as well as applicable common vulnerabilities and exposures (CVEs). You can use Amazon CodeGuru for supported languages. Dependency checking tools can also be used to determine whether libraries your code links against are the latest versions, are themselves free of CVEs, and have licensing conditions that meet your software policy requirements. Using Amazon Inspector, you can perform configuration assessments against your instances for known CVEs, assess against security benchmarks, and automate the notification of defects. Amazon Inspector runs on production instances or in a build pipeline, and it notifies developers and engineers when findings are present. You can access findings programmatically and direct your team to backlogs and bug-tracking systems. EC2 Image Builder can be used to maintain server images (AMIs) with automated patching, AWS-provided security policy enforcement, and other customizations. When using containers, implement ECR Image Scanning in your build pipeline and on a regular basis against your image repository to look for CVEs in your containers. While Amazon Inspector and other tools are effective at identifying configurations and any CVEs that are present, other methods are required to test your workload at the application level. 
Fuzzing is a well-known method of finding bugs using automation to inject malformed data into input fields and other areas of your application.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_compute_reduce_surface.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_compute_reduce_surface","WellArchitectedQuestionId": "protect-compute"}],"description": "Reduce your exposure to unintended access by hardening operating systems and minimizing the components, libraries, and externally consumable services in use. Start by reducing unused components, whether they are operating system packages or applications, for Amazon Elastic Compute Cloud (Amazon EC2)-based workloads, or external software modules in your code, for all workloads. You can find many hardening and security configuration guides for common operating systems and server software. For example, you can start with the Center for Internet Security and iterate. In Amazon EC2, you can create your own Amazon Machine Images (AMIs), which you have patched and hardened, to help you meet the specific security requirements for your organization. The patches and other security controls you apply on the AMI are effective at the point in time in which they were created; they are not dynamic unless you modify them after launching, for example, with AWS Systems Manager. You can simplify the process of building secure AMIs with EC2 Image Builder. EC2 Image Builder significantly reduces the effort required to create and maintain golden images without writing and maintaining automation. When software updates become available, Image Builder automatically produces a new image without requiring users to manually initiate image builds. EC2 Image Builder allows you to easily validate the functionality and security of your images before using them in production with AWS-provided tests and your own tests. You can also apply AWS-provided security settings to further secure your images to meet internal security criteria. For example, you can produce images that conform to the Security Technical Implementation Guide (STIG) standard using AWS-provided templates. Using third-party static code analysis tools, you can identify common security issues such as unchecked function input bounds, as well as applicable common vulnerabilities and exposures (CVEs). You can use Amazon CodeGuru for supported languages. Dependency checking tools can also be used to determine whether libraries your code links against are the latest versions, are themselves free of CVEs, and have licensing conditions that meet your software policy requirements. Using Amazon Inspector, you can perform configuration assessments against your instances for known CVEs, assess against security benchmarks, and automate the notification of defects. Amazon Inspector runs on production instances or in a build pipeline, and it notifies developers and engineers when findings are present. You can access findings programmatically and direct your team to backlogs and bug-tracking systems. EC2 Image Builder can be used to maintain server images (AMIs) with automated patching, AWS-provided security policy enforcement, and other customizations. 
When using containers, implement ECR Image Scanning in your build pipeline and on a regular basis against your image repository to look for CVEs in your containers. While Amazon Inspector and other tools are effective at identifying configurations and any CVEs that are present, other methods are required to test your workload at the application level. Fuzzing is a well-known method of finding bugs using automation to inject malformed data into input fields and other areas of your application.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"SEC06-BP03": {"name": "SEC06-BP03","checks": {},"status": "PASS","attributes": [{"Name": "SEC06-BP03 Implement managed services","Section": "Infrastructure protection","SubSection": "Protecting compute","Description": "Implement services that manage resources, such as Amazon Relational Database Service (Amazon RDS), AWS Lambda, and Amazon Elastic Container Service (Amazon ECS), to reduce your security maintenance tasks as part of the shared responsibility model. For example, Amazon RDS helps you set up, operate, and scale a relational database, and automates administration tasks such as hardware provisioning, database setup, patching, and backups. This means you have more free time to focus on securing your application in other ways described in the AWS Well-Architected Framework. Lambda lets you run code without provisioning or managing servers, so you only need to focus on the connectivity, invocation, and security at the code level, not the infrastructure or operating system.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_compute_implement_managed_services.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_compute_implement_managed_services","WellArchitectedQuestionId": "protect-compute"}],"description": "Implement services that manage resources, such as Amazon Relational Database Service (Amazon RDS), AWS Lambda, and Amazon Elastic Container Service (Amazon ECS), to reduce your security maintenance tasks as part of the shared responsibility model. For example, Amazon RDS helps you set up, operate, and scale a relational database, and automates administration tasks such as hardware provisioning, database setup, patching, and backups. This means you have more free time to focus on securing your application in other ways described in the AWS Well-Architected Framework. Lambda lets you run code without provisioning or managing servers, so you only need to focus on the connectivity, invocation, and security at the code level, not the infrastructure or operating system.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC06-BP04": {"name": "SEC06-BP04","checks": {"ec2_instance_managed_by_ssm": "FAIL","ec2_instance_profile_attached": "PASS"},"status": "FAIL","attributes": [{"Name": "SEC06-BP04 Automate compute protection","Section": "Infrastructure protection","SubSection": "Protecting compute","Description": "Automate your protective compute mechanisms including vulnerability management, reduction in attack surface, and management of resources. 
The automation will help you invest time in securing other aspects of your workload, and reduce the risk of human error.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_compute_auto_protection.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_compute_auto_protection","WellArchitectedQuestionId": "protect-compute"}],"description": "Automate your protective compute mechanisms including vulnerability management, reduction in attack surface, and management of resources. The automation will help you invest time in securing other aspects of your workload, and reduce the risk of human error.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"SEC06-BP05": {"name": "SEC06-BP05","checks": {"ec2_instance_managed_by_ssm": "FAIL","ec2_instance_profile_attached": "PASS"},"status": "FAIL","attributes": [{"Name": "SEC06-BP05 Enable people to perform actions at a distance","Section": "Infrastructure protection","SubSection": "Protecting compute","Description": "Removing the ability for interactive access reduces the risk of human error, and the potential for manual configuration or management. For example, use a change management workflow to deploy Amazon Elastic Compute Cloud (Amazon EC2) instances using infrastructure-as-code, then manage Amazon EC2 instances using tools such as AWS Systems Manager instead of allowing direct access or through a bastion host. AWS Systems Manager can automate a variety of maintenance and deployment tasks, using features including automation workflows, documents (playbooks), and the run command. AWS CloudFormation stacks build from pipelines and can automate your infrastructure deployment and management tasks without using the AWS Management Console or APIs directly.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_compute_actions_distance.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_compute_actions_distance","WellArchitectedQuestionId": "protect-compute"}],"description": "Removing the ability for interactive access reduces the risk of human error, and the potential for manual configuration or management. For example, use a change management workflow to deploy Amazon Elastic Compute Cloud (Amazon EC2) instances using infrastructure-as-code, then manage Amazon EC2 instances using tools such as AWS Systems Manager instead of allowing direct access or through a bastion host. AWS Systems Manager can automate a variety of maintenance and deployment tasks, using features including automation workflows, documents (playbooks), and the run command. AWS CloudFormation stacks build from pipelines and can automate your infrastructure deployment and management tasks without using the AWS Management Console or APIs directly.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"SEC06-BP06": {"name": "SEC06-BP06","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Name": "SEC06-BP06 Validate software integrity","Section": "Infrastructure protection","SubSection": "Protecting compute","Description": "Implement mechanisms (for example, code signing) to validate that the software, code and libraries used in the workload are from trusted sources and have not been tampered with. 
For example, you should verify the code signing certificate of binaries and scripts to confirm the author, and ensure it has not been tampered with since created by the author. AWS Signer can help ensure the trust and integrity of your code by centrally managing the code-signing lifecycle, including signing certification and public and private keys. You can learn how to use advanced patterns and best practices for code signing with AWS Lambda. Additionally, a checksum of software that you download, compared to that of the checksum from the provider, can help ensure it has not been tampered with.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_compute_validate_software_integrity.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_compute_validate_software_integrity","WellArchitectedQuestionId": "protect-compute"}],"description": "Implement mechanisms (for example, code signing) to validate that the software, code and libraries used in the workload are from trusted sources and have not been tampered with. For example, you should verify the code signing certificate of binaries and scripts to confirm the author, and ensure it has not been tampered with since created by the author. AWS Signer can help ensure the trust and integrity of your code by centrally managing the code-signing lifecycle, including signing certification and public and private keys. You can learn how to use advanced patterns and best practices for code signing with AWS Lambda. Additionally, a checksum of software that you download, compared to that of the checksum from the provider, can help ensure it has not been tampered with.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"SEC07-BP01": {"name": "SEC07-BP01","checks": {},"status": "PASS","attributes": [{"Name": "SEC07-BP01 Identify the data within your workload","Section": "Data protection","SubSection": "Data classification","Description": "It's critical to understand the type and classification of data your workload is processing, the associated business processes, where the data is stored, and who is the data owner. You should also have an understanding of the applicable legal and compliance requirements of your workload, and what data controls need to be enforced. Identifying data is the first step in the data classification journey.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_data_classification_identify_data.html#implementation-guidance.","WellArchitectedPracticeId": "sec_data_classification_identify_data","WellArchitectedQuestionId": "data-classification"}],"description": "It's critical to understand the type and classification of data your workload is processing, the associated business processes, where the data is stored, and who is the data owner. You should also have an understanding of the applicable legal and compliance requirements of your workload, and what data controls need to be enforced. Identifying data is the first step in the data classification journey.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC07-BP02": {"name": "SEC07-BP02","checks": {},"status": "PASS","attributes": [{"Name": "SEC07-BP02 Define data protection controls","Section": "Data protection","SubSection": "Data classification","Description": "Protect data according to its classification level. 
For example, secure data classified as public by using relevant recommendations while protecting sensitive data with additional controls. By using resource tags, separate AWS accounts per sensitivity (and potentially also for each caveat, enclave, or community of interest), IAM policies, AWS Organizations SCPs, AWS Key Management Service (AWS KMS), and AWS CloudHSM, you can define and implement your policies for data classification and protection with encryption. For example, if you have a project with S3 buckets that contain highly critical data or Amazon Elastic Compute Cloud (Amazon EC2) instances that process confidential data, they can be tagged with a Project=ABC tag. Only your immediate team knows what the project code means, and it provides a way to use attribute-based access control. You can define levels of access to the AWS KMS encryption keys through key policies and grants to ensure that only appropriate services have access to the sensitive content through a secure mechanism. If you are making authorization decisions based on tags, you should make sure that the permissions on the tags are defined appropriately using tag policies in AWS Organizations.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_data_classification_define_protection.html#implementation-guidance.","WellArchitectedPracticeId": "sec_data_classification_define_protection","WellArchitectedQuestionId": "data-classification"}],"description": "Protect data according to its classification level. For example, secure data classified as public by using relevant recommendations while protecting sensitive data with additional controls. By using resource tags, separate AWS accounts per sensitivity (and potentially also for each caveat, enclave, or community of interest), IAM policies, AWS Organizations SCPs, AWS Key Management Service (AWS KMS), and AWS CloudHSM, you can define and implement your policies for data classification and protection with encryption. For example, if you have a project with S3 buckets that contain highly critical data or Amazon Elastic Compute Cloud (Amazon EC2) instances that process confidential data, they can be tagged with a Project=ABC tag. Only your immediate team knows what the project code means, and it provides a way to use attribute-based access control. You can define levels of access to the AWS KMS encryption keys through key policies and grants to ensure that only appropriate services have access to the sensitive content through a secure mechanism. If you are making authorization decisions based on tags, you should make sure that the permissions on the tags are defined appropriately using tag policies in AWS Organizations.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC07-BP03": {"name": "SEC07-BP03","checks": {},"status": "PASS","attributes": [{"Name": "SEC07-BP03 Automate identification and classification","Section": "Data protection","SubSection": "Data classification","Description": "Automating the identification and classification of data can help you implement the correct controls. Using automation for this instead of direct access from a person reduces the risk of human error and exposure. You should evaluate using a tool, such as Amazon Macie, that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. 
Amazon Macie recognizes sensitive data, such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_data_classification_auto_classification.html#implementation-guidance.","WellArchitectedPracticeId": "sec_data_classification_auto_classification","WellArchitectedQuestionId": "data-classification"}],"description": "Automating the identification and classification of data can help you implement the correct controls. Using automation for this instead of direct access from a person reduces the risk of human error and exposure. You should evaluate using a tool, such as Amazon Macie, that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. Amazon Macie recognizes sensitive data, such as personally identifiable information (PII) or intellectual property, and provides you with dashboards and alerts that give visibility into how this data is being accessed or moved.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC07-BP04": {"name": "SEC07-BP04","checks": {},"status": "PASS","attributes": [{"Name": "SEC07-BP04 Define data lifecycle management","Section": "Data protection","SubSection": "Data classification","Description": "Your defined lifecycle strategy should be based on sensitivity level as well as legal and organization requirements. Aspects including the duration for which you retain data, data destruction processes, data access management, data transformation, and data sharing should be considered. When choosing a data classification methodology, balance usability versus access. You should also accommodate the multiple levels of access and nuances for implementing a secure, but still usable, approach for each level. Always use a defense in depth approach and reduce human access to data and mechanisms for transforming, deleting, or copying data. For example, require users to strongly authenticate to an application, and give the application, rather than the users, the requisite access permission to perform action at a distance. In addition, ensure that users come from a trusted network path and require access to the decryption keys. Use tools, such as dashboards and automated reporting, to give users information from the data rather than giving them direct access to the data.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_data_classification_lifecycle_management.html#implementation-guidance.","WellArchitectedPracticeId": "sec_data_classification_lifecycle_management","WellArchitectedQuestionId": "data-classification"}],"description": "Your defined lifecycle strategy should be based on sensitivity level as well as legal and organization requirements. Aspects including the duration for which you retain data, data destruction processes, data access management, data transformation, and data sharing should be considered. When choosing a data classification methodology, balance usability versus access. You should also accommodate the multiple levels of access and nuances for implementing a secure, but still usable, approach for each level. 
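If you evaluate Macie as suggested above, enabling it and scheduling a one-time classification job might look roughly like the boto3 sketch below; the account ID, bucket, and job names are hypothetical, and create_classification_job is shown with only minimal arguments.

```python
import boto3

macie = boto3.client("macie2", region_name="eu-west-1")

# Turn Macie on for the account (this call fails if Macie is already enabled).
macie.enable_macie()

# One-time job that scans a hypothetical bucket for sensitive data.
macie.create_classification_job(
    jobType="ONE_TIME",
    name="classify-abc-project-data",
    clientToken="classify-abc-project-data-001",  # idempotency token
    s3JobDefinition={
        "bucketDefinitions": [
            {"accountId": "111122223333", "buckets": ["abc-project-data"]}
        ]
    },
)
```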
Always use a defense in depth approach and reduce human access to data and mechanisms for transforming, deleting, or copying data. For example, require users to strongly authenticate to an application, and give the application, rather than the users, the requisite access permission to perform action at a distance. In addition, ensure that users come from a trusted network path and require access to the decryption keys. Use tools, such as dashboards and automated reporting, to give users information from the data rather than giving them direct access to the data.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC08-BP01": {"name": "SEC08-BP01","checks": {"kms_cmk_are_used": null},"status": "PASS","attributes": [{"Name": "SEC08-BP01 Implement secure key management","Section": "Data protection","SubSection": "Protecting data at rest","Description": "By defining an encryption approach that includes the storage, rotation, and access control of keys, you can help provide protection for your content against unauthorized users and against unnecessary exposure to authorized users. AWS Key Management Service (AWS KMS) helps you manage encryption keys and integrates with many AWS services. This service provides durable, secure, and redundant storage for your AWS KMS keys. You can define your key aliases as well as key-level policies. The policies help you define key administrators as well as key users. Additionally, AWS CloudHSM is a cloud-based hardware security module (HSM) that enables you to easily generate and use your own encryption keys in the AWS Cloud. It helps you meet corporate, contractual, and regulatory compliance requirements for data security by using FIPS 140-2 Level 3 validated HSMs.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_rest_key_mgmt.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_rest_key_mgmt","WellArchitectedQuestionId": "protect-data-rest"}],"description": "By defining an encryption approach that includes the storage, rotation, and access control of keys, you can help provide protection for your content against unauthorized users and against unnecessary exposure to authorized users. AWS Key Management Service (AWS KMS) helps you manage encryption keys and integrates with many AWS services. This service provides durable, secure, and redundant storage for your AWS KMS keys. You can define your key aliases as well as key-level policies. The policies help you define key administrators as well as key users. Additionally, AWS CloudHSM is a cloud-based hardware security module (HSM) that enables you to easily generate and use your own encryption keys in the AWS Cloud. 
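A minimal boto3 sketch of the key-management lifecycle described above (creation, rotation, and an alias); the key description and alias name are hypothetical placeholders.

```python
import boto3

kms = boto3.client("kms", region_name="eu-west-1")

# Create a customer managed key, turn on automatic rotation, and give it an alias.
key_id = kms.create_key(Description="Workload data-at-rest key")["KeyMetadata"]["KeyId"]
kms.enable_key_rotation(KeyId=key_id)
kms.create_alias(AliasName="alias/workload-data", TargetKeyId=key_id)
```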
It helps you meet corporate, contractual, and regulatory compliance requirements for data security by using FIPS 140-2 Level 3 validated HSMs.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"SEC08-BP02": {"name": "SEC08-BP02","checks": {"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","ec2_ebs_snapshots_encrypted": "FAIL","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","workspaces_volume_encryption_enabled": null,"glue_database_connections_ssl_enabled": null,"sqs_queues_server_side_encryption_enabled": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"glue_etl_jobs_amazon_s3_encryption_enabled": "PASS","cloudwatch_log_group_kms_encryption_enabled": "FAIL","glue_etl_jobs_job_bookmark_encryption_enabled": "FAIL","glue_data_catalogs_metadata_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"glue_development_endpoints_s3_encryption_enabled": null,"glue_etl_jobs_cloudwatch_logs_encryption_enabled": "FAIL","eks_cluster_kms_cmk_encryption_in_secrets_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"sagemaker_training_jobs_intercontainer_encryption_enabled": null,"glue_data_catalogs_connection_passwords_encryption_enabled": "FAIL","glue_development_endpoints_job_bookmark_encryption_enabled": null,"sagemaker_training_jobs_volume_and_output_encryption_enabled": null,"glue_development_endpoints_cloudwatch_logs_encryption_enabled": null},"status": "FAIL","attributes": [{"Name": "SEC08-BP02 Enforce encryption at rest","Section": "Data protection","SubSection": "Protecting data at rest","Description": "You should ensure that the only way to store data is by using encryption. AWS Key Management Service (AWS KMS) integrates seamlessly with many AWS services to make it easier for you to encrypt all your data at rest. For example, in Amazon Simple Storage Service (Amazon S3), you can set default encryption on a bucket so that all new objects are automatically encrypted. Additionally, Amazon Elastic Compute Cloud (Amazon EC2) and Amazon S3 support the enforcement of encryption by setting default encryption. You can use AWS Config Rules to check automatically that you are using encryption, for example, for Amazon Elastic Block Store (Amazon EBS) volumes, Amazon Relational Database Service (Amazon RDS) instances, and Amazon S3 buckets.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_rest_encrypt.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_rest_encrypt","WellArchitectedQuestionId": "protect-data-rest"}],"description": "You should ensure that the only way to store data is by using encryption. AWS Key Management Service (AWS KMS) integrates seamlessly with many AWS services to make it easier for you to encrypt all your data at rest. For example, in Amazon Simple Storage Service (Amazon S3), you can set default encryption on a bucket so that all new objects are automatically encrypted. Additionally, Amazon Elastic Compute Cloud (Amazon EC2) and Amazon S3 support the enforcement of encryption by setting default encryption. 
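The default-encryption settings mentioned above can be applied with boto3 along these lines; the bucket name and key alias are hypothetical placeholders, and enable_ebs_encryption_by_default is a per-region setting.

```python
import boto3

# Per-region setting: newly created EBS volumes are encrypted by default.
ec2 = boto3.client("ec2", region_name="eu-west-1")
ec2.enable_ebs_encryption_by_default()

# Default encryption on a hypothetical bucket so new objects are encrypted with a KMS key.
s3 = boto3.client("s3")
s3.put_bucket_encryption(
    Bucket="abc-project-data",
    ServerSideEncryptionConfiguration={
        "Rules": [
            {
                "ApplyServerSideEncryptionByDefault": {
                    "SSEAlgorithm": "aws:kms",
                    "KMSMasterKeyID": "alias/workload-data",  # hypothetical alias
                },
                "BucketKeyEnabled": True,
            }
        ]
    },
)
```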
You can use AWS Config Rules to check automatically that you are using encryption, for example, for Amazon Elastic Block Store (Amazon EBS) volumes, Amazon Relational Database Service (Amazon RDS) instances, and Amazon S3 buckets.","checks_status": {"fail": 9,"pass": 4,"total": 25,"manual": 0}},"SEC08-BP03": {"name": "SEC08-BP03","checks": {"s3_bucket_default_encryption": "PASS","sagemaker_notebook_instance_encryption_enabled": null},"status": "PASS","attributes": [{"Name": "SEC08-BP03 Automate data at rest protection","Section": "Data protection","SubSection": "Protecting data at rest","Description": "Use automated tools to validate and enforce data at rest controls continuously, for example, verify that there are only encrypted storage resources. You can automate validation that all EBS volumes are encrypted using AWS Config Rules. AWS Security Hub can also verify several different controls through automated checks against security standards. Additionally, your AWS Config Rules can automatically remediate noncompliant resources.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_rest_automate_protection.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_rest_automate_protection","WellArchitectedQuestionId": "protect-data-rest"}],"description": "Use automated tools to validate and enforce data at rest controls continuously, for example, verify that there are only encrypted storage resources. You can automate validation that all EBS volumes are encrypted using AWS Config Rules. AWS Security Hub can also verify several different controls through automated checks against security standards. Additionally, your AWS Config Rules can automatically remediate noncompliant resources.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"SEC08-BP04": {"name": "SEC08-BP04","checks": {"s3_bucket_object_versioning": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","organizations_account_part_of_organizations": null},"status": "FAIL","attributes": [{"Name": "SEC08-BP04 Enforce access control","Section": "Data protection","SubSection": "Protecting data at rest","Description": "To help protect your data at rest, enforce access control using mechanisms, such as isolation and versioning, and apply the principle of least privilege. Prevent the granting of public access to your data.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_rest_access_control.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_rest_access_control","WellArchitectedQuestionId": "protect-data-rest"}],"description": "To help protect your data at rest, enforce access control using mechanisms, such as isolation and versioning, and apply the principle of least privilege. Prevent the granting of public access to your data.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"SEC08-BP05": {"name": "SEC08-BP05","checks": {},"status": "PASS","attributes": [{"Name": "SEC08-BP05 Use mechanisms to keep people away from data","Section": "Data protection","SubSection": "Protecting data at rest","Description": "Keep all users away from directly accessing sensitive data and systems under normal operational circumstances. 
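To illustrate the AWS Config automation above, here is a sketch that registers the AWS managed ENCRYPTED_VOLUMES rule; it assumes a configuration recorder is already running in the region.

```python
import boto3

config = boto3.client("config", region_name="eu-west-1")

# AWS managed rule that flags unencrypted EBS volumes; a configuration
# recorder must already be running in this region for evaluations to happen.
config.put_config_rule(
    ConfigRule={
        "ConfigRuleName": "ebs-volumes-encrypted",
        "Source": {"Owner": "AWS", "SourceIdentifier": "ENCRYPTED_VOLUMES"},
    }
)
```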
For example, use a change management workflow to manage Amazon Elastic Compute Cloud (Amazon EC2) instances using tools instead of allowing direct access or a bastion host. This can be achieved using AWS Systems Manager Automation, which uses automation documents that contain steps you use to perform tasks. These documents can be stored in source control, be peer reviewed before running, and tested thoroughly to minimize risk compared to shell access. Business users could have a dashboard instead of direct access to a data store to run queries. Where CI/CD pipelines are not used, determine which controls and processes are required to adequately provide a normally disabled break-glass access mechanism.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_rest_use_people_away.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_rest_use_people_away","WellArchitectedQuestionId": "protect-data-rest"}],"description": "Keep all users away from directly accessing sensitive data and systems under normal operational circumstances. For example, use a change management workflow to manage Amazon Elastic Compute Cloud (Amazon EC2) instances using tools instead of allowing direct access or a bastion host. This can be achieved using AWS Systems Manager Automation, which uses automation documents that contain steps you use to perform tasks. These documents can be stored in source control, be peer reviewed before running, and tested thoroughly to minimize risk compared to shell access. Business users could have a dashboard instead of direct access to a data store to run queries. Where CI/CD pipelines are not used, determine which controls and processes are required to adequately provide a normally disabled break-glass access mechanism.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC09-BP01": {"name": "SEC09-BP01","checks": {"acm_certificates_expiration_check": "PASS","directoryservice_ldap_certificate_expiration": null},"status": "PASS","attributes": [{"Name": "SEC09-BP01 Implement secure key and certificate management","Section": "Data protection","SubSection": "Protecting data in transit","Description": "Store encryption keys and certificates securely and rotate them at appropriate time intervals with strict access control. The best way to accomplish this is to use a managed service, such as AWS Certificate Manager (ACM). It lets you easily provision, manage, and deploy public and private Transport Layer Security (TLS) certificates for use with AWS services and your internal connected resources. TLS certificates are used to secure network communications and establish the identity of websites over the internet as well as resources on private networks. ACM integrates with AWS resources, such as Elastic Load Balancers (ELBs), AWS distributions, and APIs on API Gateway, also handling automatic certificate renewals. 
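A small boto3 sketch of the certificate-expiry monitoring that ACM enables, in the spirit of the acm_certificates_expiration_check referenced in this data; the region and output formatting are illustrative.

```python
import boto3
from datetime import datetime, timezone

acm = boto3.client("acm", region_name="eu-west-1")

# Walk every certificate and report how many days each has left.
for page in acm.get_paginator("list_certificates").paginate():
    for summary in page["CertificateSummaryList"]:
        cert = acm.describe_certificate(
            CertificateArn=summary["CertificateArn"]
        )["Certificate"]
        not_after = cert.get("NotAfter")  # absent while validation is pending
        if not_after:
            days_left = (not_after - datetime.now(timezone.utc)).days
            print(f"{cert['DomainName']}: {days_left} days until expiry")
```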
If you use ACM to deploy a private root CA, both certificates and private keys can be provided by it for use in Amazon Elastic Compute Cloud (Amazon EC2) instances, containers, and so on.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_transit_key_cert_mgmt.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_transit_key_cert_mgmt","WellArchitectedQuestionId": "protect-data-transit"}],"description": "Store encryption keys and certificates securely and rotate them at appropriate time intervals with strict access control. The best way to accomplish this is to use a managed service, such as AWS Certificate Manager (ACM). It lets you easily provision, manage, and deploy public and private Transport Layer Security (TLS) certificates for use with AWS services and your internal connected resources. TLS certificates are used to secure network communications and establish the identity of websites over the internet as well as resources on private networks. ACM integrates with AWS resources, such as Elastic Load Balancers (ELBs), AWS distributions, and APIs on API Gateway, also handling automatic certificate renewals. If you use ACM to deploy a private root CA, both certificates and private keys can be provided by it for use in Amazon Elastic Compute Cloud (Amazon EC2) instances, containers, and so on.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"SEC09-BP02": {"name": "SEC09-BP02","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","elb_insecure_ssl_ciphers": "PASS","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_secure_transport_policy": "FAIL","cloudfront_distributions_https_enabled": null,"apigateway_restapi_client_certificate_enabled": "FAIL","cloudfront_distributions_field_level_encryption_enabled": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Name": "SEC09-BP02 Enforce encryption in transit","Section": "Data protection","SubSection": "Protecting data in transit","Description": "Enforce your defined encryption requirements based on appropriate standards and recommendations to help you meet your organizational, legal, and compliance requirements. AWS services provide HTTPS endpoints using TLS for communication, thus providing encryption in transit when communicating with the AWS APIs. Insecure protocols, such as HTTP, can be audited and blocked in a VPC through the use of security groups. HTTP requests can also be automatically redirected to HTTPS in Amazon CloudFront or on an Application Load Balancer. You have full control over your computing resources to implement encryption in transit across your services. Additionally, you can use VPN connectivity into your VPC from an external network to facilitate encryption of traffic. 
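One common way to enforce encryption in transit for S3, matching the s3_bucket_secure_transport_policy check that appears in this data, is a bucket policy that denies non-TLS requests; the bucket name below is a hypothetical placeholder.

```python
import json

import boto3

bucket = "abc-project-data"  # hypothetical bucket
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "DenyInsecureTransport",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:*",
            "Resource": [
                f"arn:aws:s3:::{bucket}",
                f"arn:aws:s3:::{bucket}/*",
            ],
            # Rejects any request that did not arrive over TLS.
            "Condition": {"Bool": {"aws:SecureTransport": "false"}},
        }
    ],
}
boto3.client("s3").put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
```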
Third-party solutions are available in the AWS Marketplace, if you have special requirements.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_transit_encrypt.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_transit_encrypt","WellArchitectedQuestionId": "protect-data-transit"}],"description": "Enforce your defined encryption requirements based on appropriate standards and recommendations to help you meet your organizational, legal, and compliance requirements. AWS services provide HTTPS endpoints using TLS for communication, thus providing encryption in transit when communicating with the AWS APIs. Insecure protocols, such as HTTP, can be audited and blocked in a VPC through the use of security groups. HTTP requests can also be automatically redirected to HTTPS in Amazon CloudFront or on an Application Load Balancer. You have full control over your computing resources to implement encryption in transit across your services. Additionally, you can use VPN connectivity into your VPC from an external network to facilitate encryption of traffic. Third-party solutions are available in the AWS Marketplace, if you have special requirements.","checks_status": {"fail": 4,"pass": 2,"total": 11,"manual": 0}},"SEC09-BP03": {"name": "SEC09-BP03","checks": {},"status": "PASS","attributes": [{"Name": "SEC09-BP03 Automate detection of unintended data access","Section": "Data protection","SubSection": "Protecting data in transit","Description": "Use tools such as Amazon GuardDuty to automatically detect suspicious activity or attempts to move data outside of defined boundaries. For example, GuardDuty can detect Amazon Simple Storage Service (Amazon S3) read activity that is unusual with the Exfiltration:S3/AnomalousBehavior finding. In addition to GuardDuty, Amazon VPC Flow Logs, which capture network traffic information, can be used with Amazon EventBridge to trigger detection of abnormal connections–both successful and denied. Amazon S3 Access Analyzer can help assess what data is accessible to whom in your Amazon S3 buckets.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_transit_auto_unintended_access.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_transit_auto_unintended_access","WellArchitectedQuestionId": "protect-data-transit"}],"description": "Use tools such as Amazon GuardDuty to automatically detect suspicious activity or attempts to move data outside of defined boundaries. For example, GuardDuty can detect Amazon Simple Storage Service (Amazon S3) read activity that is unusual with the Exfiltration:S3/AnomalousBehavior finding. In addition to GuardDuty, Amazon VPC Flow Logs, which capture network traffic information, can be used with Amazon EventBridge to trigger detection of abnormal connections–both successful and denied. 
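Enabling the VPC Flow Logs mentioned above (and remediating the vpc_flow_logs_enabled finding recorded just below) might look like this boto3 sketch; the VPC ID, log group, and role ARN are hypothetical placeholders.

```python
import boto3

ec2 = boto3.client("ec2", region_name="eu-west-1")

# Hypothetical VPC ID, log group, and IAM role that can write to CloudWatch Logs.
ec2.create_flow_logs(
    ResourceIds=["vpc-0123456789abcdef0"],
    ResourceType="VPC",
    TrafficType="ALL",
    LogDestinationType="cloud-watch-logs",
    LogGroupName="/vpc/flow-logs",
    DeliverLogsPermissionArn="arn:aws:iam::111122223333:role/vpc-flow-logs",
)
```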
Amazon S3 Access Analyzer can help assess what data is accessible to whom in your Amazon S3 buckets.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC09-BP04": {"name": "SEC09-BP04","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Name": "SEC09-BP04 Authenticate network communications","Section": "Data protection","SubSection": "Protecting data in transit","Description": "Verify the identity of communications by using protocols that support authentication, such as Transport Layer Security (TLS) or IPsec. Using network protocols that support authentication allows for trust to be established between the parties. This adds to the encryption used in the protocol to reduce the risk of communications being altered or intercepted. Common protocols that implement authentication include Transport Layer Security (TLS), which is used in many AWS services, and IPsec, which is used in AWS Virtual Private Network (AWS VPN).","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_protect_data_transit_authentication.html#implementation-guidance.","WellArchitectedPracticeId": "sec_protect_data_transit_authentication","WellArchitectedQuestionId": "protect-data-transit"}],"description": "Verify the identity of communications by using protocols that support authentication, such as Transport Layer Security (TLS) or IPsec. Using network protocols that support authentication allows for trust to be established between the parties. This adds to the encryption used in the protocol to reduce the risk of communications being altered or intercepted. Common protocols that implement authentication include Transport Layer Security (TLS), which is used in many AWS services, and IPsec, which is used in AWS Virtual Private Network (AWS VPN).","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"SEC10-BP01": {"name": "SEC10-BP01","checks": {"iam_support_role_created": null,"account_maintain_current_contact_details": null,"account_security_contact_information_is_registered": null,"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Name": "SEC10-BP01 Identify key personnel and external resources","Section": "Incident response","SubSection": "Prepare","Description": "Identify internal and external personnel, resources, and legal obligations that would help your organization respond to an incident. When you define your approach to incident response in the cloud, in unison with other teams (such as your legal counsel, leadership, business stakeholders, AWS Support Services, and others), you must identify key personnel, stakeholders, and relevant contacts. To reduce dependency and decrease response time, make sure that your team, specialist security teams, and responders are educated about the services that you use and have opportunities to practice hands-on. We encourage you to identify external AWS security partners that can provide you with outside expertise and a different perspective to augment your response capabilities. 
Your trusted security partners can help you identify potential risks or threats that you might not be familiar with.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_identify_personnel.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_identify_personnel","WellArchitectedQuestionId": "incident-response"}],"description": "Identify internal and external personnel, resources, and legal obligations that would help your organization respond to an incident. When you define your approach to incident response in the cloud, in unison with other teams (such as your legal counsel, leadership, business stakeholders, AWS Support Services, and others), you must identify key personnel, stakeholders, and relevant contacts. To reduce dependency and decrease response time, make sure that your team, specialist security teams, and responders are educated about the services that you use and have opportunities to practice hands-on. We encourage you to identify external AWS security partners that can provide you with outside expertise and a different perspective to augment your response capabilities. Your trusted security partners can help you identify potential risks or threats that you might not be familiar with.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"SEC10-BP02": {"name": "SEC10-BP02","checks": {},"status": "PASS","attributes": [{"Name": "SEC10-BP02 Develop incident management plans","Section": "Incident response","SubSection": "Prepare","Description": "Create plans to help you respond to, communicate during, and recover from an incident. For example, you can start an incident response plan with the most likely scenarios for your workload and organization. Include how you would communicate and escalate both internally and externally.","LevelOfRisk": "High","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_develop_management_plans.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_develop_management_plans","WellArchitectedQuestionId": "incident-response"}],"description": "Create plans to help you respond to, communicate during, and recover from an incident. For example, you can start an incident response plan with the most likely scenarios for your workload and organization. Include how you would communicate and escalate both internally and externally.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC10-BP03": {"name": "SEC10-BP03","checks": {},"status": "PASS","attributes": [{"Name": "SEC10-BP03 Prepare forensic capabilities","Section": "Incident response","SubSection": "Prepare","Description": "It's important for your incident responders to understand when and how the forensic investigation fits into your response plan. Your organization should define what evidence is collected and what tools are used in the process. Identify and prepare forensic investigation capabilities that are suitable, including external specialists, tools, and automation. A key decision that you should make upfront is whether you will collect data from a live system. 
Some data, such as the contents of volatile memory or active network connections, will be lost if the system is powered off or rebooted. Your response team can combine tools, such as AWS Systems Manager, Amazon EventBridge, and AWS Lambda, to automatically run forensic tools within an operating system and VPC traffic mirroring to obtain a network packet capture, to gather non-persistent evidence. Conduct other activities, such as log analysis or analyzing disk images, in a dedicated security account with customized forensic workstations and tools accessible to your responders. Routinely ship relevant logs to a data store that provides high durability and integrity. Responders should have access to those logs. AWS offers several tools that can make log investigation easier, such as Amazon Athena, Amazon OpenSearch Service (OpenSearch Service), and Amazon CloudWatch Logs Insights. Additionally, preserve evidence securely using Amazon Simple Storage Service (Amazon S3) Object Lock. This service follows the WORM (write-once-read-many) model and prevents objects from being deleted or overwritten for a defined period. As forensic investigation techniques require specialist training, you might need to engage external specialists.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_prepare_forensic.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_prepare_forensic","WellArchitectedQuestionId": "incident-response"}],"description": "It's important for your incident responders to understand when and how the forensic investigation fits into your response plan. Your organization should define what evidence is collected and what tools are used in the process. Identify and prepare forensic investigation capabilities that are suitable, including external specialists, tools, and automation. A key decision that you should make upfront is whether you will collect data from a live system. Some data, such as the contents of volatile memory or active network connections, will be lost if the system is powered off or rebooted. Your response team can combine tools, such as AWS Systems Manager, Amazon EventBridge, and AWS Lambda, to automatically run forensic tools within an operating system and VPC traffic mirroring to obtain a network packet capture, to gather non-persistent evidence. Conduct other activities, such as log analysis or analyzing disk images, in a dedicated security account with customized forensic workstations and tools accessible to your responders. Routinely ship relevant logs to a data store that provides high durability and integrity. Responders should have access to those logs. AWS offers several tools that can make log investigation easier, such as Amazon Athena, Amazon OpenSearch Service (OpenSearch Service), and Amazon CloudWatch Logs Insights. Additionally, preserve evidence securely using Amazon Simple Storage Service (Amazon S3) Object Lock. This service follows the WORM (write-once-read-many) model and prevents objects from being deleted or overwritten for a defined period. 
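A sketch of the Object Lock evidence store described above; the bucket name and retention period are hypothetical, and note that Object Lock can only be enabled when the bucket is created.

```python
import boto3

s3 = boto3.client("s3", region_name="eu-west-1")

# Object Lock must be enabled at bucket creation time.
s3.create_bucket(
    Bucket="ir-evidence-archive",
    CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
    ObjectLockEnabledForBucket=True,
)

# Default WORM retention: objects cannot be deleted or overwritten for a year.
s3.put_object_lock_configuration(
    Bucket="ir-evidence-archive",
    ObjectLockConfiguration={
        "ObjectLockEnabled": "Enabled",
        "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 365}},
    },
)
```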
As forensic investigation techniques require specialist training, you might need to engage external specialists.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC10-BP04": {"name": "SEC10-BP04","checks": {},"status": "PASS","attributes": [{"Name": "SEC10-BP04 Automate containment capability","Section": "Incident response","SubSection": "Iterate","Description": "Automate containment and recovery of an incident to reduce response times and organizational impact. Once you create and practice the processes and tools from your playbooks, you can deconstruct the logic into a code-based solution, which can be used as a tool by many responders to automate the response and remove variance or guess-work by your responders. This can speed up the lifecycle of a response. The next goal is to enable this code to be fully automated by being invoked by the alerts or events themselves, rather than by a human responder, to create an event-driven response. These processes should also automatically add relevant data to your security systems. For example, an incident involving traffic from an unwanted IP address can automatically populate an AWS WAF block list or Network Firewall rule group to prevent further activity. With an event-driven response system, a detective mechanism triggers a responsive mechanism to automatically remediate the event. You can use event-driven response capabilities to reduce the time-to-value between detective mechanisms and responsive mechanisms. To create this event-driven architecture, you can use AWS Lambda, which is a serverless compute service that runs your code in response to events and automatically manages the underlying compute resources for you. For example, assume that you have an AWS account with the AWS CloudTrail service enabled. If CloudTrail is ever disabled (through the cloudtrail:StopLogging API call), you can use Amazon EventBridge to monitor for the specific cloudtrail:StopLogging event, and invoke a Lambda function to call cloudtrail:StartLogging to restart logging.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_auto_contain.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_auto_contain","WellArchitectedQuestionId": "incident-response"}],"description": "Automate containment and recovery of an incident to reduce response times and organizational impact. Once you create and practice the processes and tools from your playbooks, you can deconstruct the logic into a code-based solution, which can be used as a tool by many responders to automate the response and remove variance or guess-work by your responders. This can speed up the lifecycle of a response. The next goal is to enable this code to be fully automated by being invoked by the alerts or events themselves, rather than by a human responder, to create an event-driven response. These processes should also automatically add relevant data to your security systems. For example, an incident involving traffic from an unwanted IP address can automatically populate an AWS WAF block list or Network Firewall rule group to prevent further activity. With an event-driven response system, a detective mechanism triggers a responsive mechanism to automatically remediate the event. You can use event-driven response capabilities to reduce the time-to-value between detective mechanisms and responsive mechanisms. 
To create this event-driven architecture, you can use AWS Lambda, which is a serverless compute service that runs your code in response to events and automatically manages the underlying compute resources for you. For example, assume that you have an AWS account with the AWS CloudTrail service enabled. If CloudTrail is ever disabled (through the cloudtrail:StopLogging API call), you can use Amazon EventBridge to monitor for the specific cloudtrail:StopLogging event, and invoke a Lambda function to call cloudtrail:StartLogging to restart logging.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC10-BP05": {"name": "SEC10-BP05","checks": {},"status": "PASS","attributes": [{"Name": "SEC10-BP05 Pre-provision access","Section": "Incident response","SubSection": "Prepare","Description": "Verify that incident responders have the correct access pre-provisioned in AWS to reduce the time needed for investigation through to recovery. Common anti-patterns: using the root account for incident response; altering existing accounts; manipulating IAM permissions directly when providing just-in-time privilege elevation.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_pre_provision_access.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_pre_provision_access","WellArchitectedQuestionId": "incident-response"}],"description": "Verify that incident responders have the correct access pre-provisioned in AWS to reduce the time needed for investigation through to recovery. Common anti-patterns: using the root account for incident response; altering existing accounts; manipulating IAM permissions directly when providing just-in-time privilege elevation.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC10-BP06": {"name": "SEC10-BP06","checks": {},"status": "PASS","attributes": [{"Name": "SEC10-BP06 Pre-deploy tools","Section": "Incident response","SubSection": "Prepare","Description": "Ensure that security personnel have the right tools pre-deployed into AWS to reduce the time for investigation through to recovery. To automate security engineering and operations functions, you can use a comprehensive set of APIs and tools from AWS. You can fully automate identity management, network security, data protection, and monitoring capabilities and deliver them using popular software development methods that you already have in place. When you build security automation, your system can monitor, review, and initiate a response, rather than having people monitor your security position and manually react to events. An effective way to automatically provide searchable and relevant log data across AWS services to your incident responders is to enable Amazon Detective. If your incident response teams continue to respond to alerts in the same way, they risk alert fatigue. Over time, the team can become desensitized to alerts and can either make mistakes handling ordinary situations or miss unusual alerts. Automation helps avoid alert fatigue by using functions that process the repetitive and ordinary alerts, leaving humans to handle the sensitive and unique incidents. Integrating anomaly detection systems, such as Amazon GuardDuty, AWS CloudTrail Insights, and Amazon CloudWatch Anomaly Detection, can reduce the burden of common threshold-based alerts. You can improve manual processes by programmatically automating steps in the process. 
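The StopLogging/StartLogging remediation described above could be implemented as a small Lambda handler like this sketch; it assumes an EventBridge rule that matches the cloudtrail:StopLogging API call and forwards the CloudTrail event to the function.

```python
import boto3

cloudtrail = boto3.client("cloudtrail")

def handler(event, context):
    """Invoked by an EventBridge rule matching the cloudtrail:StopLogging API call."""
    # The trail name arrives in the CloudTrail event's request parameters.
    trail_name = event["detail"]["requestParameters"]["name"]
    cloudtrail.start_logging(Name=trail_name)
    return {"restarted": trail_name}
```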
After you define the remediation pattern to an event, you can decompose that pattern into actionable logic, and write the code to perform that logic. Responders can then execute that code to remediate the issue. Over time, you can automate more and more steps, and ultimately automatically handle whole classes of common incidents. For tools that execute within the operating system of your Amazon Elastic Compute Cloud (Amazon EC2) instance, you should evaluate using the AWS Systems Manager Run Command, which enables you to remotely and securely administrate instances using an agent that you install on your Amazon EC2 instance operating system. It requires the Systems Manager Agent (SSM Agent), which is installed by default on many Amazon Machine Images (AMIs). Be aware, though, that once an instance has been compromised, no responses from tools or agents running on it should be considered trustworthy.","LevelOfRisk": "Low","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_pre_deploy_tools.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_pre_deploy_tools","WellArchitectedQuestionId": "incident-response"}],"description": "Ensure that security personnel have the right tools pre-deployed into AWS to reduce the time for investigation through to recovery. To automate security engineering and operations functions, you can use a comprehensive set of APIs and tools from AWS. You can fully automate identity management, network security, data protection, and monitoring capabilities and deliver them using popular software development methods that you already have in place. When you build security automation, your system can monitor, review, and initiate a response, rather than having people monitor your security position and manually react to events. An effective way to automatically provide searchable and relevant log data across AWS services to your incident responders is to enable Amazon Detective. If your incident response teams continue to respond to alerts in the same way, they risk alert fatigue. Over time, the team can become desensitized to alerts and can either make mistakes handling ordinary situations or miss unusual alerts. Automation helps avoid alert fatigue by using functions that process the repetitive and ordinary alerts, leaving humans to handle the sensitive and unique incidents. Integrating anomaly detection systems, such as Amazon GuardDuty, AWS CloudTrail Insights, and Amazon CloudWatch Anomaly Detection, can reduce the burden of common threshold-based alerts. You can improve manual processes by programmatically automating steps in the process. After you define the remediation pattern to an event, you can decompose that pattern into actionable logic, and write the code to perform that logic. Responders can then execute that code to remediate the issue. Over time, you can automate more and more steps, and ultimately automatically handle whole classes of common incidents. For tools that execute within the operating system of your Amazon Elastic Compute Cloud (Amazon EC2) instance, you should evaluate using the AWS Systems Manager Run Command, which enables you to remotely and securely administrate instances using an agent that you install on your Amazon EC2 instance operating system. It requires the Systems Manager Agent (SSM Agent), which is installed by default on many Amazon Machine Images (AMIs). 
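A minimal sketch of the Systems Manager Run Command usage described above; the instance ID and shell command are hypothetical, and the target instance must be running the SSM Agent.

```python
import boto3

ssm = boto3.client("ssm", region_name="eu-west-1")

# Hypothetical instance ID; the instance must be managed by SSM.
response = ssm.send_command(
    InstanceIds=["i-0123456789abcdef0"],
    DocumentName="AWS-RunShellScript",
    Comment="Collect running processes without opening shell access",
    Parameters={"commands": ["ps aux --sort=-%mem | head -n 20"]},
)
command_id = response["Command"]["CommandId"]

# The output can later be fetched per instance with get_command_invocation.
```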
Be aware, though, that once an instance has been compromised, no responses from tools or agents running on it should be considered trustworthy.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC10-BP07": {"name": "SEC10-BP07","checks": {},"status": "PASS","attributes": [{"Name": "SEC10-BP07 Run game days","Section": "Incident response","SubSection": "Simulate","Description": "Game days, also known as simulations or exercises, are internal events that provide a structured opportunity to practice your incident management plans and procedures during a realistic scenario. These events should exercise responders using the same tools and techniques that would be used in a real-world scenario - even mimicking real-world environments. Game days are fundamentally about being prepared and iteratively improving your response capabilities. Some of the reasons you might find value in performing game day activities include: validating readiness; developing confidence – learning from simulations and training staff; following compliance or contractual obligations; generating artifacts for accreditation; being agile – incremental improvement; becoming faster and improving tools; refining communication and escalation; and developing comfort with the rare and the unexpected. For these reasons, the value derived from participating in a simulation activity increases an organization's effectiveness during stressful events. Developing a simulation activity that is both realistic and beneficial can be a difficult exercise. Although testing your procedures or automation that handles well-understood events has certain advantages, it is just as valuable to participate in creative Security Incident Response Simulations (SIRS) activities to test yourself against the unexpected and continuously improve. Create custom simulations tailored to your environment, team, and tools. Find an issue and design your simulation around it. This could be something like a leaked credential, a server communicating with unwanted systems, or a misconfiguration that results in unauthorized exposure. Identify engineers who are familiar with your organization to create the scenario and another group to participate. The scenario should be realistic and challenging enough to be valuable. It should include the opportunity to get hands on with logging, notifications, escalations, and executing runbooks or automation. During the simulation, your responders should exercise their technical and organizational skills, and leaders should be involved to build their incident management skills. At the end of the simulation, celebrate the efforts of the team and look for ways to iterate, repeat, and expand into further simulations. AWS has created Incident Response Runbook templates that you can use not only to prepare your response efforts, but also as a basis for a simulation. When planning, a simulation can be broken into five phases. Evidence gathering: In this phase, a team will get alerts through various means, such as an internal ticketing system, alerts from monitoring tooling, anonymous tips, or even public news. Teams then start to review infrastructure and application logs to determine the source of the compromise. This step should also involve internal escalations and incident leadership. Once identified, teams move on to containing the incident. Contain the incident: Teams will have determined there has been an incident and established the source of the compromise. 
Teams now should take action to contain it, for example, by disabling compromised credentials, isolating a compute resource, or revoking a role's permission. Eradicate the incident: Now that they've contained the incident, teams will work towards mitigating any vulnerabilities in applications or infrastructure configurations that were susceptible to the compromise. This could include rotating all credentials used for a workload, modifying Access Control Lists (ACLs) or changing network configurations.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_incident_response_run_game_days.html#implementation-guidance.","WellArchitectedPracticeId": "sec_incident_response_run_game_days","WellArchitectedQuestionId": "incident-response"}],"description": "Game days, also known as simulations or exercises, are internal events that provide a structured opportunity to practice your incident management plans and procedures during a realistic scenario. These events should exercise responders using the same tools and techniques that would be used in a real-world scenario - even mimicking real-world environments. Game days are fundamentally about being prepared and iteratively improving your response capabilities. Some of the reasons you might find value in performing game day activities include: validating readiness; developing confidence – learning from simulations and training staff; following compliance or contractual obligations; generating artifacts for accreditation; being agile – incremental improvement; becoming faster and improving tools; refining communication and escalation; and developing comfort with the rare and the unexpected. For these reasons, the value derived from participating in a simulation activity increases an organization's effectiveness during stressful events. Developing a simulation activity that is both realistic and beneficial can be a difficult exercise. Although testing your procedures or automation that handles well-understood events has certain advantages, it is just as valuable to participate in creative Security Incident Response Simulations (SIRS) activities to test yourself against the unexpected and continuously improve. Create custom simulations tailored to your environment, team, and tools. Find an issue and design your simulation around it. This could be something like a leaked credential, a server communicating with unwanted systems, or a misconfiguration that results in unauthorized exposure. Identify engineers who are familiar with your organization to create the scenario and another group to participate. The scenario should be realistic and challenging enough to be valuable. It should include the opportunity to get hands on with logging, notifications, escalations, and executing runbooks or automation. During the simulation, your responders should exercise their technical and organizational skills, and leaders should be involved to build their incident management skills. At the end of the simulation, celebrate the efforts of the team and look for ways to iterate, repeat, and expand into further simulations. AWS has created Incident Response Runbook templates that you can use not only to prepare your response efforts, but also as a basis for a simulation. 
When planning, a simulation can be broken into five phases. Evidence gathering: In this phase, a team will get alerts through various means, such as an internal ticketing system, alerts from monitoring tooling, anonymous tips, or even public news. Teams then start to review infrastructure and application logs to determine the source of the compromise. This step should also involve internal escalations and incident leadership. Once identified, teams move on to containing the incident. Contain the incident: Teams will have determined there has been an incident and established the source of the compromise. Teams now should take action to contain it, for example, by disabling compromised credentials, isolating a compute resource, or revoking a role's permission. Eradicate the incident: Now that they've contained the incident, teams will work towards mitigating any vulnerabilities in applications or infrastructure configurations that were susceptible to the compromise. This could include rotating all credentials used for a workload, modifying Access Control Lists (ACLs) or changing network configurations.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"SEC11-BP02": {"name": "SEC11-BP02","checks": {"ecr_repositories_scan_images_on_push_enabled": "FAIL","ecr_repositories_scan_vulnerabilities_in_latest_image": null},"status": "FAIL","attributes": [{"Name": "SEC11-BP02 Automate testing throughout the development and release lifecycle","Section": "Application Security","SubSection": null,"Description": "Automate the testing for security properties throughout the development and release lifecycle. Automation makes it easier to consistently and repeatably identify potential issues in software prior to release, which reduces the risk of security issues in the software being provided. The goal of automated testing is to provide a programmatic way of detecting potential issues early and often throughout the development lifecycle. When you automate regression testing, you can rerun functional and non-functional tests to verify that previously tested software still performs as expected after a change. When you define security unit tests to check for common misconfigurations, such as broken or missing authentication, you can identify and fix these issues early in the development process. Test automation uses purpose-built test cases for application validation, based on the application's requirements and desired functionality. The result of the automated testing is based on comparing the generated test output to its respective expected output, which expedites the overall testing lifecycle. Testing methodologies such as regression testing and unit test suites are best suited for automation. Automating the testing of security properties allows builders to receive automated feedback without having to wait for a security review. Automated tests in the form of static or dynamic code analysis can increase code quality and help detect potential software issues early in the development lifecycle.","LevelOfRisk": "Medium","AssessmentMethod": "Automated","ImplementationGuidanceUrl": "https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/sec_appsec_automate_testing_throughout_lifecycle.html#implementation-guidance.","WellArchitectedPracticeId": "sec_appsec_automate_testing_throughout_lifecycle","WellArchitectedQuestionId": "application-security"}],"description": "Automate the testing for security properties throughout the development and release lifecycle. 
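For the ecr_repositories_scan_images_on_push_enabled check tied to this practice, turning scan-on-push on for a repository is a one-call boto3 sketch; the repository name is a hypothetical placeholder.

```python
import boto3

ecr = boto3.client("ecr", region_name="eu-west-1")

# Enables basic image scanning on every push to a hypothetical repository.
ecr.put_image_scanning_configuration(
    repositoryName="prowler-api",
    imageScanningConfiguration={"scanOnPush": True},
)
```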
Automation makes it easier to consistently and repeatably identify potential issues in software prior to release, which reduces the risk of security issues in the software being provided. The goal of automated testing is to provide a programmatic way of detecting potential issues early and often throughout the development lifecycle. When you automate regression testing, you can rerun functional and non-functional tests to verify that previously tested software still performs as expected after a change. When you define security unit tests to check for common misconfigurations, such as broken or missing authentication, you can identify and fix these issues early in the development process. Test automation uses purpose-built test cases for application validation, based on the application's requirements and desired functionality. The result of the automated testing is based on comparing the generated test output to its respective expected output, which expedites the overall testing lifecycle. Testing methodologies such as regression testing and unit test suites are best suited for automation. Automating the testing of security properties allows builders to receive automated feedback without having to wait for a security review. Automated tests in the form of static or dynamic code analysis can increase code quality and help detect potential software issues early in the development lifecycle.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}}},"requirements_passed": 17,"requirements_failed": 23,"requirements_manual": 17,"total_requirements": 57,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "e079e750-59b8-4d29-9e57-1a10a0e63be4","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "aws_foundational_security_best_practices_aws","framework": "AWS-Foundational-Security-Best-Practices","version": "","description": "The AWS Foundational Security Best Practices standard is a set of controls that detect when your deployed accounts and resources deviate from security best practices.","region": "eu-west-1","requirements": {"s3": {"name": "Benchmark: S3","checks": {"s3_bucket_public_access": null,"s3_bucket_acl_prohibited": "FAIL","s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","s3_account_level_public_access_blocks": null,"s3_bucket_server_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "s3","Section": "S3","Service": "s3","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS S3 resources and options.","checks_status": {"fail": 4,"pass": 2,"total": 9,"manual": 0}},"acm": {"name": "ACM","checks": {"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "acm","Section": "Acm","Service": "acm","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring ACM resources.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"dms": {"name": "Benchmark: DMS","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "dms","Section": "DMS","Service": "dms","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS DMS resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 
0}},"ec2": {"name": "Benchmark: EC2","checks": {"vpc_flow_logs_enabled": "FAIL","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","ec2_securitygroup_not_used": "FAIL","ec2_instance_imdsv2_enabled": "PASS","ec2_instance_older_than_specific_days": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ec2","Section": "EC2","Service": "ec2","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring EC2 resources and options.","checks_status": {"fail": 6,"pass": 4,"total": 10,"manual": 0}},"ecr": {"name": "Benchmark: Elastic Container Registry","checks": {"ecr_repositories_lifecycle_policy_enabled": "FAIL","ecr_repositories_scan_images_on_push_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ecr","Section": "ECR","Service": "ecr","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS ECR resources and options.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"ecs": {"name": "Benchmark: Elastic Container Service","checks": {"ecs_task_definitions_no_environment_secrets": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "ecs","Section": "ECS","Service": "ecs","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring ECS resources and options.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"efs": {"name": "Benchmark: EFS","checks": {"efs_have_backup_enabled": "FAIL","efs_encryption_at_rest_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "efs","Section": "EFS","Service": "efs","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS EFS resources and options.","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"eks": {"name": "Benchmark: EKS","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "eks","Section": "EKS","Service": "eks","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS EKS resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"elb": {"name": "Benchmark: ELB","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","elbv2_deletion_protection": "FAIL","elbv2_desync_mitigation_mode": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "elb","Section": "ELB","Service": "elb","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Elastic Load Balancer resources and options.","checks_status": {"fail": 4,"pass": 0,"total": 4,"manual": 0}},"emr": {"name": "Benchmark: EMR","checks": {"emr_cluster_master_nodes_no_public_ip": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "emr","Section": "EMR","Service": "emr","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring EMR resources.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"iam": {"name": "Benchmark: IAM","checks": {"iam_no_root_access_key": null,"iam_user_accesskey_unused": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_user_mfa_enabled_console_access": 
null,"iam_password_policy_minimum_length_14": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "iam","Section": "IAM","Service": "iam","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS IAM resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 10,"manual": 0}},"kms": {"name": "Benchmark: KMS","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "kms","Section": "KMS","Service": "kms","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS KMS resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"rds": {"name": "Benchmark: RDS","checks": {"rds_instance_multi_az": "FAIL","rds_snapshots_public_access": "PASS","rds_instance_no_public_access": "PASS","rds_instance_storage_encrypted": "FAIL","rds_instance_deletion_protection": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","rds_instance_minor_version_upgrade_enabled": "PASS"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "rds","Section": "RDS","Service": "rds","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS RDS resources and options.","checks_status": {"fail": 5,"pass": 3,"total": 10,"manual": 0}},"sns": {"name": "Benchmark: SNS","checks": {"sns_topics_kms_encryption_at_rest_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "sns","Section": "SNS","Service": "sns","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS SNS resources and options.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"sqs": {"name": "Benchmark: SQS","checks": {"sqs_queues_server_side_encryption_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "sqs","Section": "SQS","Service": "sqs","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS SQS resources and options.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"ssm": {"name": "Benchmark: SSM","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "ssm","Section": "SSM","Service": "ssm","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Systems Manager resources and options.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"waf": {"name": "Benchmark: WAF","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "waf","Section": "WAF","Service": "waf","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS WAF resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"elbv2": {"name": "Benchmark: ELBv2","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "elbv2","Section": "ELBv2","Service": "elbv2","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Elastic Load Balancer resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"config": {"name": "Benchmark: 
Config","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "config","Section": "Config","Service": "config","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Config.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"lambda": {"name": "Benchmark: Lambda","checks": {"awslambda_function_url_public": null,"awslambda_function_using_supported_runtimes": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "lambda","Section": "Lambda","Service": "lambda","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Lambda resources and options.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"account": {"name": "Account","checks": {"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "account","Section": "Account","Service": "account","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Account.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"kinesis": {"name": "Benchmark: Kinesis","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "kinesis","Section": "Kinesis","Service": "kinesis","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Kinesis resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"dynamodb": {"name": "Benchmark: DynamoDB","checks": {"dynamodb_tables_pitr_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "dynamodb","Section": "DynamoDB","Service": "dynamodb","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Dynamo DB resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"redshift": {"name": "Benchmark: Redshift","checks": {"redshift_cluster_public_access": null,"redshift_cluster_automated_snapshot": null,"redshift_cluster_automatic_upgrades": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "redshift","Section": "Redshift","Service": "redshift","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Redshift resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"codebuild": {"name": "Benchmark: CodeBuild","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "codebuild","Section": "CodeBuild","Service": "codebuild","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CodeBuild resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"guardduty": {"name": "Benchmark: GuardDuty","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Type": null,"ItemId": "guardduty","Section": "GuardDuty","Service": "guardduty","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS GuardDuty resources and options.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"sagemaker": {"name": "Benchmark: SageMaker","checks": {"sagemaker_notebook_instance_root_access_disabled": null,"sagemaker_notebook_instance_vpc_settings_configured": 
null,"sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "sagemaker","Section": "SageMaker","Service": "sagemaker","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Sagemaker resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"cloudfront": {"name": "Benchmark: CloudFront","checks": {"cloudfront_distributions_using_waf": null,"cloudfront_distributions_https_enabled": null,"cloudfront_distributions_logging_enabled": null,"cloudfront_distributions_field_level_encryption_enabled": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "cloudfront","Section": "CloudFront","Service": "cloudfront","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CloudFront resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"cloudtrail": {"name": "Benchmark: CloudTrail","checks": {"cloudtrail_multi_region_enabled": "PASS","cloudtrail_kms_encryption_enabled": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "cloudtrail","Section": "CloudTrail","Service": "cloudtrail","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CloudTrail resources and options.","checks_status": {"fail": 3,"pass": 1,"total": 4,"manual": 0}},"opensearch": {"name": "Benchmark: OpenSearch","checks": {"opensearch_service_domains_not_publicly_accessible": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "opensearch","Section": "OpenSearch","Service": "opensearch","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring OpenSearch resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"api-gateway": {"name": "API Gateway","checks": {"apigateway_restapi_logging_enabled": "PASS","apigateway_restapi_waf_acl_attached": "FAIL","apigatewayv2_api_authorizers_enabled": "FAIL","apigatewayv2_api_access_logging_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "api-gateway","Section": "API Gateway","Service": "apigateway","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring API Gateway resources.","checks_status": {"fail": 4,"pass": 1,"total": 5,"manual": 0}},"auto-scaling": {"name": "Benchmark: Auto Scaling","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "auto-scaling","Section": "Auto Scaling","Service": "autoscaling","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Auto Scaling resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"elasticsearch": {"name": "Benchmark: Elasticsearch","checks": {"opensearch_service_domains_audit_logging_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "elasticsearch","Section": "ElasticSearch","Service": "elasticsearch","SubGroup": null,"SubSection": 
null}],"description": "This section contains recommendations for configuring Elasticsearch resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"cloudformation": {"name": "Benchmark: CloudFormation","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "cloudformation","Section": "CloudFormation","Service": "cloudformation","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring CloudFormation resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"secretsmanager": {"name": "Benchmark: Secrets Manager","checks": {"secretsmanager_automatic_rotation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "secretsmanager","Section": "Secrets Manager","Service": "secretsmanager","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Secrets Manager resources.","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"network-firewall": {"name": "Benchmark: Network Firewall","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "network-firewall","Section": "Network Firewall","Service": "network-firewall","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring Network Firewall resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"elastic-beanstalk": {"name": "Benchmark: Elastic Beanstalk","checks": {},"status": "PASS","attributes": [{"Type": null,"ItemId": "elastic-beanstalk","Section": "Elastic Beanstalk","Service": "elasticbeanstalk","SubGroup": null,"SubSection": null}],"description": "This section contains recommendations for configuring AWS Elastic Beanstalk resources and options.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}}},"requirements_passed": 14,"requirements_failed": 12,"requirements_manual": 11,"total_requirements": 37,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "e23a4728-558b-4a92-8e7d-f1473c21cc6e","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "ens_rd2022_aws","framework": "ENS","version": "RD2022","description": "The accreditation scheme of the ENS (National Security Scheme) has been developed by the Ministry of Finance and Public Administrations and the CCN (National Cryptological Center). 
This includes the basic principles and minimum requirements necessary for the adequate protection of information.","region": "eu-west-1","requirements": {"mp.s.1.aws.wm.1": {"name": "mp.s.1.aws.wm.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.s.1","DescripcionControl": "Se deberรก hacer uso del cifrado de la informaciรณn contenida en los correos electrรณnicos."}],"description": "Protecciรณn del correo electrรณnico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.s.1.aws.wm.2": {"name": "mp.s.1.aws.wm.2","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.s.1","DescripcionControl": "Habilitar el registro de eventos de Workmail en CloudWatch para realizar el seguimiento de mensajes con spam."}],"description": "Protecciรณn del correo electrรณnico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.s.1.aws.wm.3": {"name": "mp.s.1.aws.wm.3","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.s.1","DescripcionControl": "En SES, se debe hacer uso de la opciรณn que permite a los usuarios enviar correo electrรณnico cifrado con S/MIME"}],"description": "Protecciรณn del correo electrรณnico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.s.4.aws.as.1": {"name": "mp.s.4.aws.as.1","checks": {"autoscaling_group_multiple_az": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.s.4","DescripcionControl": "Activar la soluciรณn AWS Auto Scaling para dotar a los sistemas de la capacidad suficiente para atender la carga prevista con holgura y desplegar tecnologรญas para la prevenciรณn de ataques conocidos."}],"description": "Protecciรณn frente a la denegaciรณn de servicio ","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.s.2.aws.waf.1": {"name": "mp.s.2.aws.waf.1","checks": {"cloudfront_distributions_using_waf": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.s.2","DescripcionControl": "Todas las aplicaciones web distribuidas por el servicio de AWS CloudFront deben estar integradas con el servicio de firewall de aplicaciones web AWS WAF."}],"description": "Protecciรณn de servicios y aplicaciones web","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.s.2.aws.waf.2": {"name": "mp.s.2.aws.waf.2","checks": {"apigateway_restapi_waf_acl_attached": 
"FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.s.2","DescripcionControl": "Los API gateways deben tener un ACL WAF asociado."}],"description": "Protecciรณn de servicios y aplicaciones web","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.s.2.aws.waf.3": {"name": "mp.s.2.aws.waf.3","checks": {"elbv2_waf_acl_attached": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.s.2","DescripcionControl": "Todos los balanceadores de aplicaciรณn deben estar integrados con el servicio de firewall de aplicaciรณn web para quedar protegidos ante ataques de la capa de aplicaciรณn"}],"description": "Protecciรณn de servicios y aplicaciones web","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.si.2.aws.es.1": {"name": "mp.si.2.aws.es.1","checks": {"opensearch_service_domains_encryption_at_rest_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Aplicar cifrado sobre todos los dominios del servicio Amazon Elasticsearch Service (ES). 
En caso de usar este servicio, deberรก asegurarse la activaciรณn del cifrado en reposo."}],"description": "Criptografรญa","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.si.2.aws.s3.1": {"name": "mp.si.2.aws.s3.1","checks": {"s3_bucket_default_encryption": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Aplicar cifrado sobre los distintos buckets de S3, de los cuales se debe asegurar que tengan activado el cifrado en reposo."}],"description": "Criptografรญa","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.pl.4.aws.sq.1": {"name": "op.pl.4.aws.sq.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4","DescripcionControl": "La entidad usuaria deberรก llevar a cabo el estudio de capacidades a las que hace referencia la medida de seguridad, si bien (โ€ฆ) deberรก tener especialmente en cuenta: * Las cuotas de los servicios a utilizar."}],"description": "Necesidades de procesamiento","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.com.1.aws.cf.1": {"name": "mp.com.1.aws.cf.1","checks": {"cloudfront_distributions_https_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Asegurar que la distribuciรณn entre frontales CloudFront y sus orรญgenes รบnicamente emplee trรกfico HTTPs "}],"description": "Perรญmetro seguro","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.com.1.aws.s3.1": {"name": "mp.com.1.aws.s3.1","checks": {"s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Asegurar que los Buckets S3 de almacenamiento apliquen cifrado para la transferencia de datos empleando Secure Sockets Layer (SSL)"}],"description": "Perรญmetro seguro","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.com.1.aws.sg.1": {"name": "mp.com.1.aws.sg.1","checks": {"ec2_securitygroup_from_launch_wizard": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Asegurar que el Security Group restrinja todo el trรกfico. 
Para ello, se deberรกn agregar las reglas del Security Group que se aplica por defecto cuando se crea una VPC."}],"description": "Perรญmetro seguro","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"mp.com.1.aws.sg.2": {"name": "mp.com.1.aws.sg.2","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Evitar la existencia de Security Groups que dejen abierto todo el trรกfico entrante."}],"description": "Perรญmetro seguro","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"mp.com.1.aws.sg.3": {"name": "mp.com.1.aws.sg.3","checks": {"ec2_securitygroup_not_used": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Evitar tener un repositorio de Security Groups que no estรฉn siendo usados."}],"description": "Perรญmetro seguro","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.com.3.aws.cf.1": {"name": "mp.com.3.aws.cf.1","checks": {"cloudfront_distributions_https_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["integridad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.3","DescripcionControl": "Asegurar que la distribuciรณn entre frontales CloudFront y sus orรญgenes รบnicamente emplee trรกfico HTTPS."}],"description": "Protecciรณn de la integridad y de la autenticidad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.com.3.aws.s3.1": {"name": "mp.com.3.aws.s3.1","checks": {"s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["integridad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.3","DescripcionControl": "Asegurar que los Buckets de almacenamiento S3 apliquen cifrado para la transferencia de datos empleando TLS."}],"description": "Protecciรณn de la integridad y de la autenticidad","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.com.4.aws.ws.1": {"name": "mp.com.4.aws.ws.1","checks": {"workspaces_vpc_2private_1public_subnets_nat": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "segregaciรณn de redes","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.4","DescripcionControl": "Se deberรกn abrir solo los puertos necesarios para el uso del servicio AWS WorkSpaces."}],"description": "Separaciรณn de flujos de informaciรณn en la red","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.si.2.aws.elb.1": {"name": "mp.si.2.aws.elb.1","checks": 
{"ec2_ebs_snapshots_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Se recomienda dejar activada la opciรณn de cifrado por defecto para nuevos volรบmenes."}],"description": "Criptografรญa","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.si.2.aws.kms.1": {"name": "mp.si.2.aws.kms.1","checks": {"ec2_ebs_volume_encryption": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Aplicar cifrado sobre el almacenamiento de las instancias en todos sus volรบmenes de datos."}],"description": "Criptografรญa","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"mp.si.2.aws.rds.1": {"name": "mp.si.2.aws.rds.1","checks": {"rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Aplicar cifrado sobre las bases de datos AWS RDS."}],"description": "Criptografรญa","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.si.2.aws.sqs.1": {"name": "mp.si.2.aws.sqs.1","checks": {"sqs_queues_server_side_encryption_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Aplicar cifrado sobre las colas de mensajes de AWS (Amazon SQS)."}],"description": "Criptografรญa","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.1.aws.re.1": {"name": "op.exp.1.aws.re.1","checks": {"resourceexplorer2_indexes_found": "PASS"},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.1","DescripcionControl": "Se recomienda el uso de AWS Resource Explorer para la exploraciรณn de los recursos como instancias RDB, buckets S3o tablas de Amazon DynamoDB."}],"description": "Inventario de activos","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.5.aws.cm.1": {"name": "op.exp.5.aws.cm.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.5","DescripcionControl": "La entidad usuaria puede hacer uso de la utilidad AWS Change Manager para mantener un registro actualizado de las plantillas y peticiones de cambio en las que se incluya informaciรณn en detalle sobre 
estos."}],"description": "Gestiรณn de cambios","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.5.aws.ct.1": {"name": "op.exp.5.aws.ct.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.5","DescripcionControl": "Asegurar que CloudTrail estรฉ activo para todas las regiones."}],"description": "Gestiรณn de cambios","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.6.aws.gd.1": {"name": "op.exp.6.aws.gd.1","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.6","DescripcionControl": "Activar la protecciรณn contra software malintencionado de GuardDuty en todas las regiones."}],"description": "Protecciรณn frente a cรณdigo daรฑino","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.7.aws.cf.1": {"name": "op.exp.7.aws.cf.1","checks": {"cloudfront_distributions_logging_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.7","DescripcionControl": "Habilitar los logs de acceso de CloudFront"}],"description": "Gestiรณn de incidentes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.7.aws.gd.1": {"name": "op.exp.7.aws.gd.1","checks": {"guardduty_is_enabled": "PASS","guardduty_no_high_severity_findings": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.7","DescripcionControl": "Habilitar GuardDuty para la detecciรณn de incidentes de seguridad"}],"description": "Gestiรณn de incidentes","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"op.exp.7.aws.sh.1": {"name": "op.exp.7.aws.sh.1","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.7","DescripcionControl": "Habilitar Security Hub"}],"description": "Gestiรณn de incidentes","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.8.aws.ct.1": {"name": "op.exp.8.aws.ct.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Habilitar la herramienta CloudTrail en todas las regiones. 
Este serviio estรก habilitado por defecto cuando se crea una nueva cuenta, pero es posible deshabilitarlo."}],"description": "Registro de actividad","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.8.aws.ct.2": {"name": "op.exp.8.aws.ct.2","checks": {"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Establecer un filtro de mรฉtricas desde AWS CloudWatch para detectar cambios en las configuraciones de CloudTrail"}],"description": "Registro de actividad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.aws.ct.3": {"name": "op.exp.8.aws.ct.3","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Habilitar la validaciรณn de archivos en todos los trails, evitando asรญ que estos se vean modificados o eliminados."}],"description": "Registro de actividad","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.aws.ct.4": {"name": "op.exp.8.aws.ct.4","checks": {"cloudtrail_s3_dataevents_write_enabled": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Habilitar la entrega continua de eventos de CloudTrail a un bucket S3 dedicado con el fin de unificar los archivos de registro."}],"description": "Registro de actividad","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"op.exp.8.aws.ct.5": {"name": "op.exp.8.aws.ct.5","checks": {"cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Se deberรกn habilitar alertas para los siguientes eventos: * Llamadas no permitidas a la API, * Accesos no permitidos a la consola, * Todos los intentos de acceso sin el correcto uso de MFA, * Toda la actividad realizada sobre y por la cuenta root, * Cualquier cambio en las polรญticas IAM"}],"description": "Registro de actividad","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"op.exp.8.aws.ct.6": {"name": "op.exp.8.aws.ct.6","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "medida","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Activar el servicio de AWS CloudTrail para registrar la actividad de los usuarios relativa a la configuraciรณn de los 
servicios VPN Site-to-Site y AWS DirectConnect"}],"description": "Registro de actividad","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.8.aws.cw.1": {"name": "op.exp.8.aws.cw.1","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8","DescripcionControl": "Crear alertas utilizando herramientas como Amazon CloudWatch Events para anunciar el inicio de sesiรณn y el uso de las credenciales de usuario root de la cuenta de administraciรณn"}],"description": "Registro de actividad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.9.aws.ct.1": {"name": "op.exp.9.aws.ct.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.9","DescripcionControl": "Habilitar AWS Incident Manager y AWS CloudTrail en todas las regiones con el fin de recopilar informaciรณn para generar contenido prescriptivo para la creaciรณn de informes exigidos por la medida de seguridad."}],"description": "Registro de la gestiรณn de incidentes","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.ext.7.aws.am.1": {"name": "op.ext.7.aws.am.1","checks": {"account_maintain_current_contact_details": null,"account_security_contact_information_is_registered": null,"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.7","DescripcionControl": "Deberรก proveerse la informaciรณn relacionada con contactos alternativos (de facturaciรณn, operaciones y seguridad), con correos que no dependan de la misma persona. Deberรก comprobarse regularmente que estas cuentas funcionan correctamente y mantener listas de correo para asegurar la recepciรณn de avisos por personal disponible en cada momento. 
Ademรกs, deberรกn establecerse preguntas de desafรญo de seguridad y respuestas para el caso de que sea necesario autenticarse como propiterio de la cuenta para ponerse en contacto con el soporte de AWS."}],"description": "Gestiรณn de incidentes","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"op.mon.1.aws.ct.1": {"name": "op.mon.1.aws.ct.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.1","DescripcionControl": "Activar el servicio de eventos AWS CloudTrail para todas las regiones."}],"description": "Detecciรณn de intrusiรณn","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.mon.1.aws.gd.1": {"name": "op.mon.1.aws.gd.1","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.1","DescripcionControl": "En ausencia de otras herramientas de terceros, habilitar Amazon GuarDuty para la detecciรณn de amenazas e intrusiones."}],"description": "Detecciรณn de intrusiรณn","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.mon.1.aws.gd.2": {"name": "op.mon.1.aws.gd.2","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.1","DescripcionControl": "Deberรก habilitarse Amazon GuardDuty para todas las regiones tanto en la cuenta raรญz como en las cuentas miembro de un entorno multi-cuenta."}],"description": "Detecciรณn de intrusiรณn","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.mon.1.aws.gd.3": {"name": "op.mon.1.aws.gd.3","checks": {"guardduty_centrally_managed": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.1","DescripcionControl": "Todas las cuentas miembro deberรกn estar aรฑadidas para la supervisiรณn bajo la cuenta raรญz."}],"description": "Detecciรณn de intrusiรณn","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.mon.1.aws.gd.4": {"name": "op.mon.1.aws.gd.4","checks": {},"status": "PASS","attributes": [{"Tipo": "medida","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.mon.1","DescripcionControl": "La administraciรณn de Amazon GuardDuty quedarรก delegada exclusivamente a la cuenta de seguridad para garantizar una correcta asignaciรณn de los roles para este servicio."}],"description": "Detecciรณn de intrusiรณn","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 
0}},"op.mon.2.aws.sh.1": {"name": "op.mon.2.aws.sh.1","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.2","DescripcionControl": "Utilizar Security Hub para obtener una vista consolidada de los hallazgos de seguridad en los servicios de AWS habilitados."}],"description": "Sistema de mรฉtricas","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.pl.4.aws.ec2.1": {"name": "op.pl.4.aws.ec2.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4","DescripcionControl": "La entidad usuaria deberรก llevar a cabo el estudio de capacidades a las que hace referencia la medida de seguridad, si bien (โ€ฆ) deberรก tener especialmente en cuenta: * Las capacidades de procesamiento, almacenamiento y comunicaciones de las instancais desplegadas en AWS."}],"description": "Necesidades de procesamiento","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.com.1.aws.elb.1": {"name": "mp.com.1.aws.elb.1","checks": {"elb_ssl_listeners": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Habilitar TLS en los balanceadores de carga ELB "}],"description": "Perรญmetro seguro","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.com.1.aws.elb.2": {"name": "mp.com.1.aws.elb.2","checks": {"elb_insecure_ssl_ciphers": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Evitar el uso de protocolos de cifrado inseguros para las polรญticas de seguridad de ELB. Esto podrรญa dejar la conexiรณn SSL entre balanceadores y clientes vulnerables a ser explotados. En particular deberรก evitarse el uso de TLS 1.0. 
"}],"description": "Perรญmetro seguro","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"mp.com.1.aws.nfw.1": {"name": "mp.com.1.aws.nfw.1","checks": {"networkfirewall_in_all_vpc": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Filtrar todo el trรกfico entrante y saliente de la VPC a travรฉs de Firewalls de red."}],"description": "Perรญmetro seguro","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.com.1.aws.nfw.2": {"name": "mp.com.1.aws.nfw.2","checks": {"fms_policy_compliant": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.1","DescripcionControl": "Incidir en la utilizaciรณn de AWS Firewall Manager para gestionar los firewalls de forma centralizada."}],"description": "Perรญmetro seguro","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"mp.com.2.aws.vpn.1": {"name": "mp.com.2.aws.vpn.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.com.2","DescripcionControl": "Garantizar que las conexiones entre la VPC y la red local (remota) se canalizan a travรฉs de VPN Site-to-Site o bien a travรฉs de Direct Connect."}],"description": "Protecciรณn de la confidencialidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.com.2.aws.vpn.2": {"name": "mp.com.2.aws.vpn.2","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.com.2","DescripcionControl": "Garantizar que las conexiones entre la VPC y la red local (remota) se canalizan a travรฉs de VPN Site-to-Site o bien a travรฉs de Direct Connect."}],"description": "Protecciรณn de la confidencialidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.com.3.aws.elb.1": {"name": "mp.com.3.aws.elb.1","checks": {"elbv2_insecure_ssl_ciphers": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["integridad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.3","DescripcionControl": "Habilitar TLS en los balanceadores de carga ELB."}],"description": "Protecciรณn de la integridad y de la autenticidad","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"mp.com.3.aws.elb.2": {"name": "mp.com.3.aws.elb.2","checks": {"elbv2_insecure_ssl_ciphers": "PASS"},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["integridad","autenticidad"],"Dependencias": [],"ModoEjecucion": 
"automรกtico","IdGrupoControl": "mp.com.3","DescripcionControl": "Evitar el uso de protocolos de cifrado inseguros en la conexiรณn TLS entre clientes y balanceadores de carga. En particular, se deberรก evitar el uso de TLS 1.0."}],"description": "Protecciรณn de la integridad y de la autenticidad","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"mp.com.4.aws.vpc.1": {"name": "mp.com.4.aws.vpc.1","checks": {"vpc_subnet_separate_private_public": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.4","DescripcionControl": "Los flujos de informaciรณn de red se deben separar a travรฉs de la utilizaciรณn de diferentes subnets."}],"description": "Separaciรณn de flujos de informaciรณn en la red","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.com.4.aws.vpc.2": {"name": "mp.com.4.aws.vpc.2","checks": {"ec2_instance_internet_facing_with_instance_profile": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.4","DescripcionControl": "Evitar el uso de subnets con la opciรณn de asignaciรณn automรกtica de IPs (auto-assign Public IP)."}],"description": "Separaciรณn de flujos de informaciรณn en la red","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.si.2.aws.dydb.1": {"name": "mp.si.2.aws.dydb.1","checks": {"dynamodb_tables_kms_cmk_encryption_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2","DescripcionControl": "Aplicar cifrado sobre las bases de datos DynamoDB, que deben implementar cifrado seguro mediante el uso de claves de cliente (KMS)."}],"description": "Criptografรญa","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.1.aws.iam.1": {"name": "op.acc.1.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1","DescripcionControl": "Utilizar los grupos y roles, en lugar de los usuarios individuales, para controlar el acceso. 
Esto permitirรก implementar un conjunto de permisos en lugar de actualizar muchas polรญticas individuales cuando el acceso de un usuario necesita cambiar."}],"description": "Identificador รบnico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.1.aws.iam.2": {"name": "op.acc.1.aws.iam.2","checks": {"iam_check_saml_providers_sts": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.1","DescripcionControl": "Es muy recomendable la utilizaciรณn de un proveedor de identidades que permita administrar las identidades en un lugar centralizado, en vez de utilizar IAM para ello."}],"description": "Proveedor de identidad centralizado","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.1.aws.iam.3": {"name": "op.acc.1.aws.iam.3","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1","DescripcionControl": "El usuario raรญz actรบa como usuario IAM de seguridad (usuario \"breakglass\"), dado que no se encuentra sincronizado con el proveedor de identidades externo, lo que permite la recuperaciรณn de emergencia del acceso a AWS en caso de imposibilidad de autenticar a los usuarios a travรฉs del proveedor de identidades."}],"description": "Proveedor de identidad centralizado","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.1.aws.iam.4": {"name": "op.acc.1.aws.iam.4","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1","DescripcionControl": "Utilizar identificadores รบnicos para los usuarios del sistema."}],"description": "Identificador รบnico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.1.aws.iam.5": {"name": "op.acc.1.aws.iam.5","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1","DescripcionControl": "Cada cuenta estarรก asociada a un identificador รบnico."}],"description": "Identificador รบnico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.1.aws.iam.6": {"name": "op.acc.1.aws.iam.6","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1","DescripcionControl": "Las cuentas deben ser inhabilitadas en los siguientes casos: cuando el usuario deja la organizaciรณn; cuando el usuario cesa en la funciรณn para la cual se requerรญa la cuenta de usuario; o, cuando la persona que la autorizรณ, da orden en sentido contrario."}],"description": "Cuentas de usuario","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.1.aws.iam.7": {"name": "op.acc.1.aws.iam.7","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": 
"control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1","DescripcionControl": "Las cuentas se retendrรกn durante el periodo necesario para atender a las necesidades de trazabilidad de los registros de actividad asociados a las mismas. A este periodo se le denominarรก periodo de retenciรณn."}],"description": "Cuentas de usuario","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.aws.iam.1": {"name": "op.acc.2.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2","DescripcionControl": "Hacer uso de las polรญticas IAM para la asignaciรณn de privilegios de acceso. Deberรกn administrarse permisos para controlar el acceso de las identidades de personas y mรกquinas y sus cargas de trabajo."}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.aws.iam.2": {"name": "op.acc.2.aws.iam.2","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2","DescripcionControl": "Deberรก definirse una polรญtica IAM que conceda permiso al usuario o rol de IAM para utilizar los recursos y las acciones de la API especรญficos que necesita"}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.aws.iam.3": {"name": "op.acc.2.aws.iam.3","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2","DescripcionControl": "De acuerdo con las medidas del Esquema Nacional de seguridad los derechos de acceso de cada recurso, se establecerรกn segรบn las decisiones de la persona responsable del recurso, ateniรฉndose a la polรญtica y normativa de seguridad del sistema"}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.aws.iam.4": {"name": "op.acc.2.aws.iam.4","checks": {"iam_avoid_root_usage": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.2","DescripcionControl": "Se deberรก delegar en cuentas administradoras la administraciรณn de la organizaciรณn, dejando la cuenta maestra sin uso y con las medidas de seguridad pertinentes."}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.2.aws.vpn.1": {"name": "op.acc.2.aws.vpn.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2","DescripcionControl": "Deberรก definirse una 
correcta política de permisos IAM para operaciones de Amazon WorkSpaces según las recomendaciones establecidas en la sección 3.1.1 Control de Acceso de la guía CCN STIC-887A Guía de configuración segura AWS."}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.aws.vpn.2": {"name": "op.acc.2.aws.vpn.2","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2","DescripcionControl": "Deberán restringirse los permisos a usuarios para utilizar la acción ec2:DescribeVpnConnections. Esta acción permite a los usuarios ver la información de configuración de la gateway de cliente sobre las conexiones Site-to-Site VPN de su cuenta."}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.aws.vpn.3": {"name": "op.acc.2.aws.vpn.3","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2","DescripcionControl": "La rotación de certificados de VPN deberá asignarse siguiendo las recomendaciones de segregación de funciones tal y como se explica en la sección 3.1.1."}],"description": "Requisitos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.3.aws.iam.1": {"name": "op.acc.3.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.3","DescripcionControl": "Enumerar los recursos específicos a los que puede obtener acceso una función de trabajo."}],"description": "Segregación de funciones y tareas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.3.aws.iam.2": {"name": "op.acc.3.aws.iam.2","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.3","DescripcionControl": "Emplear correctamente el uso de RBAC/ABAC para separar las funciones de desarrollo y operación."}],"description": "Segregación de funciones y tareas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.3.aws.iam.3": {"name": "op.acc.3.aws.iam.3","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.3","DescripcionControl": "Emplear correctamente el uso de RBAC/ABAC para separar las funciones de autorización y control de uso."}],"description": "Segregación de funciones y tareas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.3.aws.iam.4": {"name": "op.acc.3.aws.iam.4","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": 
"operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.3","DescripcionControl": "Las polรญticas IAM deberรญan estar asociadas solo a grupos y a roles."}],"description": "Segregaciรณn de funciones y tareas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.iam.1": {"name": "op.acc.4.aws.iam.1","checks": {"awslambda_function_url_public": null,"awslambda_function_url_cors_policy": null,"sqs_queues_not_publicly_accessible": "PASS","organizations_scp_check_deny_regions": null,"s3_bucket_policy_public_write_access": "PASS","iam_policy_allows_privilege_escalation": null,"cloudwatch_cross_account_sharing_disabled": null,"awslambda_function_not_publicly_accessible": "PASS","organizations_account_part_of_organizations": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Las polรญticas IAM deben permitir sรณlo los privilegios necesarios para cada rol. Se recomienda comenzar con el mรญnimo nivel de permisos e ir aรฑadiendo permisos adicionales segรบn vaya surgiendo la necesidad en lugar de comenzar con permisos administrativos."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 3,"total": 13,"manual": 0}},"op.acc.4.aws.iam.2": {"name": "op.acc.4.aws.iam.2","checks": {"iam_policy_allows_privilege_escalation": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Evitar polรญticas con comodines (wildcards) en su definiciรณn, que puedan otorgar privilegios administrativos completos."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 5,"manual": 0}},"op.acc.4.aws.iam.3": {"name": "op.acc.4.aws.iam.3","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automatico","IdGrupoControl": "op.acc.4","DescripcionControl": "Para una correcta implementaciรณn de la estrategia de polรญticas de acceso, se recomienda utilizar la herramienta Policy Simulator para probar y solucionar posibles problemas en la asignaciรณn de polรญticas."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.acc.4.aws.iam.4": {"name": "op.acc.4.aws.iam.4","checks": 
{},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "Se puede utilizar Acces Analyzer para identificar recursos y cuentas, validar las polรญticas contra las prรกcticas recomendadas y generar polรญticas con base en la actividad de acceso de registros de CloudTrail."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.iam.5": {"name": "op.acc.4.aws.iam.5","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "En cuanto a los accesos a las instancias alojadas en AWS se recomienda emplear mecanismos para mantener a las personas alejadas de los datos. Es decir, limitar al mรกximo el acceso directo a los datos por parte de los usuarios."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.iam.6": {"name": "op.acc.4.aws.iam.6","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Con AWS Systems Manager Automation pueden utilizarse documentos de automatizaciรณn y diseรฑar flujos de trabajo para la administraciรณn de cambios o la ejecuciรณn de operaciones estรกndar para administrar las instancias EC2 (p. ej., actualizar los sistemas operativos), en lugar de permitir el acceso directo. 
"}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.acc.4.aws.iam.7": {"name": "op.acc.4.aws.iam.7","checks": {"iam_avoid_root_usage": null,"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Se restringirรก todo acceso a las acciones especificadas para el usuario root de una cuenta."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"op.acc.4.aws.iam.8": {"name": "op.acc.4.aws.iam.8","checks": {"organizations_scp_check_deny_regions": null,"organizations_account_part_of_organizations": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Se restringirรก todo acceso a las acciones especificadas para el usuario root de una cuenta."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"op.acc.4.aws.iam.9": {"name": "op.acc.4.aws.iam.9","checks": {"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Se configurarรกn diferentes permisos a las cuentas de usuario, limitando la utilizaciรณn de la cuenta โ€œrootโ€ para tareas especรญficas que necesiten un nivel de privilegios elevado, esta configuraciรณn debe entenderse como un mecanismo para impedir que el trabajo directo con usuarios con privilegios de administrador repercuta negativamente en la seguridad, a acometer todas las acciones con el mรกximo privilegio cuando este no es siempre requerido."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"op.acc.4.aws.sys.1": {"name": "op.acc.4.aws.sys.1","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.4","DescripcionControl": "Habilitar systems manager automation para evitar acceso remoto humano a tareas automatizables."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.acc.4.aws.vpn.1": {"name": "op.acc.4.aws.vpn.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": 
"op.acc.4","DescripcionControl": "Las configuraciones de las polรญticas de las AWS VPN deben tener las redes especรญficas con las que se va a establecer la conectividad y evitar polรญticas genรฉricas basadas en routing donde se pierde el control granular de las redes permitidas en los SA de la VPN."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.vpn.2": {"name": "op.acc.4.aws.vpn.2","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "En configuraciones de AWS DirectConnect, deberรกn controlarse los AS y el routing que se lleva por BGP, de modo que se propague el mรญnimo de rutas y se asegure que no exista redistribuciรณn de rutas/redes privadas de entornos del cliente hacia el ISP."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.6.aws.iam.1": {"name": "op.acc.6.aws.iam.1","checks": {"iam_user_two_active_access_key": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6","DescripcionControl": "Evitar el uso permanente de mรบltiples claves de acceso para un mismo usuario IAM."}],"description": "Mecanismo de autenticaciรณn (usuarios de la organizaciรณn)","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.6.aws.iam.2": {"name": "op.acc.6.aws.iam.2","checks": {"iam_user_accesskey_unused": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6","DescripcionControl": "Las claves de acceso deberรกn rotarse cada 90 dรญas o menos."}],"description": "Mecanismo de autenticaciรณn (usuarios de la organizaciรณn)","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"op.acc.6.aws.iam.3": {"name": "op.acc.6.aws.iam.3","checks": {"iam_user_accesskey_unused": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6","DescripcionControl": "Deberรก habilitarse el vencimiento de las credenciales de los usuarios. 
(Bien a través de la política de contraseñas de IAM o del proveedor de identidades federado)."}],"description": "Mecanismo de autenticación (usuarios de la organización)","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"op.acc.6.aws.iam.4": {"name": "op.acc.6.aws.iam.4","checks": {"iam_user_no_setup_initial_access_key": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.acc.6","DescripcionControl": "Se deberá evitar la asignación por defecto de claves de acceso para todos los usuarios que tengan acceso a la consola. Para cumplir con este requisito, se recomienda revisar qué usuarios se encuentran dados de alta en la cuenta de AWS y disponen de acceso a la consola de administración y evitar la asignación de claves de acceso cuando no son necesarias."}],"description": "Mecanismo de autenticación (usuarios de la organización)","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.cont.2.aws.az.1": {"name": "op.cont.2.aws.az.1","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "continuidad del servicio","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.cont.2","DescripcionControl": "(Organizativo) Deberá implementarse correctamente la distribución de servicios según regiones y zonas de disponibilidad para limitar al máximo los riesgos asociados a una única ubicación."}],"description": "Plan de continuidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.1.aws.cfg.1": {"name": "op.exp.1.aws.cfg.1","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.1","DescripcionControl": "En lo referente al inventariado de activos, asegurar que AWS Config está habilitado en todas las regiones y utilizar la herramienta para obtener una vista de los recursos existentes en las cuentas de AWS."}],"description": "Inventario de activos","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.1.aws.cfg.2": {"name": "op.exp.1.aws.cfg.2","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "bajo","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.1","DescripcionControl": "Configurar una regla de Config Rules que alerte sobre el despliegue de recursos sin las etiquetas correspondientes asociadas."}],"description": "Inventario de activos","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.1.aws.sys.1": {"name": "op.exp.1.aws.sys.1","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": 
["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.1","DescripcionControl": "En el รกmbito del software desplegado en las instancias de EC2, habilitar AWS System Manager Inventory para todo el entorno de EC2 en caso de no utilizar herramientas de terceros."}],"description": "Inventario de activos","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"op.exp.1.aws.sys.2": {"name": "op.exp.1.aws.sys.2","checks": {"organizations_tags_policies_enabled_and_attached": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.1","DescripcionControl": "Asignar metadatos personalizados a cada nodo administrado con informaciรณn sobre el responsable del activo."}],"description": "Inventario de activos","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.1.aws.tag.1": {"name": "op.exp.1.aws.tag.1","checks": {"organizations_tags_policies_enabled_and_attached": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.1","DescripcionControl": "Para la correcta identificaciรณn del responsable, asociar etiquetas para todos los activos."}],"description": "Inventario de activos","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.3.aws.cfg.1": {"name": "op.exp.3.aws.cfg.1","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "bajo","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.3","DescripcionControl": "El cumplimiento de los requisitos se puede apoyar en la utilizaciรณn de los servicios Config, Config Rules y Conformance Packs para identificar lรญneas base de configuraciรณn para evaluar si los recursos de AWS se ajustan a las prรกcticas autorizadas por la organizaciรณn."}],"description": "Gestiรณn de la configuraciรณn de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.4.aws.ami.1": {"name": "op.exp.4.aws.ami.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.4","DescripcionControl": "Una forma eficiente de garantizar la instalaciรณn de las versiones actualizadas y aprobadas del software de los sistemas es la utilizaciรณn de Golden AMIs."}],"description": "Mantenimiento y actualizaciones de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.4.aws.sys.2": {"name": "op.exp.4.aws.sys.2","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": 
["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.4","DescripcionControl": "Utilizar AWS Systems Manager Patch Manager para planificar y gestionar la aplicaciรณn de parches minimizando los riesgos asociados a tener instancias con software desactualizado y expuesto a vulnerabilidades conocidas."}],"description": "Mantenimiento y actualizaciones de seguridad","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"op.exp.5.aws.cal.1": {"name": "op.exp.5.aws.cal.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.5","DescripcionControl": "Utilizar AWS Change Calendar para establecer una ventana de tiempo (fecha y hora) en la que realizar los cambios y las pruebas de preproducciรณn en equipos equivalentes a los de producciรณn sin riesgo a que estas afecten a la continuidad del servicio prestado."}],"description": "Gestiรณn de cambios","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.9.aws.img.1": {"name": "op.exp.9.aws.img.1","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssmincidents_enabled_with_plans": null},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.9","DescripcionControl": "Habilitar AWS Incident Manager y AWS CloudTrail en todas las regiones con el fin de recopilar informaciรณn para generar contenido prescriptivo para la creaciรณn de informes exigidos por la medida de seguridad."}],"description": "Registro de la gestiรณn de incidentes","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"op.mon.3.aws.cwl.1": {"name": "op.mon.3.aws.cwl.1","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automatico","IdGrupoControl": "op.mon.3","DescripcionControl": "Deberรก asegurarse que todos los servicios que se utilicen en la arquitectura de la aplicaciรณn desplegada en AWS estรฉn generando logs"}],"description": "Vigilancia","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.info.6.aws.iam.1": {"name": "mp.info.6.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de la informaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.info.6","DescripcionControl": "La organizaciรณn puede hacer uso de roles y polรญticas IAM para la definiciรณn y asignaciรณn de permisos en cuanto a controles de acceso de las copias de respaldo."}],"description": "Copias de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.info.6.aws.tag.1": {"name": "mp.info.6.aws.tag.1","checks": {"organizations_tags_policies_enabled_and_attached": null},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": 
"medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de la informaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.info.6","DescripcionControl": "Los planes de respaldo se pueden integrar con AWS Tags, acotando con base en las etiquetas de los recursos el alcance de cada proceso de copiado."}],"description": "Copias de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.4.aws.iam.10": {"name": "op.acc.4.aws.iam.10","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "Se evitarรก que los usuarios puedan deshabilitar o modificar servicios relacionados con el รกrea de seguridad como AWS Config o AWS CloudWatch."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.iam.11": {"name": "op.acc.4.aws.iam.11","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "Deberรก definirse una polรญtica de IAM que conceda permiso al usuario o rol de IAMpara utilizar exclusivamente los recursos y las acciones de WorkSpace que necesita."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.iam.12": {"name": "op.acc.4.aws.iam.12","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "Las polรญticas IAM รบnicamente deben poder asignarse por el usuario que tenga la funciรณn de control de accesos expresamente atribuida."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.4.aws.iam.13": {"name": "op.acc.4.aws.iam.13","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.4","DescripcionControl": "No utilizar el usuario raรญz salvo necesidad expresa."}],"description": "Proceso de gestiรณn de derechos de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.cont.3.aws.drs.1": {"name": "op.cont.3.aws.drs.1","checks": {"drs_job_exist": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "continuidad del servicio","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.cont.3","DescripcionControl": "La organizaciรณn puede hacer uso del servicio AWS Elastic Disaster Recovery, programando y ejecutando pruebas no disruptivas (simulacros que no afectan ni al servidor de origen ni a la replicaciรณn de datos en curso) que prueben el correcto 
funcionamiento de las recuperaciones del plan de continuidad."}],"description": "Pruebas periódicas","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.10.aws.cmk.1": {"name": "op.exp.10.aws.cmk.1","checks": {"iam_policy_no_full_access_to_kms": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.10","DescripcionControl": "Los usuarios o roles con privilegios para la creación de claves deben ser diferentes a los que van a utilizar las claves para operaciones de cifrado."}],"description": "Protección de claves criptográficas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.10.aws.cmk.2": {"name": "op.exp.10.aws.cmk.2","checks": {"iam_policy_no_full_access_to_kms": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.10","DescripcionControl": "Utilizar claves gestionadas por los clientes (CMK)."}],"description": "Protección de claves criptográficas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.10.aws.cmk.3": {"name": "op.exp.10.aws.cmk.3","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.10","DescripcionControl": "Activar la rotación de las claves CMK."}],"description": "Protección de claves criptográficas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.10.aws.cmk.4": {"name": "op.exp.10.aws.cmk.4","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.10","DescripcionControl": "Para el archivo posterior a la explotación y destrucción de las claves se deben deshabilitar todas las claves CMK que no estén en uso."}],"description": "Protección de claves criptográficas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.10.aws.cmk.5": {"name": "op.exp.10.aws.cmk.5","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.10","DescripcionControl": "Eliminar las claves deshabilitadas que no estén en uso y no mantengan ningún objeto o recurso cifrado, completando el ciclo de vida de la clave."}],"description": "Protección de claves criptográficas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.10.aws.cmk.6": {"name": 
"op.exp.10.aws.cmk.6","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.10","DescripcionControl": "Utilizar el principio de mรญnimos privilegios para las polรญticas asociadas a claves."}],"description": "Protecciรณn de claves criptogrรกficas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.10.aws.cmk.7": {"name": "op.exp.10.aws.cmk.7","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.10","DescripcionControl": "Utilizar tags y alias para una mejor administraciรณn de las claves."}],"description": "Protecciรณn de claves criptogrรกficas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.10.aws.cmk.8": {"name": "op.exp.10.aws.cmk.8","checks": {},"status": "PASS","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.10","DescripcionControl": "Utilizar las polรญticas IAM y las concesiones de claves para el acceso a las mismas."}],"description": "Protecciรณn de claves criptogrรกficas","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.10.aws.tag.1": {"name": "op.exp.10.aws.tag.1","checks": {"organizations_tags_policies_enabled_and_attached": null},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.10","DescripcionControl": "Se recomienda utilizar tags y alias para una mejor gestiรณn y administraciรณn de las claves."}],"description": "Protecciรณn de claves criptogrรกficas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.mon.1.aws.flow.1": {"name": "op.mon.1.aws.flow.1","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "requisito","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.1","DescripcionControl": "Activar el servicio VPC FlowLogs."}],"description": "Detecciรณn de intrusiรณn","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.pl.2.aws.warch.1": {"name": "op.pl.2.aws.warch.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.2.r3","DescripcionControl": "Es recomendable que la entidad usuaria se apoye en el marco de trabajo AWS Well-Architected Framework"}],"description": "Validaciรณn de datos","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 
0}},"op.pl.4.r1.aws.cw.1": {"name": "op.pl.4.r1.aws.cw.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4.r1","DescripcionControl": "En caso de no disponer de herramientas de terceros, se deberรกn utilizar las herramientas de monitorizaciรณn de la capaciad indicadas para monitorizar las capacidades de la infraestructura y el grado de consumo de los servicios en funciรณn de las cuotas disponibles. (CloudWatch)"}],"description": "Mejora continua de la gestiรณn de la capacidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.pl.4.r1.aws.sq.1": {"name": "op.pl.4.r1.aws.sq.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4.r1","DescripcionControl": "En caso de no disponer de herramientas de terceros, se deberรกn utilizar las herramientas de monitorizaciรณn de la capaciad indicadas para monitorizar las capacidades de la infraestructura y el grado de consumo de los servicios en funciรณn de las cuotas disponibles. (Service Quotas)"}],"description": "Mejora continua de la gestiรณn de la capacidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.pl.4.r1.aws.sq.2": {"name": "op.pl.4.r1.aws.sq.2","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4.r1","DescripcionControl": "En cuanto a la monitorizaciรณn sobre el grado de consumo, utilizar la soluciรณn nativa Quota Monitor."}],"description": "Previsiรณn y actualizaciรณn de la capacidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.pl.4.r1.aws.sq.3": {"name": "op.pl.4.r1.aws.sq.3","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4.r1","DescripcionControl": "Visualizar las cuotas de servicio y configurar alarmas a travรฉs de la integraciรณn de AWS Service Quotas con CloudWatch."}],"description": "Monitorizaciรณn de la capacidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.info.6.aws.bcku.1": {"name": "mp.info.6.aws.bcku.1","checks": {"backup_plans_exist": "PASS","backup_vaults_exist": null,"backup_reportplans_exist": null},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de la informaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.info.6","DescripcionControl": "Para los procedimientos de respaldo de cualquiera de los dos entornos (local y nube) y siempre y cuando se utilicen recursos compatibles en el entorno local, la entidad puede hacer uso de AWS Backup, que permite elaboraciรณn de planes de respaldo y la definiciรณn de reglas de frecuencia, ciclo de vida, lugar de almacenamiento y etiquetado de las copias de seguridad."}],"description": "Copias de seguridad","checks_status": {"fail": 0,"pass": 1,"total": 3,"manual": 0}},"mp.si.2.r1.aws.kms.1": {"name": 
"mp.si.2.r1.aws.kms.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.si.2.r1","DescripcionControl": "Utilizar productos certificados conforme a op.pl.5, si bien AWS KMS es un producto certificado cuyo uso satisface la exigencia de este control."}],"description": "Productos certificados","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.si.2.r2.aws.ebs.1": {"name": "mp.si.2.r2.aws.ebs.1","checks": {"ec2_ebs_snapshots_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de los soportes de informaciรณn","Dimensiones": ["confidencialidad","integridad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.si.2.r2","DescripcionControl": "Se deberรก asegurar el cifrado de las copias de seguridad (snapshots) de EBS."}],"description": "Copias de seguridad","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"mp.sw.2.r1.aws.acb.1": {"name": "mp.sw.2.r1.aws.acb.1","checks": {"codebuild_project_older_90_days": "FAIL","codebuild_project_user_controlled_buildspec": "PASS"},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de la informaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.sw.2.r1","DescripcionControl": "Habilitar Amazon CodeBuild para el apoyo de la realizaciรณn de pruebas en entornos aisaldos."}],"description": "Aceptaciรณn y puesta en servicio","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"op.exp.8.r1.aws.ct.2": {"name": "op.exp.8.r1.aws.ct.2","checks": {"cloudtrail_insights_exist": null,"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r1","DescripcionControl": "Configurar la herramienta CloudTrail de manera que realice el registro de eventos de administraciรณn, eventos de datos y eventos anรณmalos (insights)."}],"description": "Revisiรณn de los registros","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"op.exp.8.r1.aws.ct.3": {"name": "op.exp.8.r1.aws.ct.3","checks": {"cloudtrail_insights_exist": null,"cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r1","DescripcionControl": "Registrar los eventos de lectura y escritura de datos."}],"description": "Revisiรณn de los registros","checks_status": {"fail": 0,"pass": 0,"total": 4,"manual": 0}},"op.exp.8.r1.aws.ct.4": {"name": "op.exp.8.r1.aws.ct.4","checks": {"cloudtrail_s3_dataevents_read_enabled": 
null,"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r1","DescripcionControl": "Registrar los eventos de lectura y escritura de datos."}],"description": "Revisiรณn de los registros","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"op.exp.8.r1.aws.ct.6": {"name": "op.exp.8.r1.aws.ct.6","checks": {"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r1","DescripcionControl": "Habilitar la entrega continua de eventos de CloudTrail a un bucket de Amazon S3"}],"description": "Revisiรณn de los registros","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r1.aws.ct.7": {"name": "op.exp.8.r1.aws.ct.7","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r1","DescripcionControl": "Integrar CloudTrail con el servicio CloudWatch Logs"}],"description": "Revisiรณn de los registros","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r1.aws.cw.1": {"name": "op.exp.8.r1.aws.cw.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.8.r1","DescripcionControl": "Utilizar el servicio AWS CloudWatch para centralizar y revisar los registros de todos los sistemas independientemente de su origen."}],"description": "Revisiรณn de los registros","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.8.r3.aws.ct.1": {"name": "op.exp.8.r3.aws.ct.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.8.r3","DescripcionControl": "Ejecutar la acciรณn PutRetentionPolicy de Amazon CloudWatch, permitiendo asรญ establecer la retenciรณn del grupo de registros especificado y configurar el nรบmero de dรญas durante los cuales se conservarรกn los eventos de registro en el grupo seleccionado de acuerdo con el documento de seguridad correspondiente. 
Paralelamente, se debe definir un periodo de retención para los datos almacenados en CloudTrail Lake."}],"description": "Retención de registros","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.8.r3.aws.cw.1": {"name": "op.exp.8.r3.aws.cw.1","checks": {"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.8.r3","DescripcionControl": "Ejecutar la acción PutRetentionPolicy de Amazon CloudWatch, permitiendo así establecer la retención del grupo de registros especificado y configurar el número de días durante los cuales se conservarán los eventos de registro en el grupo seleccionado de acuerdo con el documento de seguridad correspondiente. Paralelamente, se debe definir un periodo de retención para los datos almacenados en CloudTrail Lake."}],"description": "Retención de registros","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r4.aws.ct.1": {"name": "op.exp.8.r4.aws.ct.1","checks": {"iam_policy_allows_privilege_escalation": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_role_cross_service_confused_deputy_prevention": null,"iam_customer_attached_policy_no_administrative_privileges": null,"iam_customer_unattached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "Asignar correctamente las políticas AWS IAM para el acceso y borrado de los registros y sus copias de seguridad haciendo uso del principio de mínimo privilegio."}],"description": "Control de acceso","checks_status": {"fail": 0,"pass": 0,"total": 7,"manual": 0}},"op.exp.8.r4.aws.ct.2": {"name": "op.exp.8.r4.aws.ct.2","checks": {"s3_bucket_public_access": null,"s3_bucket_policy_public_write_access": "PASS","cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "Utilizar una política de bucket para restringir el acceso de forma pública e imponer restricciones sobre cuáles de los usuarios pueden eliminar objetos de Amazon S3."}],"description": "Control de acceso","checks_status": {"fail": 0,"pass": 2,"total": 3,"manual": 0}},"op.exp.8.r4.aws.ct.3": {"name": "op.exp.8.r4.aws.ct.3","checks": {"cloudtrail_bucket_requires_mfa_delete": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotación","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automático","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "Activar el acceso por MFA al registro de actividad almacenado en los buckets de Amazon S3 dedicados para AWS CloudTrail."}],"description": "Control de acceso","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r4.aws.ct.4": {"name": "op.exp.8.r4.aws.ct.4","checks": {"cloudtrail_kms_encryption_enabled": 
"FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "Configurar los archivos de logs de AWS CloudTrail para aprovechar el cifrado del lado del servidor (SSE โ€“ Server Side Encryption) y las claves maestras creadas por el cliente (CMK de KMS)."}],"description": "Control de acceso","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r4.aws.ct.5": {"name": "op.exp.8.r4.aws.ct.5","checks": {"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "El almacรฉn de logs de CloudTrail no deberรญa ser accesible de forma pรบblica"}],"description": "Control de acceso","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r4.aws.ct.6": {"name": "op.exp.8.r4.aws.ct.6","checks": {"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "El almacรฉn de logs de CloudTrail no deberรญa ser accesible de forma pรบblica(ACLs)"}],"description": "Control de acceso","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.exp.8.r4.aws.ct.7": {"name": "op.exp.8.r4.aws.ct.7","checks": {"cloudtrail_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "Cifrado de los trails con KMS"}],"description": "Control de acceso","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.8.r4.aws.ct.8": {"name": "op.exp.8.r4.aws.ct.8","checks": {"iam_policy_allows_privilege_escalation": null,"iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_role_cross_service_confused_deputy_prevention": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["trazabilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.8.r4","DescripcionControl": "Asignar correctamente las polรญticas IAM para el acceso y borrado de los registros y sus copias de seguridad haciendo uso del principio de mรญnimo privilegio."}],"description": "Control de acceso","checks_status": {"fail": 0,"pass": 0,"total": 7,"manual": 0}},"op.mon.3.r1.aws.gd.1": {"name": "op.mon.3.r1.aws.gd.1","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": 
"automรกtico","IdGrupoControl": "op.mon.3.r1","DescripcionControl": "Activar GuardDuty y Security Hub o bien disponer de un SIEM externo a AWS"}],"description": "Correlaciรณn de eventos","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.mon.3.r1.aws.sh.1": {"name": "op.mon.3.r1.aws.sh.1","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r1","DescripcionControl": "Activar GuardDuty y Security Hub o bien disponer de un SIEM externo a AWS"}],"description": "Correlaciรณn de eventos","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.mon.3.r2.aws.sh.1": {"name": "op.mon.3.r2.aws.sh.1","checks": {"securityhub_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r2","DescripcionControl": "Utilizar las herramientas AWS Config y Security hub"}],"description": "Anรกlisis dinรกmico","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.mon.3.r3.aws.gd.1": {"name": "op.mon.3.r3.aws.gd.1","checks": {"guardduty_is_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r3","DescripcionControl": "Activar GuardDuty (ya cubierto)"}],"description": "Ciberamenazas avanzadas","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.pl.4.r1.aws.sns.1": {"name": "op.pl.4.r1.aws.sns.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4.r1","DescripcionControl": "Para la creaciรณn de alarmas en materia de capacidad de las instancias, se debe configurar un tema de SNS que permita el envรญo de mails automรกticos a la direcciรณn de correo seleccionada."}],"description": "Monitorizaciรณn de la capacidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.pl.4.r1.aws.sns.2": {"name": "op.pl.4.r1.aws.sns.2","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "planificaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.pl.4.r1","DescripcionControl": "Configurar alarmas correspondientes a las diferentes capacidades (SNS) como uso de CPU, capacidad de almacenamiento o latencia."}],"description": "Monitorizaciรณn de la capacidad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.com.3.r1.aws.vpn.1": {"name": "mp.com.3.r1.aws.vpn.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["integridad","autenticidad"],"Dependencias": [],"ModoEjecucion": 
"manual","IdGrupoControl": "mp.com.3.r1","DescripcionControl": "Utilizar VPN Site-to-Site para conectar las VPCs con las redes locales o externas."}],"description": "Redes privadas virtuales","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.com.4.r1.aws.vpc.1": {"name": "mp.com.4.r1.aws.vpc.1","checks": {"vpc_different_regions": null,"vpc_subnet_separate_private_public": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.4.r1","DescripcionControl": "Implementar la segmentaciรณn a travรฉs de la utilizaciรณn de diferentes VPCs."}],"description": "Segmentaciรณn lรณgica avanzada","checks_status": {"fail": 1,"pass": 0,"total": 2,"manual": 0}},"mp.com.4.r2.aws.vpc.1": {"name": "mp.com.4.r2.aws.vpc.1","checks": {"vpc_peering_routing_tables_with_least_privilege": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": ["mp.com.4.r3"],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.4.r2","DescripcionControl": "Implementar la segmentaciรณn a travรฉs de la utilizaciรณn de diferentes VPCs conectadas entre sรญ por VPN."}],"description": "Segmentaciรณn lรณgica avanzada","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"mp.com.4.r3.aws.vpc.1": {"name": "mp.com.4.r3.aws.vpc.1","checks": {"vpc_different_regions": null,"vpc_subnet_different_az": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de las comunicaciones","Dimensiones": ["confidencialidad","integridad"],"Dependencias": ["mp.com.4.r2"],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.com.4.r3","DescripcionControl": "Implementar la segmentaciรณn a travรฉs de diferentes VPCs situadas en diferentes ubicaciones."}],"description": "Segmentaciรณn fรญsica","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"mp.sw.2.r1.aws.cfgd.1": {"name": "mp.sw.2.r1.aws.cfgd.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de la informaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.sw.2.r2","DescripcionControl": "Habilitar CloudFormation Guard para el apoyo en las tareas de inspecciรณn de recursos no conformes implementados en el cรณdigo fuente."}],"description": "Aceptaciรณn y puesta en servicio","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.1.r1.aws.iam.1": {"name": "op.acc.1.r1.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.1.r1","DescripcionControl": "Los identificadores de usuario deberรกn ser asignados en el proveedor de identidades (o en IAM) de modo que se permita singularizar a la persona asociada a cada identificador y cumplir con el resto de requisitos del 
refuerzo"}],"description": "Identificaciรณn de usuario","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.2.r1.aws.iam.1": {"name": "op.acc.2.r1.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.2.r1","DescripcionControl": "Evitar el uso de asunciรณn de roles para cualquier cuenta."}],"description": "Privilegios de acceso","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.3.r1.aws.iam.1": {"name": "op.acc.3.r1.aws.iam.1","checks": {"iam_support_role_created": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.3.r1","DescripcionControl": "En caso de ser de aplicaciรณn, la segregaciรณn deberรก tener en cuenta la separaciรณn de las funciones de configuraciรณn y mantenimiento y de auditorรญa de cualquier otra."}],"description": "Segregaciรณn rigurosa","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.3.r2.aws.iam.1": {"name": "op.acc.3.r2.aws.iam.1","checks": {"iam_securityaudit_role_created": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "opcional","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.3.r2","DescripcionControl": "Disponer de cuentas con privilegios de auditorรญa estrictamente controladas y personalizadas."}],"description": "Privilegios de auditorรญa","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.3.r3.aws.iam.1": {"name": "op.acc.3.r3.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "opcional","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.3.r3","DescripcionControl": "Limitar el acceso a la informaciรณn de seguridad del sistema a los administradores de seguridad utilizando los mecanismos de acceso imprescindibles (consola, interfaz web, acceso remoto etc.)."}],"description": "Acceso a la informaciรณn de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.6.r1.aws.iam.1": {"name": "op.acc.6.r1.aws.iam.1","checks": {"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": ["op.acc.6.r2","op.acc.6.r4"],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6.r1","DescripcionControl": "Las contraseรฑas de los usuarios deberรกn tener normas de complejidad mรญnima y robustez."}],"description": "Contraseรฑas","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"op.acc.6.r2.aws.iam.1": {"name": "op.acc.6.r2.aws.iam.1","checks": 
{"iam_root_mfa_enabled": null,"iam_administrator_access_with_mfa": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": ["op.acc.6.r1","op.acc.6.r4"],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6.r2","DescripcionControl": "MFA deberรก estar habilitado para todas las cuentas que tengan contraseรฑa para acceder a la consola, incluyendo el usuario root."}],"description": "Contraseรฑa + otro factor de autenticaciรณn","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"op.acc.6.r2.aws.iam.2": {"name": "op.acc.6.r2.aws.iam.2","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.6.r2","DescripcionControl": "Se recomienda que la organizaciรณn determine quรฉ llamadas a la API deben tambiรฉn contar con seguridad reforzada a travรฉs de un doble factor de autenticaciรณn."}],"description": "Contraseรฑa + otro factor de autenticaciรณn","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.6.r3.aws.iam.1": {"name": "op.acc.6.r3.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.6.r3","DescripcionControl": "Utilizar el servicio AWS IAM Roles Anywhere para crear un ancla de confianza en la que se haga referencia al servicio AWS Certificate Manager Private CA o registrar sus propias autoridades de certificaciรณn (CA), permitiendo usar el certificado emitido por la misma para obtener credenciales temporales para el acceso al entorno AWS. Estos certificados deberรกn estar protegidos por un segundo factor."}],"description": "Certificados","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.6.r4.aws.iam.1": {"name": "op.acc.6.r4.aws.iam.1","checks": {"iam_root_hardware_mfa_enabled": null,"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": ["op.acc.6.r1","op.acc.6.r3"],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6.r4","DescripcionControl": "Habilitar los dispositivos MFA fรญsicos para todos los usuarios IAM mediante la consola, lรญnea de comandos o la propia API de IAM. 
Del mismo modo, el uso de estos certificados deberรก estar protegido por un segundo factor de tipo PIN o biomรฉtrico."}],"description": "Certificados en dispositivo fรญsico","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"op.acc.6.r5.aws.iam.1": {"name": "op.acc.6.r5.aws.iam.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6.r5","DescripcionControl": "Para registrar los intentos de acceso, se deberรก habilitar CloudTrail en todas las regiones y activar el registro de acceso de usuarios."}],"description": "Registro","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"op.acc.6.r5.aws.iam.2": {"name": "op.acc.6.r5.aws.iam.2","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.6.r5","DescripcionControl": "Habilitar la informaciรณn de usuario sobre la fecha de รบltimo uso de sus claves de acceso."}],"description": "Registro","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.6.r7.aws.iam.1": {"name": "op.acc.6.r7.aws.iam.1","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6.r7","DescripcionControl": "Activar la deshabilitaciรณn de las credenciales de los usuarios IAM que no hayan sido empleadas durante un periodo de tiempo (o bien, se deberรก establecer la deshabilitaciรณn en el proveedor de identidades)."}],"description": "Suspensiรณn por no utilizaciรณn","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"op.acc.6.r8.aws.iam.1": {"name": "op.acc.6.r8.aws.iam.1","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.acc.6.r8","DescripcionControl": "Se deberรก emplear como mecanismo de autenticaciรณn o bien una contraseรฑa mรกs otro factor de autenticaciรณn, o bien un certificado cualificado (con o sin soporte fรญsico) protegido por un doble factor de autenticaciรณn."}],"description": "Doble factor para acceso desde o a travรฉs de zonas no controladas","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.acc.6.r9.aws.iam.1": {"name": "op.acc.6.r9.aws.iam.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.6.r9","DescripcionControl": "Deberรก asegurarse que se estรก haciendo uso de HTTPS en todas las llamadas a API. 
Esto se puede lograr a travรฉs de una polรญtica IAM que rechace el trรกfico que no sea HTTPS."}],"description": "Acceso remoto (todos los niveles)","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.acc.6.r9.aws.iam.2": {"name": "op.acc.6.r9.aws.iam.2","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "control de acceso","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.acc.6.r9","DescripcionControl": "En caso de que las llamadas a las APIs no se produzcan de manera constante, se recomienda condicionar su realizaciรณn a aquellas franjas horarias en las que sean necesarias."}],"description": "Acceso remoto (todos los niveles)","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.1.r3.aws.tag.1": {"name": "op.exp.1.r3.aws.tag.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "opcional","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.1.r4","DescripcionControl": "Mantener actualizada una relaciรณn de los componentes software de terceros utilizados en el despliegue del sistema. Listado equivalente a lo requerido en mp.sw.1.r5."}],"description": "Lista de componentes software","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.3.r3.aws.cfg.1": {"name": "op.exp.3.r3.aws.cfg.1","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.3.r3","DescripcionControl": "La entidad usuaria puede consultar el histรณrico de configuraciones de recursos en AWS Config."}],"description": "Copias de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.3.r4.aws.cfg.2": {"name": "op.exp.3.r4.aws.cfg.2","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.3.r3","DescripcionControl": "Desplegar toda la infraestructura de AWS a travรฉs de cรณdigo con el servicio AWS CloudFormation."}],"description": "Copias de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.4.r2.aws.sys.1": {"name": "op.exp.4.r2.aws.sys.1","checks": {"ec2_instance_managed_by_ssm": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.4.r2","DescripcionControl": "Utilizar la soluciรณn AWS Systems Manager Automation para automatizar las tareas de correcciรณn en servicios de AWS como EC2 y RDS."}],"description": "Prevenciรณn de fallos","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"op.exp.6.r1.aws.sys.1": {"name": "op.exp.6.r1.aws.sys.1","checks": {},"status": "PASS","attributes": [{"Tipo": 
"recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.6.r1","DescripcionControl": "Automatizar las operaciones estรกndar a llevar a cabo para la respuesta en caso de incidente a travรฉs de AWS System Manager"}],"description": "Escaneo periรณdico","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.6.r3.aws.sys.1": {"name": "op.exp.6.r3.aws.sys.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.6.r3","DescripcionControl": "Hacer uso de AWS System Manager Inventory para definir, a nivel de software, una lista blanca de aplicaciones."}],"description": "Lista blanca","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.6.r4.aws.sys.1": {"name": "op.exp.6.r4.aws.sys.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.6.r4","DescripcionControl": "Automatizar tareas estรกndar a travรฉs de AWS System Manager"}],"description": "Capacidad de respuesta en caso de incidente","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.mon.3.r2.aws.cfg.1": {"name": "op.mon.3.r2.aws.cfg.1","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r2","DescripcionControl": "Utilizar las herramientas AWS Config y Security hub"}],"description": "Anรกlisis dinรกmico","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.mon.3.r6.aws.cfg.1": {"name": "op.mon.3.r6.aws.cfg.1","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r6","DescripcionControl": "Utilizar Config Rules y AWS Inspector"}],"description": "Inspecciones de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"op.exp.4.r4.aws.insp.1": {"name": "op.exp.4.r4.aws.insp.1","checks": {"inspector2_is_enabled": "FAIL","inspector2_active_findings_exist": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "opcional","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.exp.4.r4","DescripcionControl": "Desplegar a nivel de sistema una estrategia de monitorizaciรณn continua de amenazas y vulnerabilidades detallando: indicadores crรญticos de seguridad, polรญtica de aplicaciรณn de parches y criterios de 
revisiรณn regular y excepcional de amenazas del sistema."}],"description": "Monitorizaciรณn continua","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"op.mon.3.r2.aws.insp.1": {"name": "op.mon.3.r2.aws.insp.1","checks": {"inspector2_is_enabled": "FAIL","inspector2_active_findings_exist": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r2","DescripcionControl": "Utilizar la herramienta Inspector para la detecciรณn de posibles vulnerabilidades de las instancias EC2, las funciones Lambda y las imรกgenes de contenedor."}],"description": "Anรกlisis dinรกmico","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"op.mon.3.r6.aws.insp.1": {"name": "op.mon.3.r6.aws.insp.1","checks": {"inspector2_is_enabled": "FAIL","inspector2_active_findings_exist": "FAIL"},"status": "FAIL","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "alto","Categoria": "monitorizaciรณn del sistema","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "op.mon.3.r6","DescripcionControl": "Utilizar Config Rules y AWS Inspector."}],"description": "Inspecciones de seguridad","checks_status": {"fail": 2,"pass": 0,"total": 2,"manual": 0}},"mp.info.6.r2.aws.bcku.1": {"name": "mp.info.6.r2.aws.bcku.1","checks": {},"status": "PASS","attributes": [{"Tipo": "recomendacion","Marco": "medidas de protecciรณn","Nivel": "alto","Categoria": "protecciรณn de la informaciรณn","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "mp.info.6.r2","DescripcionControl": "La organizaciรณn puede hacer uso de la nube de AWS como ubicaciรณn diferente para el almacenamiento de la copia de seguridad separada del resto o, incluso, utilizar los servicios de ubicaciรณn para separar una copia de seguridad en una ubicaciรณn diferente dentro de la propia nube."}],"description": "Protecciรณn de las copias de seguridad","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"op.exp.1.r2.aws.sminv.1": {"name": "op.exp.1.r2.aws.sminv.1","checks": {},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "operacional","Nivel": "opcional","Categoria": "explotaciรณn","Dimensiones": ["confidencialidad","integridad","trazabilidad","autenticidad","disponibilidad"],"Dependencias": [],"ModoEjecucion": "manual","IdGrupoControl": "op.exp.1.r2","DescripcionControl": "Disponer de herramientas que permitan visualizar de forma continua el estado de todos los equipos en la red, en particular servidores y los dispositivos de red y comunicaciones."}],"description": "Identificaciรณn periรณdica de activos","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"mp.s.4.r1.aws.shieldadv.1": {"name": "mp.s.4.r1.aws.shieldadv.1","checks": {"shield_advanced_protection_in_global_accelerators": null,"shield_advanced_protection_in_route53_hosted_zones": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"shield_advanced_protection_in_internet_facing_load_balancers": null},"status": "PASS","attributes": [{"Tipo": "refuerzo","Marco": "medidas de protecciรณn","Nivel": 
"alto","Categoria": "protecciรณn de los servicios","Dimensiones": ["disponibilidad"],"Dependencias": [],"ModoEjecucion": "automรกtico","IdGrupoControl": "mp.s.4.r1","DescripcionControl": "Activar AWS Shield Advanced con el fin de disponer de una herramienta de prevenciรณn, detecciรณn y mitigaciรณn de ataques de denegaciรณn de servicio."}],"description": "Detecciรณn y reacciรณn","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}}},"requirements_passed": 83,"requirements_failed": 37,"requirements_manual": 69,"total_requirements": 189,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "eeee198e-1eda-48dc-aeb6-eb28e98f8dde","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "kisa_isms_p_2023_korean_aws","framework": "KISA-ISMS-P","version": "2023-korean","description": "ISMS-P ์ธ์ฆ์€ ํ•œ๊ตญ์ธํ„ฐ๋„ท์ง„ํฅ์›(KISA)์ด ์ œ์ •ํ•œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„๋ฅผ ๊ธฐ๋ฐ˜์œผ๋กœ, ๋…๋ฆฝ์ ์ธ ์‹ฌ์‚ฌ๊ธฐ๊ด€์ด ๊ธฐ์—…์ด๋‚˜ ์กฐ์ง์˜ ๋ณด์•ˆ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ํ™œ๋™์ด ์ธ์ฆ ๊ธฐ์ค€์„ ์ถฉ์กฑํ•˜๋Š”์ง€ ํ‰๊ฐ€ํ•œ ํ›„ ์ธ์ฆ์„ ๋ถ€์—ฌํ•˜๋Š” ์ œ๋„์ž…๋‹ˆ๋‹ค. ์ด๋ฅผ ํ†ตํ•ด ๊ธฐ์—…๊ณผ ๊ธฐ๊ด€์€ ์ œ๊ณตํ•˜๋Š” ์„œ๋น„์Šค์— ๋Œ€ํ•œ ๋Œ€์ค‘์˜ ์‹ ๋ขฐ๋ฅผ ๋†’์ด๊ณ , ์ ์  ๋ณต์žกํ•ด์ง€๋Š” ์‚ฌ์ด๋ฒ„ ์œ„ํ˜‘์— ํšจ๊ณผ์ ์œผ๋กœ ๋Œ€์‘ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋˜ํ•œ, ISMS-P๋Š” ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฅผ ์ฒด๊ณ„์ ์œผ๋กœ ์ˆ˜๋ฆฝํ•˜๊ณ  ์šด์˜ํ•  ์ˆ˜ ์žˆ๋Š” ํฌ๊ด„์ ์ธ ์ง€์นจ์„ ์ œ๊ณตํ•ฉ๋‹ˆ๋‹ค.","region": "eu-west-1","requirements": {"1.1.1": {"name": "๊ฒฝ์˜์ง„์˜ ์ฐธ์—ฌ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.1.1 ๊ฒฝ์˜์ง„์˜ ์ฐธ์—ฌ","Subdomain": "1.1. ๊ด€๋ฆฌ์ฒด๊ณ„","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋ณด๊ณ  ์ฒด๊ณ„(์˜์‚ฌ์†Œํ†ต๊ณ„ํš ๋“ฑ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ ํšŒ์˜๋ก","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจ(๊ฒฝ์˜์ง„ ์Šน์ธ๋‚ด์—ญ ํฌํ•จ)","์ •๋ณด๋ณดํ˜ธ๊ณ„ํš ๋ฐ ๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš(๊ฒฝ์˜์ง„ ์Šน์ธ๋‚ด์—ญ ํฌํ•จ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง๋„"],"AuditChecklist": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„์˜ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜ํ™œ๋™ ์ „๋ฐ˜์— ๊ฒฝ์˜์ง„์˜ ์ฐธ์—ฌ๊ฐ€ ์ด๋ฃจ์–ด์งˆ ์ˆ˜ ์žˆ๋„๋ก ๋ณด๊ณ  ๋ฐ ์˜์‚ฌ๊ฒฐ์ • ๋“ฑ์˜ ์ฑ…์ž„๊ณผ ์—ญํ• ์„ ๋ฌธ์„œํ™”ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฒฝ์˜์ง„์ด ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™์— ๊ด€ํ•œ ์˜์‚ฌ๊ฒฐ์ •์— ์ ๊ทน์ ์œผ๋กœ ์ฐธ์—ฌํ•  ์ˆ˜ ์žˆ๋Š” ๋ณด๊ณ , ๊ฒ€ํ†  ๋ฐ ์Šน์ธ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…์„œ์— ๋ถ„๊ธฐ๋ณ„๋กœ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ˜„ํ™ฉ์„ ๊ฒฝ์˜์ง„์—๊ฒŒ ๋ณด๊ณ ํ•˜๋„๋ก ๋ช…์‹œํ•˜์˜€์œผ๋‚˜, ์žฅ๊ธฐ๊ฐ„ ๊ด€๋ จ ๋ณด๊ณ ๋ฅผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ค‘์š” ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™(์œ„ํ—˜ํ‰๊ฐ€, ์œ„ํ—˜์ˆ˜์šฉ์ˆ˜์ค€ ๊ฒฐ์ •, ์ •๋ณด๋ณดํ˜ธ๋Œ€์ฑ… ๋ฐ ์ดํ–‰๊ณ„ํš ๊ฒ€ํ† , ์ •๋ณด๋ณดํ˜ธ๋Œ€์ฑ… ์ดํ–‰๊ฒฐ๊ณผ ๊ฒ€ํ† , ๋ณด์•ˆ๊ฐ์‚ฌ ๋“ฑ)์„ ์ˆ˜ํ–‰ํ•˜๋ฉด์„œ ๊ด€๋ จ ํ™œ๋™๊ด€๋ จ ๋ณด๊ณ , ์Šน์ธ ๋“ฑ ์˜์‚ฌ๊ฒฐ์ •์— ๊ฒฝ์˜์ง„ ๋˜๋Š” ๊ฒฝ์˜์ง„์˜ ๊ถŒํ•œ์„ ์œ„์ž„๋ฐ›์€ ์ž๊ฐ€ ์ฐธ์—ฌํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜ ๊ด€๋ จ ์ฆ๊ฑฐ์ž๋ฃŒ๊ฐ€ ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ตœ๊ณ ๊ฒฝ์˜์ž๋Š” ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„์˜ ์ˆ˜๋ฆฝ๊ณผ ์šด์˜ํ™œ๋™ ์ „๋ฐ˜์— ๊ฒฝ์˜์ง„์˜ ์ฐธ์—ฌ๊ฐ€ ์ด๋ฃจ์–ด์งˆ ์ˆ˜ ์žˆ๋„๋ก ๋ณด๊ณ  ๋ฐ ์˜์‚ฌ๊ฒฐ์ • ์ฒด๊ณ„๋ฅผ ์ˆ˜๋ฆฝํ•˜์—ฌ ์šด์˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.2": 
{"name": "์ตœ๊ณ ์ฑ…์ž„์ž์˜ ์ง€์ •","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.1.2 ์ตœ๊ณ ์ฑ…์ž„์ž์˜ ์ง€์ •","Subdomain": "1.1. ๊ด€๋ฆฌ์ฒด๊ณ„","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž ์ž„๋ช…๊ด€๋ จ ์ž๋ฃŒ(์ธ์‚ฌ๋ช…๋ น, ์ธ์‚ฌ์นด๋“œ ๋“ฑ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง๋„","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจ","์ง๋ฌด๊ธฐ์ˆ ์„œ(์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ์—ญํ•  ๋ฐ ์ฑ…์ž„์— ๊ด€ํ•œ ์‚ฌํ•ญ)","์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ์‹ ๊ณ  ๋‚ด์—ญ","๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš(๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž ์ง€์ •์— ๊ด€ํ•œ ์‚ฌํ•ญ)"],"AuditChecklist": ["์ตœ๊ณ ๊ฒฝ์˜์ž๋Š” ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ฒ˜๋ฆฌ์— ๊ด€ํ•œ ์—…๋ฌด๋ฅผ ์ด๊ด„ํ•˜์—ฌ ์ฑ…์ž„์งˆ ์ตœ๊ณ ์ฑ…์ž„์ž๋ฅผ ๊ณต์‹์ ์œผ๋กœ ์ง€์ •ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž๋Š” ์˜ˆ์‚ฐ, ์ธ๋ ฅ ๋“ฑ ์ž์›์„ ํ• ๋‹นํ•  ์ˆ˜ ์žˆ๋Š” ์ž„์›๊ธ‰์œผ๋กœ ์ง€์ •ํ•˜๊ณ  ์žˆ์œผ๋ฉฐ, ๊ด€๋ จ ๋ฒ•๋ น์— ๋”ฐ๋ฅธ ์ž๊ฒฉ์š”๊ฑด์„ ์ถฉ์กฑํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณดํ†ต์‹ ๋ง๋ฒ•์— ๋”ฐ๋ฅธ ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ์ง€์ • ๋ฐ ์‹ ๊ณ  ์˜๋ฌด ๋Œ€์ƒ์ž์ž„์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž๋ฅผ ์ง€์ • ๋ฐ ์‹ ๊ณ ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์™€ ๊ด€๋ จ๋œ ์‹ค์งˆ์ ์ธ ๊ถŒํ•œ ๋ฐ ์ง€์œ„๋ฅผ ๋ณด์œ ํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ์ธ์›์„ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ์ฑ…์ž„์ž๋กœ ์ง€์ •ํ•˜๊ณ  ์žˆ์–ด, ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์— ๊ด€ํ•œ ์—…๋ฌด๋ฅผ ์ด๊ด„ํ•ด์„œ ์ฑ…์ž„์งˆ ์ˆ˜ ์žˆ๋‹ค๊ณ  ๋ณด๊ธฐ ์–ด๋ ค์šด ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์กฐ์ง๋„์ƒ์— ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž๋ฅผ ๋ช…์‹œํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ธ์‚ฌ๋ฐœ๋ น ๋“ฑ์˜ ๊ณต์‹์ ์ธ ์ง€์ •์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ISMS ์ธ์ฆ ์˜๋ฌด๋Œ€์ƒ์ž์ด๋ฉด์„œ ์ „๋…„๋„ ๋ง ๊ธฐ์ค€ ์ž์‚ฐ์ด์•ก์ด 5์ฒœ์–ต ์›์„ ์ดˆ๊ณผํ•œ ์ •๋ณดํ†ต์‹ ์„œ๋น„์Šค ์ œ๊ณต์ž์ด์ง€๋งŒ ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž๊ฐ€ CIO๋ฅผ ๊ฒธ์งํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด), ์ œ31์กฐ(๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ์ง€์ •)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ45์กฐ์˜3(์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž์˜ ์ง€์ • ๋“ฑ)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์ตœ๊ณ ๊ฒฝ์˜์ž๋Š” ์ •๋ณด๋ณดํ˜ธ ์—…๋ฌด๋ฅผ ์ด๊ด„ํ•˜๋Š” ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์—…๋ฌด๋ฅผ ์ด๊ด„ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ฑ…์ž„์ž๋ฅผ ์˜ˆ์‚ฐยท์ธ๋ ฅ ๋“ฑ ์ž์›์„ ํ• ๋‹นํ•  ์ˆ˜ ์žˆ๋Š” ์ž„์›๊ธ‰์œผ๋กœ ์ง€์ •ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.3": {"name": "์กฐ์ง ๊ตฌ์„ฑ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.1.3 ์กฐ์ง ๊ตฌ์„ฑ","Subdomain": "1.1. 
๊ด€๋ฆฌ์ฒด๊ณ„","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ ๊ทœ์ •ยทํšŒ์˜๋ก","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์‹ค๋ฌด ํ˜‘์˜์ฒด ๊ทœ์ •ยทํšŒ์˜๋ก","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง๋„","๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš","์ง๋ฌด๊ธฐ์ˆ ์„œ"],"AuditChecklist": ["์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ์—…๋ฌด๋ฅผ ์ง€์›ํ•˜๊ณ  ์กฐ์ง์˜ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™์„ ์ฒด๊ณ„์ ์œผ๋กœ ์ดํ–‰ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ „๋ฌธ์„ฑ์„ ๊ฐ–์ถ˜ ์‹ค๋ฌด์กฐ์ง์„ ๊ตฌ์„ฑํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์กฐ์ง ์ „๋ฐ˜์— ๊ฑธ์นœ ์ค‘์š”ํ•œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ์‚ฌํ•ญ์— ๋Œ€ํ•˜์—ฌ ๊ฒ€ํ† , ์Šน์ธ ๋ฐ ์˜์‚ฌ๊ฒฐ์ •์„ ํ•  ์ˆ˜ ์žˆ๋Š” ์œ„์›ํšŒ๋ฅผ ๊ตฌ์„ฑํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ „์‚ฌ์  ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™์„ ์œ„ํ•˜์—ฌ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๋‹ด๋‹น์ž ๋ฐ ๋ถ€์„œ๋ณ„ ๋‹ด๋‹น์ž๋กœ ๊ตฌ์„ฑ๋œ ์‹ค๋ฌด ํ˜‘์˜์ฒด๋ฅผ ๊ตฌ์„ฑํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ๋ฅผ ๊ตฌ์„ฑํ•˜์˜€์œผ๋‚˜, ์ž„์› ๋“ฑ ๊ฒฝ์˜์ง„์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š๊ณ  ์‹ค๋ฌด ๋ถ€์„œ์˜ ์žฅ์œผ๋กœ ๊ตฌ์„ฑ๋˜์–ด ์žˆ์–ด ์กฐ์ง์˜ ์ค‘์š” ์ •๋ณด ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์— ๊ด€ํ•œ ์‚ฌํ•ญ์„ ๊ฒฐ์ •ํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ์ง€์นจ์— ๋”ฐ๋ผ ์ค‘์š” ์ •๋ณด์ฒ˜๋ฆฌ ๋ถ€์„œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ ๋ถ€์„œ์˜ ์žฅ(ํŒ€์žฅ๊ธ‰)์œผ๋กœ ๊ตฌ์„ฑ๋œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์‹ค๋ฌด ํ˜‘์˜์ฒด๋ฅผ ๊ตฌ์„ฑํ•˜์˜€์œผ๋‚˜, ์žฅ๊ธฐ๊ฐ„ ์šด์˜ ์‹ค์ ์ด ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ๋ฅผ ๊ฐœ์ตœํ•˜์˜€์œผ๋‚˜, ์—ฐ๊ฐ„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ณ„ํš ๋ฐ ๊ต์œก ๊ณ„ํš, ์˜ˆ์‚ฐ ๋ฐ ์ธ๋ ฅ ๋“ฑ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์— ๊ด€ํ•œ ์ฃผ์š” ์‚ฌํ•ญ์ด ๊ฒ€ํ†  ๋ฐ ์˜์‚ฌ๊ฒฐ์ •์ด ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์‹ฌ์˜ยท์˜๊ฒฐ์„ ์œ„ํ•ด ์ •๋ณด๋ณดํ˜ธ์œ„์›ํšŒ๋ฅผ ๊ตฌ์„ฑํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์šด์˜ ๋ฐ IT๋ณด์•ˆ ๊ด€๋ จ ์กฐ์ง๋งŒ ์ฐธ์—ฌํ•˜๊ณ  ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์กฐ์ง์€ ์ฐธ์—ฌํ•˜์ง€ ์•Š๊ณ  ์žˆ์–ด ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์— ๊ด€ํ•œ ์‚ฌํ•ญ์„ ๊ฒฐ์ •ํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์ตœ๊ณ ๊ฒฝ์˜์ž๋Š” ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์˜ ํšจ๊ณผ์  ๊ตฌํ˜„์„ ์œ„ํ•œ ์‹ค๋ฌด์กฐ์ง, ์กฐ์ง ์ „๋ฐ˜์˜ ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ฃผ์š” ์‚ฌํ•ญ์„ ๊ฒ€ํ†  ๋ฐ ์˜๊ฒฐํ•  ์ˆ˜ ์žˆ๋Š” ์œ„์›ํšŒ, ์ „์‚ฌ์  ๋ณดํ˜ธํ™œ๋™์„ ์œ„ํ•œ ๋ถ€์„œ๋ณ„ ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋‹ด๋‹น์ž๋กœ ๊ตฌ์„ฑ๋œ ํ˜‘์˜์ฒด๋ฅผ ๊ตฌ์„ฑํ•˜์—ฌ ์šด์˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.4": {"name": "๋ฒ”์œ„ ์„ค์ •","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.1.4 ๋ฒ”์œ„ ์„ค์ •","Subdomain": "1.1. 
๊ด€๋ฆฌ์ฒด๊ณ„","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ์ •์˜์„œ","์ •๋ณด์ž์‚ฐ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ชฉ๋ก","๋ฌธ์„œ ๋ชฉ๋ก","์„œ๋น„์Šค ํ๋ฆ„๋„","๊ฐœ์ธ์ •๋ณด ํ๋ฆ„๋„","์ „์‚ฌ ์กฐ์ง๋„","์‹œ์Šคํ…œ ๋ฐ ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„"],"AuditChecklist": ["์กฐ์ง์˜ ํ•ต์‹ฌ ์„œ๋น„์Šค ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์— ์˜ํ–ฅ์„ ์ค„ ์ˆ˜ ์žˆ๋Š” ํ•ต์‹ฌ์ž์‚ฐ์„ ํฌํ•จํ•˜๋„๋ก ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„๋ฅผ ์„ค์ •ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •์˜๋œ ๋ฒ”์œ„ ๋‚ด์—์„œ ์˜ˆ์™ธ์‚ฌํ•ญ์ด ์žˆ์„ ๊ฒฝ์šฐ ๋ช…ํ™•ํ•œ ์‚ฌ์œ  ๋ฐ ๊ด€๋ จ์ž ํ˜‘์˜ยท์ฑ…์ž„์ž ์Šน์ธ ๋“ฑ ๊ด€๋ จ ๊ทผ๊ฑฐ๋ฅผ ๊ธฐ๋กยท๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„๋ฅผ ๋ช…ํ™•ํžˆ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ด€๋ จ๋œ ๋‚ด์šฉ(์ฃผ์š” ์„œ๋น„์Šค ๋ฐ ์—…๋ฌด ํ˜„ํ™ฉ, ์ •๋ณด์‹œ์Šคํ…œ ๋ชฉ๋ก, ๋ฌธ์„œ๋ชฉ๋ก ๋“ฑ)์ด ํฌํ•จ๋œ ๋ฌธ์„œ๋ฅผ ์ž‘์„ฑํ•˜์—ฌ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๊ฐœ๋ฐœ์—…๋ฌด์— ๊ด€๋ จํ•œ ๊ฐœ๋ฐœ ๋ฐ ์‹œํ—˜ ์‹œ์Šคํ…œ, ์™ธ์ฃผ์—…์ฒด์ง์›, PC, ํ…Œ์ŠคํŠธ์šฉ ๋‹จ๋ง๊ธฐ ๋“ฑ์ด ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„์—์„œ ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„๋กœ ์„ค์ •๋œ ์„œ๋น„์Šค ๋˜๋Š” ์‚ฌ์—…์— ๋Œ€ํ•˜์—ฌ ์ค‘์š” ์˜์‚ฌ๊ฒฐ์ •์ž ์—ญํ• ์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š” ์ž„์ง์›, ์‚ฌ์—…๋ถ€์„œ ๋“ฑ์˜ ํ•ต์‹ฌ ์กฐ์ง(์ธ๋ ฅ)์„ ์ธ์ฆ๋ฒ”์œ„์— ํฌํ•จํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๊ฐœ๋ฐœ์—…๋ฌด์— ๊ด€๋ จํ•œ ๊ฐœ๋ฐœ ๋ฐ ์‹œํ—˜ ์‹œ์Šคํ…œ, ๊ฐœ๋ฐœ์ž PC, ํ…Œ์ŠคํŠธ์šฉ ๋‹จ๋ง๊ธฐ, ๊ฐœ๋ฐœ์กฐ์ง ๋“ฑ์ด ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„์—์„œ ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์กฐ์ง์˜ ํ•ต์‹ฌ ์„œ๋น„์Šค์™€ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ํ˜„ํ™ฉ ๋“ฑ์„ ๊ณ ๋ คํ•˜์—ฌ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„๋ฅผ ์„ค์ •ํ•˜๊ณ , ๊ด€๋ จ๋œ ์„œ๋น„์Šค๋ฅผ ๋น„๋กฏํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ์—…๋ฌด์™€ ์กฐ์ง, ์ž์‚ฐ, ๋ฌผ๋ฆฌ์  ์œ„์น˜ ๋“ฑ์„ ๋ฌธ์„œํ™”ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.5": {"name": "์ •์ฑ… ์ˆ˜๋ฆฝ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.1.5 ์ •์ฑ… ์ˆ˜๋ฆฝ","Subdomain": "1.1. 
๊ด€๋ฆฌ์ฒด๊ณ„","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจยท์ ˆ์ฐจ์„œ(์ œยท๊ฐœ์ • ๋‚ด์—ญ ํฌํ•จ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจ์ ˆ์ฐจ์„œ ์ œยท๊ฐœ์ • ์‹œ ์ดํ•ด๊ด€๊ณ„์ž ๊ฒ€ํ†  ํšŒ์˜๋ก","๊ฐœ์ธ์ •๋ณด ๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจ ์ œยท๊ฐœ์ • ๊ณต์ง€๋‚ด์—ญ(๊ทธ๋ฃน์›จ์–ด, ์‚ฌ๋‚ด๊ฒŒ์‹œํŒ ๋“ฑ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ ํšŒ์˜๋ก"],"AuditChecklist": ["์กฐ์ง์ด ์ˆ˜ํ–‰ํ•˜๋Š” ๋ชจ๋“  ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™์˜ ๊ทผ๊ฑฐ๋ฅผ ํฌํ•จํ•˜๋Š” ์ตœ์ƒ์œ„ ์ˆ˜์ค€์˜ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…์˜ ์‹œํ–‰์„ ์œ„ํ•˜์—ฌ ํ•„์š”ํ•œ ์„ธ๋ถ€์ ์ธ ๋ฐฉ๋ฒ•, ์ ˆ์ฐจ, ์ฃผ๊ธฐ ๋“ฑ์„ ๊ทœ์ •ํ•œ ์ง€์นจ, ์ ˆ์ฐจ, ๋งค๋‰ด์–ผ ๋“ฑ์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์‹œํ–‰๋ฌธ์„œ์˜ ์ œยท๊ฐœ์ • ์‹œ ์ตœ๊ณ ๊ฒฝ์˜์ž ๋˜๋Š” ์ตœ๊ณ ๊ฒฝ์˜์ž๋กœ๋ถ€ํ„ฐ ๊ถŒํ•œ์„ ์œ„์ž„๋ฐ›์€ ์ž์˜ ์Šน์ธ์„ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์‹œํ–‰๋ฌธ์„œ์˜ ์ตœ์‹ ๋ณธ์„ ๊ด€๋ จ ์ž„์ง์›์—๊ฒŒ ์ดํ•ดํ•˜๊ธฐ ์‰ฌ์šด ํ˜•ํƒœ๋กœ ์ œ๊ณตํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ๊ทœ์ •์— ๋”ฐ๋ฅด๋ฉด ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…์„œ ์ œยท๊ฐœ์ • ์‹œ์—๋Š” ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ์˜ ์˜๊ฒฐ์„ ๊ฑฐ์น˜๋„๋ก ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ตœ๊ทผ ์ •์ฑ…์„œ ๊ฐœ์ • ์‹œ ์œ„์›ํšŒ์— ์•ˆ๊ฑด์œผ๋กœ ์ƒ์ •ํ•˜์ง€ ์•Š๊ณ  ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ์Šน์ธ์„ ๊ทผ๊ฑฐ๋กœ๋งŒ ๊ฐœ์ •ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ… ๋ฐ ์ง€์นจ์„œ๊ฐ€ ์ตœ๊ทผ์— ๊ฐœ์ •๋˜์—ˆ์œผ๋‚˜, ํ•ด๋‹น ์‚ฌํ•ญ์ด ๊ด€๋ จ ๋ถ€์„œ ๋ฐ ์ž„์ง์›์—๊ฒŒ ๊ณต์œ ยท์ „๋‹ฌ๋˜์ง€ ์•Š์•„ ์ผ๋ถ€ ๋ถ€์„œ์—์„œ๋Š” ๊ตฌ๋ฒ„์ „์˜ ์ง€์นจ์„œ๋ฅผ ๊ธฐ์ค€์œผ๋กœ ์—…๋ฌด๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ… ๋ฐ ์ง€์นจ์„œ๋ฅผ ๋ณด์•ˆ๋ถ€์„œ์—์„œ๋งŒ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๊ณ , ์ž„์ง์›์ด ์—ด๋žŒํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ฒŒ์‹œํŒ, ๋ฌธ์„œ ๋“ฑ์˜ ๋ฐฉ๋ฒ•์œผ๋กœ ์ œ๊ณตํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ๋ฅผ ์ˆ˜๋ฆฝยท์ž‘์„ฑํ•˜๋ฉฐ, ์ด๋•Œ ์กฐ์ง์˜ ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋ฐฉ์นจ ๋ฐ ๋ฐฉํ–ฅ์„ ๋ช…ํ™•ํ•˜๊ฒŒ ์ œ์‹œํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ๋˜ํ•œ ์ •์ฑ…๊ณผ ์‹œํ–‰๋ฌธ์„œ๋Š” ๊ฒฝ์˜์ง„์˜ ์Šน์ธ์„ ๋ฐ›๊ณ , ์ž„์ง์› ๋ฐ ๊ด€๋ จ์ž์—๊ฒŒ ์ดํ•ดํ•˜๊ธฐ ์‰ฌ์šด ํ˜•ํƒœ๋กœ ์ „๋‹ฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.1.6": {"name": "์ž์› ํ• ๋‹น","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.1.6 ์ž์› ํ• ๋‹น","Subdomain": "1.1. 
๊ด€๋ฆฌ์ฒด๊ณ„","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™ ์—ฐ๊ฐ„ ์ถ”์ง„๊ณ„ํš์„œ(์˜ˆ์‚ฐ ๋ฐ ์ธ๋ ฅ์šด์˜๊ณ„ํš)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™ ๊ฒฐ๊ณผ ๋ณด๊ณ ์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํˆฌ์ž ๋‚ด์—ญ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง๋„"],"AuditChecklist": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋ถ„์•ผ๋ณ„ ์ „๋ฌธ์„ฑ์„ ๊ฐ–์ถ˜ ์ธ๋ ฅ์„ ํ™•๋ณดํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„์˜ ํšจ๊ณผ์  ๊ตฌํ˜„๊ณผ ์ง€์†์  ์šด์˜์„ ์œ„ํ•˜์—ฌ ํ•„์š”ํ•œ ์ž์›์„ ํ‰๊ฐ€ํ•˜์—ฌ ํ•„์š”ํ•œ ์˜ˆ์‚ฐ๊ณผ ์ธ๋ ฅ์„ ์ง€์›ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—ฐ๋„๋ณ„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์—…๋ฌด ์„ธ๋ถ€์ถ”์ง„ ๊ณ„ํš์„ ์ˆ˜๋ฆฝยท์‹œํ–‰ํ•˜๊ณ , ๊ทธ ์ถ”์ง„๊ฒฐ๊ณผ์— ๋Œ€ํ•œ ์‹ฌ์‚ฌ๋ถ„์„ยทํ‰๊ฐ€๋ฅผ ์‹ค์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง์„ ๊ตฌ์„ฑํ•˜๋Š”๋ฐ, ๋ถ„์•ผ๋ณ„ ์ „๋ฌธ์„ฑ์„ ๊ฐ–์ถ˜ ์ธ๋ ฅ์ด ์•„๋‹Œ ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๋˜๋Š” IT ๊ด€๋ จ ์ „๋ฌธ์„ฑ์ด ์—†๋Š” ์ธ์›์œผ๋กœ๋งŒ ๋ณด์•ˆ์ธ๋ ฅ์„ ๊ตฌ์„ฑํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ๊ธฐ์ˆ ์ ยท๊ด€๋ฆฌ์  ๋ณดํ˜ธ์กฐ์น˜์˜ ์š”๊ฑด์„ ๊ฐ–์ถ”๊ธฐ ์œ„ํ•œ ์ตœ์†Œํ•œ์˜ ๋ณด์•ˆ ์†”๋ฃจ์…˜ ๋„์ž…, ์•ˆ์ „์กฐ์น˜ ์ ์šฉ ๋“ฑ์„ ์œ„ํ•œ ๋น„์šฉ์„ ์ตœ๊ณ ๊ฒฝ์˜์ž๊ฐ€ ์ง€์›ํ•˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ธ์ฆ์„ ์ทจ๋“ํ•œ ์ดํ›„์— ์ธ๋ ฅ๊ณผ ์˜ˆ์‚ฐ ์ง€์›์„ ๋Œ€ํญ ์ค„์ด๊ณ  ๊ธฐ์กด ์ธ๋ ฅ์„ ๋‹ค๋ฅธ ๋ถ€์„œ๋กœ ๋ฐฐ์น˜ํ•˜๊ฑฐ๋‚˜ ์ผ๋ถ€ ์˜ˆ์‚ฐ์„ ๋‹ค๋ฅธ ์šฉ๋„๋กœ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ตœ๊ณ ๊ฒฝ์˜์ž๋Š” ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋ถ„์•ผ๋ณ„ ์ „๋ฌธ์„ฑ์„ ๊ฐ–์ถ˜ ์ธ๋ ฅ์„ ํ™•๋ณดํ•˜๊ณ , ๊ด€๋ฆฌ์ฒด๊ณ„์˜ ํšจ๊ณผ์  ๊ตฌํ˜„๊ณผ ์ง€์†์  ์šด์˜์„ ์œ„ํ•œ ์˜ˆ์‚ฐ ๋ฐ ์ž์›์„ ํ• ๋‹นํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.2.1": {"name": "์ •๋ณด์ž์‚ฐ ์‹๋ณ„","checks": {"macie_is_enabled": "PASS","resourceexplorer2_indexes_found": "PASS","config_recorder_all_regions_enabled": null,"organizations_account_part_of_organizations": null,"organizations_tags_policies_enabled_and_attached": null},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.2.1 ์ •๋ณด์ž์‚ฐ ์‹๋ณ„","Subdomain": "1.2. 
์œ„ํ—˜ ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์ž์‚ฐ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ž์‚ฐ๋ถ„๋ฅ˜ ๊ธฐ์ค€","์ •๋ณด์ž์‚ฐ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ž์‚ฐ๋ชฉ๋ก(์ž์‚ฐ๊ด€๋ฆฌ์‹œ์Šคํ…œ ํ™”๋ฉด)","์ •๋ณด์ž์‚ฐ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณด์•ˆ๋“ฑ๊ธ‰","์ž์‚ฐ์‹ค์‚ฌ ๋‚ด์—ญ","์œ„ํ—˜๋ถ„์„ ๋ณด๊ณ ์„œ(์ž์‚ฐ์‹๋ณ„ ๋‚ด์—ญ)"],"AuditChecklist": ["์ •๋ณด์ž์‚ฐ์˜ ๋ถ„๋ฅ˜๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด์˜ ๋ชจ๋“  ์ž์‚ฐ์„ ์‹๋ณ„ํ•˜์—ฌ ๋ชฉ๋ก์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‹๋ณ„๋œ ์ •๋ณด์ž์‚ฐ์— ๋Œ€ํ•˜์—ฌ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ๋ฐ ์—…๋ฌด์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ ๋“ฑ์„ ๊ณ ๋ คํ•˜์—ฌ ์ค‘์š”๋„๋ฅผ ๊ฒฐ์ •ํ•˜๊ณ  ๋ณด์•ˆ๋“ฑ๊ธ‰์„ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๊ธฐ์ ์œผ๋กœ ์ •๋ณด์ž์‚ฐ ํ˜„ํ™ฉ์„ ์กฐ์‚ฌํ•˜์—ฌ ์ •๋ณด์ž์‚ฐ๋ชฉ๋ก์„ ์ตœ์‹ ์œผ๋กœ ์œ ์ง€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด์˜ ์ž์‚ฐ ๋ชฉ๋ก์—์„œ ์ค‘์š”์ •๋ณด ์ทจ๊ธ‰์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ทจ๊ธ‰์ž PC๋ฅผ ํ†ต์ œํ•˜๋Š” ๋ฐ ์‚ฌ์šฉ๋˜๋Š” ์ถœ๋ ฅ๋ฌผ ๋ณด์•ˆ, ๋ฌธ์„œ์•”ํ˜ธํ™”, USB๋งค์ฒด์ œ์–ด ๋“ฑ์˜ ๋‚ด๋ถ€์ •๋ณด ์œ ์ถœํ†ต์ œ ์‹œ์Šคํ…œ์ด ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด์—์„œ ์ œ3์ž๋กœ๋ถ€ํ„ฐ ์ œ๊ณต๋ฐ›์€ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ์žˆ์œผ๋‚˜, ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•œ ์ž์‚ฐ ์‹๋ณ„์ด ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ์ง€์นจ์— ๋ช…์‹œ๋œ ์ •๋ณด์ž์‚ฐ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณด์•ˆ๋“ฑ๊ธ‰ ๋ถ„๋ฅ˜ ๊ธฐ์ค€๊ณผ ์ž์‚ฐ๊ด€๋ฆฌ ๋Œ€์žฅ์˜ ๋ถ„๋ฅ˜ ๊ธฐ์ค€์ด ์ผ์น˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์˜จํ”„๋ ˆ๋ฏธ์Šค ์ž์‚ฐ์— ๋Œ€ํ•ด์„œ๋Š” ์‹๋ณ„์ด ์ด๋ฃจ์–ด์กŒ์œผ๋‚˜, ์™ธ๋ถ€์— ์œ„ํƒํ•œ IT ์„œ๋น„์Šค(์›นํ˜ธ์ŠคํŒ…, ์„œ๋ฒ„ํ˜ธ์ŠคํŒ…, ํด๋ผ์šฐ๋“œ ๋“ฑ)์— ๋Œ€ํ•œ ์ž์‚ฐ ์‹๋ณ„์ด ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ(๋‹จ, ์ธ์ฆ๋ฒ”์œ„ ๋‚ด)","์‚ฌ๋ก€ 5 : ๊ณ ์œ ์‹๋ณ„์ •๋ณด ๋“ฑ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ €์žฅํ•˜๊ณ  ์žˆ๋Š” ๋ฐฑ์—…์„œ๋ฒ„์˜ ๊ธฐ๋ฐ€์„ฑ ๋“ฑ๊ธ‰์„ (ํ•˜)๋กœ ์‚ฐ์ •ํ•˜๋Š” ๋“ฑ ์ •๋ณด์ž์‚ฐ ์ค‘์š”๋„ ํ‰๊ฐ€์˜ ํ•ฉ๋ฆฌ์„ฑ ๋ฐ ์‹ ๋ขฐ์„ฑ์ด ๋ฏธํกํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์กฐ์ง์˜ ์—…๋ฌดํŠน์„ฑ์— ๋”ฐ๋ผ ์ •๋ณด์ž์‚ฐ ๋ถ„๋ฅ˜๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด ๋ชจ๋“  ์ •๋ณด์ž์‚ฐ์„ ์‹๋ณ„ยท๋ถ„๋ฅ˜ํ•˜๊ณ , ์ค‘์š”๋„๋ฅผ ์‚ฐ์ •ํ•œ ํ›„ ๊ทธ ๋ชฉ๋ก์„ ์ตœ์‹ ์œผ๋กœ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 2,"total": 5,"manual": 0}},"1.2.2": {"name": "ํ˜„ํ™ฉ ๋ฐ ํ๋ฆ„๋ถ„์„","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.2.2 ํ˜„ํ™ฉ ๋ฐ ํ๋ฆ„๋ถ„์„","Subdomain": "1.2. 
์œ„ํ—˜ ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์„œ๋น„์Šค ํ˜„ํ™ฉํ‘œ","์ •๋ณด์„œ๋น„์Šค ์—…๋ฌดํ๋ฆ„ํ‘œยท์—…๋ฌดํ๋ฆ„๋„","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ํ˜„ํ™ฉํ‘œ(ISMS-P ์ธ์ฆ์ธ ๊ฒฝ์šฐ)","๊ฐœ์ธ์ •๋ณด ํ๋ฆ„ํ‘œยทํ๋ฆ„๋„(ISMS-P ์ธ์ฆ์ธ ๊ฒฝ์šฐ)"],"AuditChecklist": ["๊ด€๋ฆฌ์ฒด๊ณ„ ์ „ ์˜์—ญ์— ๋Œ€ํ•œ ์ •๋ณด์„œ๋น„์Šค ํ˜„ํ™ฉ์„ ์‹๋ณ„ํ•˜๊ณ  ์—…๋ฌด ์ ˆ์ฐจ์™€ ํ๋ฆ„์„ ํŒŒ์•…ํ•˜์—ฌ ๋ฌธ์„œํ™”ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ํ˜„ํ™ฉ์„ ์‹๋ณ„ํ•˜๊ณ  ๊ฐœ์ธ์ •๋ณด์˜ ํ๋ฆ„์„ ํŒŒ์•…ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„๋„ ๋“ฑ์œผ๋กœ ๋ฌธ์„œํ™”ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์„œ๋น„์Šค ๋ฐ ์—…๋ฌด, ์ •๋ณด์ž์‚ฐ ๋“ฑ์˜ ๋ณ€ํ™”์— ๋”ฐ๋ฅธ ์—…๋ฌด์ ˆ์ฐจ ๋ฐ ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ ํ๋ฆ„๋„ ๋“ฑ ๊ด€๋ จ ๋ฌธ์„œ์˜ ์ตœ์‹ ์„ฑ์„ ์œ ์ง€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด ์ฃผ์š” ์„œ๋น„์Šค์˜ ์—…๋ฌด ์ ˆ์ฐจยทํ๋ฆ„ ๋ฐ ํ˜„ํ™ฉ์— ๋ฌธ์„œํ™”๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„๋„๋ฅผ ์ž‘์„ฑํ•˜์˜€์œผ๋‚˜, ์‹ค์ œ ๊ฐœ์ธ์ •๋ณด์˜ ํ๋ฆ„๊ณผ ์ƒ์ดํ•œ ๋ถ€๋ถ„์ด ๋‹ค์ˆ˜ ์กด์žฌํ•˜๊ฑฐ๋‚˜ ์ค‘์š”ํ•œ ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ตœ์ดˆ ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„๋„ ์ž‘์„ฑ ์ดํ›„์— ํ˜„ํ–‰ํ™”๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์•„ ๋ณ€ํ™”๋œ ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„์ด ํ๋ฆ„๋„์— ๋ฐ˜์˜๋˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๊ด€๋ฆฌ์ฒด๊ณ„ ์ „ ์˜์—ญ์— ๋Œ€ํ•œ ์ •๋ณด์„œ๋น„์Šค ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ํ˜„ํ™ฉ์„ ๋ถ„์„ํ•˜๊ณ  ์—…๋ฌด ์ ˆ์ฐจ์™€ ํ๋ฆ„์„ ํŒŒ์•…ํ•˜์—ฌ ๋ฌธ์„œํ™”ํ•˜๋ฉฐ, ์ด๋ฅผ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ ์ตœ์‹ ์„ฑ์„ ์œ ์ง€ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.2.3": {"name": "์œ„ํ—˜ ํ‰๊ฐ€","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.2.3 ์œ„ํ—˜ ํ‰๊ฐ€","Subdomain": "1.2. 
์œ„ํ—˜ ๊ด€๋ฆฌ","AuditEvidence": ["์œ„ํ—˜๊ด€๋ฆฌ ์ง€์นจ","์œ„ํ—˜๊ด€๋ฆฌ ๋งค๋‰ด์–ผยท๊ฐ€์ด๋“œ","์œ„ํ—˜๊ด€๋ฆฌ ๊ณ„ํš์„œ","์œ„ํ—˜ํ‰๊ฐ€ ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ ํšŒ์˜๋ก","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์‹ค๋ฌด ํ˜‘์˜ํšŒ ํšŒ์˜๋ก","์ •๋ณด์ž์‚ฐ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ž์‚ฐ ๋ชฉ๋ก","์ •๋ณด์„œ๋น„์Šค ๋ฐ ๊ฐœ์ธ์ •๋ณด ํ๋ฆ„ํ‘œยทํ๋ฆ„๋„"],"AuditChecklist": ["์กฐ์ง ๋˜๋Š” ์„œ๋น„์Šค์˜ ํŠน์„ฑ์— ๋”ฐ๋ผ ๋‹ค์–‘ํ•œ ์ธก๋ฉด์—์„œ ๋ฐœ์ƒํ•  ์ˆ˜ ์žˆ๋Š” ์œ„ํ—˜์„ ์‹๋ณ„ํ•˜๊ณ  ํ‰๊ฐ€ํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์„ ์ •์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์œ„ํ—˜๊ด€๋ฆฌ ๋ฐฉ๋ฒ• ๋ฐ ์ ˆ์ฐจ(์ˆ˜ํ–‰์ธ๋ ฅ, ๊ธฐ๊ฐ„, ๋Œ€์ƒ, ๋ฐฉ๋ฒ•, ์˜ˆ์‚ฐ ๋“ฑ)๋ฅผ ๊ตฌ์ฒดํ™”ํ•œ ์œ„ํ—˜๊ด€๋ฆฌ๊ณ„ํš์„ ๋งค๋…„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์œ„ํ—˜๊ด€๋ฆฌ๊ณ„ํš์— ๋”ฐ๋ผ ์—ฐ 1ํšŒ ์ด์ƒ ์ •๊ธฐ์ ์œผ๋กœ ๋˜๋Š” ํ•„์š”ํ•œ ์‹œ์ ์— ์œ„ํ—˜ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์กฐ์ง์—์„œ ์ˆ˜์šฉ ๊ฐ€๋Šฅํ•œ ๋ชฉํ‘œ ์œ„ํ—˜์ˆ˜์ค€์„ ์ •ํ•˜๊ณ , ๊ทธ ์ˆ˜์ค€์„ ์ดˆ๊ณผํ•˜๋Š” ์œ„ํ—˜์„ ์‹๋ณ„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์œ„ํ—˜์‹๋ณ„ ๋ฐ ํ‰๊ฐ€ ๊ฒฐ๊ณผ๋ฅผ ๊ฒฝ์˜์ง„์—๊ฒŒ ๋ณด๊ณ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ˆ˜๋ฆฝ๋œ ์œ„ํ—˜๊ด€๋ฆฌ๊ณ„ํš์„œ์— ์œ„ํ—˜ํ‰๊ฐ€ ๊ธฐ๊ฐ„ ๋ฐ ์œ„ํ—˜๊ด€๋ฆฌ ๋Œ€์ƒ๊ณผ ๋ฐฉ๋ฒ•์ด ์ •์˜๋˜์–ด ์žˆ์œผ๋‚˜, ์œ„ํ—˜๊ด€๋ฆฌ ์ˆ˜ํ–‰ ์ธ๋ ฅ๊ณผ ์†Œ์š” ์˜ˆ์‚ฐ ๋“ฑ ๊ตฌ์ฒด์ ์ธ ์‹คํ–‰๊ณ„ํš์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ „๋…„๋„์—๋Š” ์œ„ํ—˜ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜์˜€์œผ๋‚˜, ๊ธˆ๋…„๋„์—๋Š” ์ž์‚ฐ ๋ณ€๊ฒฝ์ด ์—†์—ˆ๋‹ค๋Š” ์‚ฌ์œ ๋กœ ์œ„ํ—˜ ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์œ„ํ—˜๊ด€๋ฆฌ ๊ณ„ํš์— ๋”ฐ๋ผ ์œ„ํ—˜ ์‹๋ณ„ ๋ฐ ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ฒ”์œ„ ๋‚ด ์ค‘์š” ์ •๋ณด์ž์‚ฐ์— ๋Œ€ํ•œ ์œ„ํ—˜ ์‹๋ณ„ ๋ฐ ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜, ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ ์‚ฌํ•ญ ์ค€์ˆ˜ ์—ฌ๋ถ€์— ๋”ฐ๋ฅธ ์œ„ํ—˜์„ ์‹๋ณ„ ๋ฐ ํ‰๊ฐ€ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์œ„ํ—˜๊ด€๋ฆฌ ๊ณ„ํš์— ๋”ฐ๋ผ ์œ„ํ—˜ ์‹๋ณ„ ๋ฐ ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์ˆ˜์šฉ ๊ฐ€๋Šฅํ•œ ๋ชฉํ‘œ ์œ„ํ—˜์ˆ˜์ค€์„ ์„ค์ •ํ•˜์˜€์œผ๋‚˜, ๊ด€๋ จ ์‚ฌํ•ญ์„ ๊ฒฝ์˜์ง„(์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋“ฑ)์— ๋ณด๊ณ ํ•˜์—ฌ ์Šน์ธ๋ฐ›์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๋‚ด๋ถ€ ์ง€์นจ์— ์ •์˜ํ•œ ์œ„ํ—˜ ํ‰๊ฐ€ ๋ฐฉ๋ฒ•๊ณผ ์‹ค์ œ ์ˆ˜ํ–‰ํ•œ ์œ„ํ—˜ ํ‰๊ฐ€ ๋ฐฉ๋ฒ•์ด ์ƒ์ดํ•  ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„์™€ ๊ด€๋ จ๋œ ๊ด€๋ฆฌ์ ยท๋ฌผ๋ฆฌ์  ์˜์—ญ์˜ ์œ„ํ—˜ ์‹๋ณ„ ๋ฐ ํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š๊ณ , ๋‹จ์ˆœํžˆ ๊ธฐ์ˆ ์  ์ทจ์•ฝ์ ์ง„๋‹จ ๊ฒฐ๊ณผ๋ฅผ ์œ„ํ—˜ ํ‰๊ฐ€ ๊ฒฐ๊ณผ๋กœ ๊ฐˆ์Œํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 7 : ์ˆ˜์šฉ ๊ฐ€๋Šฅํ•œ ๋ชฉํ‘œ ์œ„ํ—˜์ˆ˜์ค€(DoA)์„ ํƒ€๋‹นํ•œ ์‚ฌ์œ  ์—†์ด ๊ณผ๋„ํ•˜๊ฒŒ ๋†’์ด๋Š” ๊ฒƒ์œผ๋กœ ๊ฒฐ์ •ํ•จ์— ๋”ฐ๋ผ, ์‹ค์งˆ์ ์œผ๋กœ ๋Œ€์‘์ด ํ•„์š”ํ•œ ์ฃผ์š” ์œ„ํ—˜๋“ค์ด ์กฐ์น˜๊ฐ€ ๋ถˆํ•„์š”ํ•œ ์œ„ํ—˜(์ˆ˜์šฉ ๊ฐ€๋Šฅํ•œ ์œ„ํ—˜)์œผ๋กœ ์ง€์ •๋œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์กฐ์ง์˜ ๋Œ€๋‚ด์™ธ ํ™˜๊ฒฝ๋ถ„์„์„ ํ†ตํ•˜์—ฌ ์œ ํ˜•๋ณ„ ์œ„ํ˜‘์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๊ณ  ์กฐ์ง์— ์ ํ•ฉํ•œ ์œ„ํ—˜ ํ‰๊ฐ€ ๋ฐฉ๋ฒ•์„ ์„ ์ •ํ•˜์—ฌ ๊ด€๋ฆฌ์ฒด๊ณ„ ์ „ ์˜์—ญ์— ๋Œ€ํ•˜์—ฌ ์—ฐ 1ํšŒ ์ด์ƒ ์œ„ํ—˜์„ ํ‰๊ฐ€ํ•˜๋ฉฐ, ์ˆ˜์šฉํ•  ์ˆ˜ ์žˆ๋Š” ์œ„ํ—˜์€ ๊ฒฝ์˜์ง„์˜ ์Šน์ธ์„ ๋ฐ›์•„ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.2.4": {"name": "๋ณดํ˜ธ๋Œ€์ฑ… ์„ ์ •","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.2.4 ๋ณดํ˜ธ๋Œ€์ฑ… ์„ ์ •","Subdomain": "1.2. 
์œ„ํ—˜ ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ดํ–‰๊ณ„ํš์„œยท์œ„ํ—˜๊ด€๋ฆฌ๊ณ„ํš์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋Œ€์ฑ…์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋งˆ์Šคํ„ฐํ”Œ๋žœ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ดํ–‰๊ณ„ํš ๊ฒฝ์˜์ง„ ๋ณด๊ณ  ๋ฐ ์Šน์ธ ๋‚ด์—ญ"],"AuditChecklist": ["์‹๋ณ„๋œ ์œ„ํ—˜์— ๋Œ€ํ•œ ์ฒ˜๋ฆฌ ์ „๋žต(๊ฐ์†Œ, ํšŒํ”ผ, ์ „๊ฐ€, ์ˆ˜์šฉ ๋“ฑ)์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์œ„ํ—˜์ฒ˜๋ฆฌ๋ฅผ ์œ„ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์„ ์ •ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณดํ˜ธ๋Œ€์ฑ…์˜ ์šฐ์„ ์ˆœ์œ„๋ฅผ ๊ณ ๋ คํ•˜์—ฌ ์ผ์ •, ๋‹ด๋‹น๋ถ€์„œ ๋ฐ ๋‹ด๋‹น์ž, ์˜ˆ์‚ฐ ๋“ฑ์˜ ํ•ญ๋ชฉ์„ ํฌํ•จํ•œ ๋ณดํ˜ธ๋Œ€์ฑ… ์ดํ–‰๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ๊ฒฝ์˜์ง„์— ๋ณด๊ณ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋Œ€์ฑ…์— ๋Œ€ํ•œ ์ดํ–‰๊ณ„ํš์€ ์ˆ˜๋ฆฝํ•˜์˜€์œผ๋‚˜, ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์—๊ฒŒ ๋ณด๊ณ ๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์œ„ํ—˜๊ฐ์†Œ๊ฐ€ ์š”๊ตฌ๋˜๋Š” ์ผ๋ถ€ ์œ„ํ—˜์˜ ์กฐ์น˜ ์ดํ–‰๊ณ„ํš์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ฒ•์— ๋”ฐ๋ผ ์˜๋ฌด์ ์œผ๋กœ ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•  ์‚ฌํ•ญ, ๋ณด์•ˆ ์ทจ์•ฝ์„ฑ์ด ๋†’์€ ์œ„ํ—˜ ๋“ฑ์„ ๋ณ„๋„์˜ ๋ณดํ˜ธ์กฐ์น˜ ๊ณ„ํš ์—†์ด ์œ„ํ—˜์ˆ˜์šฉ์œผ๋กœ ๊ฒฐ์ •ํ•˜์—ฌ ์กฐ์น˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์œ„ํ—˜์ˆ˜์šฉ์— ๋Œ€ํ•œ ๊ทผ๊ฑฐ์™€ ํƒ€๋‹น์„ฑ์ด ๋ฏธํกํ•˜๊ณ , ์‹œ๊ธ‰์„ฑ ๋ฐ ๊ตฌํ˜„ ์šฉ์ด์„ฑ ๋“ฑ์˜ ์ธก๋ฉด์—์„œ ์ฆ‰์‹œ ๋˜๋Š” ๋‹จ๊ธฐ ์กฐ์น˜๊ฐ€ ๊ฐ€๋Šฅํ•œ ์œ„ํ—˜์š”์ธ์— ๋Œ€ํ•ด์„œ๋„ ํŠน๋ณ„ํ•œ ์‚ฌ์œ  ์—†์ด ์žฅ๊ธฐ ์กฐ์น˜๊ณ„ํš์œผ๋กœ ๋ถ„๋ฅ˜ํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์œ„ํ—˜ ํ‰๊ฐ€ ๊ฒฐ๊ณผ์— ๋”ฐ๋ผ ์‹๋ณ„๋œ ์œ„ํ—˜์„ ์ฒ˜๋ฆฌํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์กฐ์ง์— ์ ํ•ฉํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์„ ์ •ํ•˜๊ณ , ๋ณดํ˜ธ๋Œ€์ฑ…์˜ ์šฐ์„ ์ˆœ์œ„์™€ ์ผ์ •ยท๋‹ด๋‹น์žยท์˜ˆ์‚ฐ ๋“ฑ์„ ํฌํ•จํ•œ ์ดํ–‰๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ๊ฒฝ์˜์ง„์˜ ์Šน์ธ์„ ๋ฐ›์•„์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.3.1": {"name": "๋ณดํ˜ธ๋Œ€์ฑ… ๊ตฌํ˜„","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.3.1 ๋ณดํ˜ธ๋Œ€์ฑ… ๊ตฌํ˜„","Subdomain": "1.3. 
๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ดํ–‰๊ณ„ํš์„œยท์œ„ํ—˜๊ด€๋ฆฌ๊ณ„ํš์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋Œ€์ฑ…์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ดํ–‰๊ณ„ํš ๊ฒฝ๊ณผ๋ณด๊ณ ์„œ(๊ฒฝ์˜์ง„ ๋ณด๊ณ  ํฌํ•จ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ดํ–‰ ์™„๋ฃŒ ๋ณด๊ณ ์„œ(๊ฒฝ์˜์ง„ ๋ณด๊ณ  ํฌํ•จ)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์šด์˜๋ช…์„ธ์„œ"],"AuditChecklist": ["์ดํ–‰๊ณ„ํš์— ๋”ฐ๋ผ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ํšจ๊ณผ์ ์œผ๋กœ ๊ตฌํ˜„ํ•˜๊ณ  ์ดํ–‰๊ฒฐ๊ณผ์˜ ์ •ํ™•์„ฑ ๋ฐ ํšจ๊ณผ์„ฑ ์—ฌ๋ถ€๋ฅผ ๊ฒฝ์˜์ง„์ด ํ™•์ธํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ณด๊ณ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ด€๋ฆฌ์ฒด๊ณ„ ์ธ์ฆ๊ธฐ์ค€๋ณ„๋กœ ๋ณดํ˜ธ๋Œ€์ฑ… ๊ตฌํ˜„ ๋ฐ ์šด์˜ ํ˜„ํ™ฉ์„ ๊ธฐ๋กํ•œ ์šด์˜๋ช…์„ธ์„œ๋ฅผ ๊ตฌ์ฒด์ ์œผ๋กœ ์ž‘์„ฑํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋Œ€์ฑ…์— ๋Œ€ํ•œ ์ดํ–‰์™„๋ฃŒ ๊ฒฐ๊ณผ๋ฅผ ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์—๊ฒŒ ๋ณด๊ณ ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์œ„ํ—˜์กฐ์น˜ ์ดํ–‰๊ฒฐ๊ณผ๋ณด๊ณ ์„œ๋Š” สป์กฐ์น˜ ์™„๋ฃŒสผ๋กœ ๋ช…์‹œ๋˜์–ด ์žˆ์œผ๋‚˜, ๊ด€๋ จ๋œ ์œ„ํ—˜์ด ์—ฌ์ „ํžˆ ์กด์žฌํ•˜๊ฑฐ๋‚˜ ์ดํ–‰๊ฒฐ๊ณผ์˜ ์ •ํ™•์„ฑ ๋ฐ ํšจ๊ณผ์„ฑ์ด ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ „๋…„๋„ ์ •๋ณด๋ณดํ˜ธ๋Œ€์ฑ… ์ดํ–‰๊ณ„ํš์— ๋”ฐ๋ผ ์ค‘ยท์žฅ๊ธฐ๋กœ ๋ถ„๋ฅ˜๋œ ์œ„ํ—˜๋“ค์ด ํ•ด๋‹น์—ฐ๋„์— ๊ตฌํ˜„์ด ๋˜๊ณ  ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜ ์ดํ–‰๊ฒฐ๊ณผ๋ฅผ ๊ฒฝ์˜์ง„์ด ๊ฒ€ํ†  ๋ฐ ํ™•์ธํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์šด์˜๋ช…์„ธ์„œ์— ์ž‘์„ฑ๋œ ์šด์˜ ํ˜„ํ™ฉ์ด ์‹ค์ œ์™€ ์ผ์น˜ํ•˜์ง€ ์•Š๊ณ , ์šด๋ช…๋ช…์„ธ์„œ์— ๊ธฐ๋ก๋˜์–ด ์žˆ๋Š” ๊ด€๋ จ ๋ฌธ์„œ, ๊ฒฐ์žฌ ๋‚ด์šฉ, ํšŒ์˜๋ก ๋“ฑ์ด ์กด์žฌํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ดํ–‰๊ณ„ํš ์‹œํ–‰์— ๋Œ€ํ•œ ๊ฒฐ๊ณผ๋ฅผ ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์—๊ฒŒ ๋ณด๊ณ ํ•˜์˜€์œผ๋‚˜, ์ผ๋ถ€ ๋ฏธ์ดํ–‰๋œ ๊ฑด์— ๋Œ€ํ•œ ์‚ฌ์œ  ๋ณด๊ณ  ๋ฐ ํ›„์† ์กฐ์น˜๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์„ ์ •ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์€ ์ดํ–‰๊ณ„ํš์— ๋”ฐ๋ผ ํšจ๊ณผ์ ์œผ๋กœ ๊ตฌํ˜„ํ•˜๊ณ , ๊ฒฝ์˜์ง„์€ ์ดํ–‰๊ฒฐ๊ณผ์˜ ์ •ํ™•์„ฑ๊ณผ ํšจ๊ณผ์„ฑ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.3.2": {"name": "๋ณดํ˜ธ๋Œ€์ฑ… ๊ณต์œ ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.3.2 ๋ณดํ˜ธ๋Œ€์ฑ… ๊ณต์œ ","Subdomain": "1.3. ๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋Œ€์ฑ…๋ณ„ ์šด์˜๋ถ€์„œ ๋˜๋Š” ์‹œํ–‰๋ถ€์„œ ํ˜„ํ™ฉ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๊ด€๋ฆฌ๊ณ„ํš ๋‚ด๋ถ€๊ณต์œ  ์ฆ๊ฑฐ์ž๋ฃŒ(๊ณต์ง€ ๋‚ด์—ญ, ๊ต์œก์ž๋ฃŒ, ๊ณต์œ  ์ž๋ฃŒ ๋“ฑ)"],"AuditChecklist": ["๊ตฌํ˜„๋œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์šด์˜ ๋˜๋Š” ์‹œํ–‰ํ•  ๋ถ€์„œ ๋ฐ ๋‹ด๋‹น์ž๋ฅผ ๋ช…ํ™•ํ•˜๊ฒŒ ํŒŒ์•…ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ตฌํ˜„๋œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์šด์˜ ๋˜๋Š” ์‹œํ–‰ํ•  ๋ถ€์„œ ๋ฐ ๋‹ด๋‹น์ž์—๊ฒŒ ๊ด€๋ จ ๋‚ด์šฉ์„ ๊ณต์œ  ๋˜๋Š” ๊ต์œกํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜์—ฌ ๊ตฌํ˜„ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ด€๋ จ ๋‚ด์šฉ์„ ์ถฉ๋ถ„ํžˆ ๊ณต์œ ยท๊ต์œกํ•˜์ง€ ์•Š์•„ ์‹ค์ œ ์šด์˜ ๋˜๋Š” ์ˆ˜ํ–‰ ๋ถ€์„œ ๋ฐ ๋‹ด๋‹น์ž๊ฐ€ ํ•ด๋‹น ๋‚ด์šฉ์„ ์ธ์ง€ํ•˜์ง€ ๋ชปํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๋ณดํ˜ธ๋Œ€์ฑ…์˜ ์‹ค์ œ ์šด์˜ ๋˜๋Š” ์‹œํ–‰ํ•  ๋ถ€์„œ ๋ฐ ๋‹ด๋‹น์ž๋ฅผ ํŒŒ์•…ํ•˜์—ฌ ๊ด€๋ จ ๋‚ด์šฉ์„ ๊ณต์œ ํ•˜๊ณ  ๊ต์œกํ•˜์—ฌ ์ง€์†์ ์œผ๋กœ ์šด์˜๋˜๋„๋ก ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.3.3": {"name": "์šด์˜ํ˜„ํ™ฉ ๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.3.3 ์šด์˜ํ˜„ํ™ฉ ๊ด€๋ฆฌ","Subdomain": "1.3. 
๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์—ฐ๊ฐ„๊ณ„ํš์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์šด์˜ํ˜„ํ™ฉํ‘œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™ ์ˆ˜ํ–‰ ์—ฌ๋ถ€ ์ ๊ฒ€ ๊ฒฐ๊ณผ"],"AuditChecklist": ["๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜์„ ์œ„ํ•˜์—ฌ ์ฃผ๊ธฐ์  ๋˜๋Š” ์ƒ์‹œ์ ์œผ๋กœ ์ˆ˜ํ–‰ํ•˜์—ฌ์•ผ ํ•˜๋Š” ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™์„ ๋ฌธ์„œํ™”ํ•˜์—ฌ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฒฝ์˜์ง„์€ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜ํ™œ๋™์˜ ํšจ๊ณผ์„ฑ์„ ํ™•์ธํ•˜๊ณ  ์ด๋ฅผ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜ํ˜„ํ™ฉ ์ค‘ ์ฃผ๊ธฐ์  ๋˜๋Š” ์ƒ์‹œ์ ์ธ ํ™œ๋™์ด ์š”๊ตฌ๋˜๋Š” ํ™œ๋™ ํ˜„ํ™ฉ์„ ๋ฌธ์„œํ™”ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜ํ˜„ํ™ฉ์— ๋Œ€ํ•œ ๋ฌธ์„œํ™”๋Š” ์ด๋ฃจ์–ด์กŒ์œผ๋‚˜, ํ•ด๋‹น ์šด์˜ํ˜„ํ™ฉ์— ๋Œ€ํ•œ ์ฃผ๊ธฐ์ ์ธ ๊ฒ€ํ† ๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์•„ ์›”๋ณ„ ๋ฐ ๋ถ„๊ธฐ๋ณ„ ํ™œ๋™์ด ์š”๊ตฌ๋˜๋Š” ์ผ๋ถ€ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™์ด ๋ˆ„๋ฝ๋˜์—ˆ๊ณ  ์ผ๋ถ€๋Š” ์ดํ–‰ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ31์กฐ(๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ์ง€์ •)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ45์กฐ์˜3(์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž์˜ ์ง€์ • ๋“ฑ)"]}],"description": "์กฐ์ง์ด ์ˆ˜๋ฆฝํ•œ ๊ด€๋ฆฌ์ฒด๊ณ„์— ๋”ฐ๋ผ ์ƒ์‹œ์  ๋˜๋Š” ์ฃผ๊ธฐ์ ์œผ๋กœ ์ˆ˜ํ–‰ํ•˜์—ฌ์•ผ ํ•˜๋Š” ์šด์˜ํ™œ๋™ ๋ฐ ์ˆ˜ํ–‰ ๋‚ด์—ญ์€ ์‹๋ณ„ ๋ฐ ์ถ”์ ์ด ๊ฐ€๋Šฅํ•˜๋„๋ก ๊ธฐ๋กํ•˜์—ฌ ๊ด€๋ฆฌํ•˜๊ณ , ๊ฒฝ์˜์ง„์€ ์ฃผ๊ธฐ์ ์œผ๋กœ ์šด์˜ํ™œ๋™์˜ ํšจ๊ณผ์„ฑ์„ ํ™•์ธํ•˜์—ฌ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.4.1": {"name": "๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ์ค€์ˆ˜ ๊ฒ€ํ† ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.4.1 ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ์ค€์ˆ˜ ๊ฒ€ํ† ","Subdomain": "1.4. 
๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๋ฐ ๊ฐœ์„ ","AuditEvidence": ["๋ฒ•์  ์ค€๊ฑฐ์„ฑ ๊ฒ€ํ†  ๋‚ด์—ญ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจ ๊ฒ€ํ†  ๋ฐ ๊ฐœ์ •์ด๋ ฅ","์ •์ฑ…ยท์ง€์นจ ์‹ ๊ตฌ๋Œ€์กฐํ‘œ","๋ฒ• ๊ฐœ์ •์‚ฌํ•ญ ๋‚ด๋ถ€๊ณต์œ  ์ž๋ฃŒ","๊ฐœ์ธ์ •๋ณด ์†ํ•ด๋ฐฐ์ƒ ์ฑ…์ž„๋ณด์žฅ ์ž…์ฆ ์ž๋ฃŒ(์‚ฌ์ด๋ฒ„๋ณดํ—˜ ์•ฝ์ •์„œ ๋“ฑ)","์ •๋ณด๋ณดํ˜ธ ๊ณต์‹œ ๋‚ด์—ญ"],"AuditChecklist": ["์กฐ์ง์ด ์ค€์ˆ˜ํ•˜์—ฌ์•ผ ํ•˜๋Š” ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ํŒŒ์•…ํ•˜์—ฌ ์ตœ์‹ ์„ฑ์„ ์œ ์ง€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์˜ ์ค€์ˆ˜ ์—ฌ๋ถ€๋ฅผ ์—ฐ 1ํšŒ ์ด์ƒ ์ •๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ•์ด ์ตœ๊ทผ ๊ฐœ์ •๋˜์—ˆ์œผ๋‚˜ ๊ฐœ์ •์‚ฌํ•ญ์ด ์กฐ์ง์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๊ฒ€ํ† ํ•˜์ง€ ์•Š์•˜์œผ๋ฉฐ, ์ •์ฑ…์„œยท์‹œํ–‰๋ฌธ์„œ ๋ฐ ๋ฒ•์ ์ค€๊ฑฐ์„ฑ ์ฒดํฌ๋ฆฌ์ŠคํŠธ ๋“ฑ์—๋„ ํ•ด๋‹น ๋‚ด์šฉ์„ ๋ฐ˜์˜ํ•˜์ง€ ์•Š์•„ ์ •์ฑ…์„œยท์‹œํ–‰๋ฌธ์„œ ๋ฐ ๋ฒ•์ ์ค€๊ฑฐ์„ฑ ์ฒดํฌ๋ฆฌ์ŠคํŠธ ๋“ฑ์˜ ๋‚ด์šฉ์ด ๋ฒ•๋ น ๋‚ด์šฉ๊ณผ ์ผ์น˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์กฐ์ง์—์„œ ์ค€์ˆ˜ํ•˜์—ฌ์•ผ ํ•  ๋ฒ•๋ฅ ์ด ๊ฐœ์ •๋˜์—ˆ์œผ๋‚˜, ํ•ด๋‹น ๋ฒ•๋ฅ  ์ค€๊ฑฐ์„ฑ ๊ฒ€ํ† ๋ฅผ ์žฅ๊ธฐ๊ฐ„ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ฒ•์  ์ค€๊ฑฐ์„ฑ ์ค€์ˆ˜ ์—ฌ๋ถ€์— ๋Œ€ํ•œ ๊ฒ€ํ† ๊ฐ€ ์ ์ ˆํžˆ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์•„ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ๋“ฑ ๋ฒ•๊ทœ ์œ„๋ฐ˜ ์‚ฌํ•ญ์ด ๋‹ค์ˆ˜ ๋ฐœ๊ฒฌ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ•์— ๋”ฐ๋ผ ๊ฐœ์ธ์ •๋ณด ์†ํ•ด๋ฐฐ์ƒ์ฑ…์ž„ ๋ณด์žฅ์ œ๋„ ์ ์šฉ ๋Œ€์ƒ์ด ๋˜์—ˆ์œผ๋‚˜, ์ด๋ฅผ ์ธ์ง€ํ•˜์ง€ ๋ชปํ•˜์—ฌ ๋ณดํ—˜ ๊ฐ€์ž…์ด๋‚˜ ์ค€๋น„๊ธˆ ์ ๋ฆฝ์„ ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ ๋˜๋Š” ๋ณดํ—˜ ๊ฐ€์ž…์„ ํ•˜์˜€์œผ๋‚˜ ์ด์šฉ์ž ์ˆ˜ ๋ฐ ๋งค์ถœ์•ก์— ๋”ฐ๋ฅธ ์ตœ์ €๊ฐ€์ž…๊ธˆ์•ก ๊ธฐ์ค€์„ ์ค€์ˆ˜ํ•˜์ง€ ๋ชปํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ •๋ณด๋ณดํ˜ธ ๊ณต์‹œ ์˜๋ฌด๋Œ€์ƒ ์‚ฌ์—…์ž์ด์ง€๋งŒ ๋ฒ•์— ์ •ํ•œ ์‹œ์  ๋‚ด์— ์ •๋ณด๋ณดํ˜ธ ๊ณต์‹œ๊ฐ€ ์‹œํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ๋ชจ๋ฐ”์ผ์•ฑ์„ ํ†ตํ•ด ์œ„์น˜์ •๋ณด์‚ฌ์—…์ž๋กœ๋ถ€ํ„ฐ ์ด์šฉ์ž์˜ ๊ฐœ์ธ์œ„์น˜์ •๋ณด๋ฅผ ์ „์†ก๋ฐ›์•„ ์„œ๋น„์Šค์— ์ด์šฉํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์œ„์น˜๊ธฐ๋ฐ˜์„œ๋น„์Šค์‚ฌ์—… ์‹ ๊ณ ๋ฅผ ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 7 : ๊ตญ๋‚ด์— ์ฃผ์†Œ ๋˜๋Š” ์˜์—…์†Œ๊ฐ€ ์—†๋Š” ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๋กœ์„œ ์ „๋…„๋„ ๋ง ๊ธฐ์ค€ ์ง์ „ 3๊ฐœ์›” ๊ฐ„ ๊ทธ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ์ €์žฅยท๊ด€๋ฆฌ๋˜๊ณ  ์žˆ๋Š” ๊ตญ๋‚ด ์ •๋ณด์ฃผ์ฒด์˜ ์ˆ˜๊ฐ€ ์ผ์ผํ‰๊ท  100๋งŒ๋ช… ์ด์ƒ์ธ ์ž์— ํ•ด๋‹น๋˜์–ด ๊ตญ๋‚ด๋Œ€๋ฆฌ์ธ ์ง€์ •์˜๋ฌด์— ํ•ด๋‹น๋จ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ , ๊ตญ๋‚ด๋Œ€๋ฆฌ์ธ์„ ๋ฌธ์„œ๋กœ ์ง€์ •ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์กฐ์ง์ด ์ค€์ˆ˜ํ•˜์—ฌ์•ผ ํ•  ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ํŒŒ์•…ํ•˜์—ฌ ๊ทœ์ •์— ๋ฐ˜์˜ํ•˜๊ณ , ์ค€์ˆ˜ ์—ฌ๋ถ€๋ฅผ ์ง€์†์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.4.2": {"name": "๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.4.2 ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€","Subdomain": "1.4. 
๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๋ฐ ๊ฐœ์„ ","AuditEvidence": ["๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๊ณ„ํš์„œ(๋‚ด๋ถ€์ ๊ฒ€ ๊ณ„ํš์„œ, ๋‚ด๋ถ€๊ฐ์‚ฌ ๊ณ„ํš์„œ)","๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์œ„์›ํšŒ ํšŒ์˜๋ก"],"AuditChecklist": ["๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ๋ฐ ์ˆ˜๋ฆฝ๋œ ์ •์ฑ…์— ๋”ฐ๋ผ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„๊ฐ€ ํšจ๊ณผ์ ์œผ๋กœ ์šด์˜๋˜๋Š”์ง€๋ฅผ ์ ๊ฒ€ํ•˜๊ธฐ ์œ„ํ•œ ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€๊ธฐ์ค€, ๋ฒ”์œ„, ์ฃผ๊ธฐ, ์ ๊ฒ€์ธ๋ ฅ ์ž๊ฒฉ์š”๊ฑด ๋“ฑ์„ ํฌํ•จํ•œ ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๊ณ„ํš์— ๋”ฐ๋ผ ๋…๋ฆฝ์„ฑ, ๊ฐ๊ด€์„ฑ ๋ฐ ์ „๋ฌธ์„ฑ์ด ํ™•๋ณด๋œ ์ธ๋ ฅ์„ ๊ตฌ์„ฑํ•˜์—ฌ ์—ฐ 1ํšŒ ์ด์ƒ ์ ๊ฒ€์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์„ ๊ฒฝ์˜์ง„์—๊ฒŒ ๋ณด๊ณ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ์ธ๋ ฅ์— ์ ๊ฒ€ ๋Œ€์ƒ์œผ๋กœ ์‹๋ณ„๋œ ์ „์‚ฐํŒ€ ์ง์›์ด ํฌํ•จ๋˜์–ด ์ „์‚ฐํŒ€ ๊ด€๋ฆฌ ์˜์—ญ์— ๋Œ€ํ•œ ์ ๊ฒ€์— ๊ด€์—ฌํ•˜๊ณ  ์žˆ์–ด, ์ ๊ฒ€์˜ ๋…๋ฆฝ์„ฑ์ด ํ›ผ์†๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ธˆ๋…„๋„ ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€์„ ์‹ค์‹œํ•˜์˜€์œผ๋‚˜, ์ ๊ฒ€๋ฒ”์œ„๊ฐ€ ์ผ๋ถ€ ์˜์—ญ์— ๊ตญํ•œ๋˜์–ด ์žˆ์–ด ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„๋ฅผ ์ถฉ์กฑํ•˜์ง€ ๋ชปํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ํŒ€์ด ์œ„ํ—˜ํ‰๊ฐ€ ๋˜๋Š” ์ทจ์•ฝ์  ์ ๊ฒ€ ๋“ฑ ๊ด€๋ฆฌ์ฒด๊ณ„ ๊ตฌ์ถ• ๊ณผ์ •์— ์ฐธ์—ฌํ•œ ๋‚ด๋ถ€ ์ง์› ๋ฐ ์™ธ๋ถ€ ์ปจ์„คํ„ดํŠธ๋กœ๋งŒ ๊ตฌ์„ฑ๋˜์–ด, ์ ๊ฒ€์˜ ๋…๋ฆฝ์„ฑ์ด ํ™•๋ณด๋˜์—ˆ๋‹ค๊ณ  ๋ณผ ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "๊ด€๋ฆฌ์ฒด๊ณ„๊ฐ€ ๋‚ด๋ถ€ ์ •์ฑ… ๋ฐ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์— ๋”ฐ๋ผ ํšจ๊ณผ์ ์œผ๋กœ ์šด์˜๋˜๊ณ  ์žˆ๋Š”์ง€ ๋…๋ฆฝ์„ฑ๊ณผ ์ „๋ฌธ์„ฑ์ด ํ™•๋ณด๋œ ์ธ๋ ฅ์„ ๊ตฌ์„ฑํ•˜์—ฌ ์—ฐ 1ํšŒ ์ด์ƒ ์ ๊ฒ€ํ•˜๊ณ , ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์„ ๊ฒฝ์˜์ง„์—๊ฒŒ ๋ณด๊ณ ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"1.4.3": {"name": "๊ด€๋ฆฌ์ฒด๊ณ„ ๊ฐœ์„ ","checks": {},"status": "PASS","attributes": [{"Domain": "1. ๊ด€๋ฆฌ์ฒด๊ณ„ ์ˆ˜๋ฆฝ ๋ฐ ์šด์˜","Section": "1.4.3 ๊ด€๋ฆฌ์ฒด๊ณ„ ๊ฐœ์„ ","Subdomain": "1.4. 
๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๋ฐ ๊ฐœ์„ ","AuditEvidence": ["๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ","๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ์กฐ์น˜๊ณ„ํš์„œยท์ดํ–‰์กฐ์น˜๊ฒฐ๊ณผ์„œ","์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…","ํšจ๊ณผ์„ฑ ์ธก์ • ์ง€ํ‘œ ๋ฐ ์ธก์ • ๊ฒฐ๊ณผ(๊ฒฝ์˜์ง„ ๋ณด๊ณ  ํฌํ•จ)"],"AuditChecklist": ["๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ์ค€์ˆ˜๊ฒ€ํ†  ๋ฐ ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€์„ ํ†ตํ•˜์—ฌ ์‹๋ณ„๋œ ๊ด€๋ฆฌ์ฒด๊ณ„์ƒ์˜ ๋ฌธ์ œ์ ์— ๋Œ€ํ•œ ๊ทผ๋ณธ ์›์ธ์„ ๋ถ„์„ํ•˜์—ฌ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋ฐ ๊ฐœ์„  ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์žฌ๋ฐœ๋ฐฉ์ง€ ๋ฐ ๊ฐœ์„  ๊ฒฐ๊ณผ์˜ ์ •ํ™•์„ฑ ๋ฐ ํšจ๊ณผ์„ฑ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•œ ๊ธฐ์ค€๊ณผ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€์ ๊ฒ€์„ ํ†ตํ•˜์—ฌ ๋ฐœ๊ฒฌ๋œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ฆฌ์ฒด๊ณ„ ์šด์˜์ƒ ๋ฌธ์ œ์ ์ด ๋งค๋ฒˆ ๋™์ผํ•˜๊ฒŒ ๋ฐ˜๋ณต๋˜์–ด ๋ฐœ์ƒ๋˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ๋‚ด๋ถ€์ ๊ฒ€ ์‹œ ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์— ๋Œ€ํ•ด์„œ๋Š” ๊ทผ๋ณธ์›์ธ์— ๋Œ€ํ•œ ๋ถ„์„ ๋ฐ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์ตœ๊ทผ์— ์ˆ˜ํ–‰๋œ ๋‚ด๋ถ€์ ๊ฒ€์—์„œ๋Š” ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์— ๋Œ€ํ•˜์—ฌ ๊ทผ๋ณธ์›์ธ ๋ถ„์„ ๋ฐ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์ด ์ˆ˜๋ฆฝ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ด€๋ฆฌ์ฒด๊ณ„์ƒ ๋ฌธ์ œ์ ์— ๋Œ€ํ•œ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ํ•ต์‹ฌ์„ฑ๊ณผ์ง€ํ‘œ๋ฅผ ๋งˆ๋ จํ•˜์—ฌ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ธก์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ทธ ๊ฒฐ๊ณผ์— ๋Œ€ํ•˜์—ฌ ๊ฒฝ์˜์ง„ ๋ณด๊ณ ๊ฐ€ ์žฅ๊ธฐ๊ฐ„ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€ ์‹œ ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์— ๋Œ€ํ•˜์—ฌ ์กฐ์น˜๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜ ์กฐ์น˜ ์™„๋ฃŒ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ์ค€์ˆ˜๊ฒ€ํ†  ๋ฐ ๊ด€๋ฆฌ์ฒด๊ณ„ ์ ๊ฒ€์„ ํ†ตํ•˜์—ฌ ์‹๋ณ„๋œ ๊ด€๋ฆฌ์ฒด๊ณ„์ƒ์˜ ๋ฌธ์ œ์ ์— ๋Œ€ํ•œ ์›์ธ์„ ๋ถ„์„ํ•˜๊ณ  ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ๊ฒฝ์˜์ง„์€ ๊ฐœ์„  ๊ฒฐ๊ณผ์˜ ์ •ํ™•์„ฑ๊ณผ ํšจ๊ณผ์„ฑ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.1.1": {"name": "์ •์ฑ…์˜ ์œ ์ง€๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.1.1 ์ •์ฑ…์˜ ์œ ์ง€๊ด€๋ฆฌ","Subdomain": "2.1. 
์ •์ฑ…, ์กฐ์ง, ์ž์‚ฐ ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ(์ง€์นจ, ์ ˆ์ฐจ, ๊ฐ€์ด๋“œ, ๋งค๋‰ด์–ผ ๋“ฑ)","์ •์ฑ…ยท์ง€์นจ ์ •๊ธฐยท๋น„์ •๊ธฐ ํƒ€๋‹น์„ฑ ๊ฒ€ํ†  ๊ฒฐ๊ณผ","์ •์ฑ…ยท์ง€์นจ ๊ด€๋ จ ๋ถ€์„œ์™€์˜ ๊ฒ€ํ†  ํšŒ์˜๋ก, ํšŒ๋žŒ๋‚ด์šฉ","์ •์ฑ…ยท์ง€์นจ ์ œยท๊ฐœ์ • ์ด๋ ฅ"],"AuditChecklist": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ์— ๋Œ€ํ•œ ์ •๊ธฐ์ ์ธ ํƒ€๋‹น์„ฑ ๊ฒ€ํ†  ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์กฐ์ง์˜ ๋Œ€๋‚ด์™ธ ํ™˜๊ฒฝ์— ์ค‘๋Œ€ํ•œ ๋ณ€ํ™” ๋ฐœ์ƒ ์‹œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๊ฒ€ํ† ํ•˜๊ณ  ํ•„์š”์‹œ ์ œยท๊ฐœ์ •ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ์˜ ์ œยท๊ฐœ์ • ์‹œ ์ดํ•ด ๊ด€๊ณ„์ž์˜ ๊ฒ€ํ† ๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ์˜ ์ œยท๊ฐœ์ • ๋‚ด์—ญ์— ๋Œ€ํ•˜์—ฌ ์ด๋ ฅ๊ด€๋ฆฌ๋ฅผ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ง€์นจ์„œ์™€ ์ ˆ์ฐจ์„œ ๊ฐ„ ํŒจ์Šค์›Œ๋“œ ์„ค์ • ๊ทœ์น™์— ์ผ๊ด€์„ฑ์ด ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด๋ณดํ˜ธ ํ™œ๋™(์ •๋ณด๋ณดํ˜ธ ๊ต์œก, ์•”ํ˜ธํ™”, ๋ฐฑ์—… ๋“ฑ)์˜ ๋Œ€์ƒ, ์ฃผ๊ธฐ, ์ˆ˜์ค€, ๋ฐฉ๋ฒ• ๋“ฑ์ด ๊ด€๋ จ ๋‚ด๋ถ€ ๊ทœ์ •, ์ง€์นจ, ์ ˆ์ฐจ์— ์„œ๋กœ ๋‹ค๋ฅด๊ฒŒ ๋ช…์‹œ๋˜์–ด ์ผ๊ด€์„ฑ์ด ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์— ๋Œ€ํ•œ ์ ‘๊ทผ ๋ฐ ์ž‘์—…์ด๋ ฅ์„ ํšจ๊ณผ์ ์œผ๋กœ ๊ธฐ๋ก ๋ฐ ๊ด€๋ฆฌํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผํ†ต์ œ ์†”๋ฃจ์…˜์„ ์‹ ๊ทœ๋กœ ๋„์ž…ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ณด์•ˆ์‹œ์Šคํ…œ ๋ณด์•ˆ ๊ด€๋ฆฌ์ง€์นจ ๋ฐ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ๋ณด์•ˆ ๊ด€๋ฆฌ์ง€์นจ ๋“ฑ ๋‚ด๋ถ€ ๋ณด์•ˆ์ง€์นจ์— ์ ‘๊ทผํ†ต์ œ, ์ž‘์—…์ด๋ ฅ, ๋กœ๊น…, ๊ฒ€ํ†  ๋“ฑ์— ๊ด€ํ•œ ์‚ฌํ•ญ์ด ๋ฐ˜์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…์ด ๊ฐœ์ •๋˜์—ˆ์œผ๋‚˜ ์ •์ฑ… ์‹œํ–‰ ๊ธฐ์ค€์ผ์ด ๋ช…์‹œ๋˜์–ด ์žˆ์ง€ ์•Š์œผ๋ฉฐ, ๊ด€๋ จ ์ •์ฑ…์˜ ์ž‘์„ฑ์ผ, ์ž‘์„ฑ์ž ๋ฐ ์Šน์ธ์ž ๋“ฑ์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ๊ด€๋ จ ๋ฒ•๋ น, ๊ณ ์‹œ ๋“ฑ์— ์ค‘๋Œ€ํ•œ ๋ณ€๊ฒฝ์‚ฌํ•ญ์ด ๋ฐœ์ƒํ•˜์˜€์œผ๋‚˜, ์ด๋Ÿฌํ•œ ๋ณ€๊ฒฝ์ด ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ… ๋ฐ ์‹œํ–‰๋ฌธ์„œ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๊ฒ€ํ† ํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜ ๋ณ€๊ฒฝ์‚ฌํ•ญ์„ ๋ฐ˜์˜ํ•˜์—ฌ ๊ฐœ์ •ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ •์ฑ…๊ณผ ์‹œํ–‰๋ฌธ์„œ๋Š” ๋ฒ•๋ น ๋ฐ ๊ทœ์ œ, ์ƒ์œ„ ์กฐ์ง ๋ฐ ๊ด€๋ จ ๊ธฐ๊ด€ ์ •์ฑ…๊ณผ์˜ ์—ฐ๊ณ„์„ฑ, ์กฐ์ง์˜ ๋Œ€๋‚ด์™ธ ํ™˜๊ฒฝ๋ณ€ํ™” ๋“ฑ์— ๋”ฐ๋ผ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ ํ•„์š”ํ•œ ๊ฒฝ์šฐ ์ œยท๊ฐœ์ •ํ•˜๊ณ  ๊ทธ ๋‚ด์—ญ์„ ์ด๋ ฅ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.1.2": {"name": "์กฐ์ง์˜ ์œ ์ง€๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.1.2 ์กฐ์ง์˜ ์œ ์ง€๊ด€๋ฆฌ","Subdomain": "2.1. 
์ •์ฑ…, ์กฐ์ง, ์ž์‚ฐ ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง๋„","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง ์ง๋ฌด๊ธฐ์ˆ ์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์—…๋ฌด ๋ถ„์žฅํ‘œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ •์ฑ…ยท์ง€์นจ, ๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์˜์‚ฌ์†Œํ†ต ๊ด€๋ฆฌ๊ณ„ํš","์˜์‚ฌ์†Œํ†ต ์ˆ˜ํ–‰ ์ด๋ ฅ(์›”๊ฐ„๋ณด๊ณ , ์ฃผ๊ฐ„๋ณด๊ณ , ๋‚ด๋ถ€๊ณต์ง€ ๋“ฑ)","์˜์‚ฌ์†Œํ†ต ์ฑ„๋„(์ •๋ณด๋ณดํ˜ธํฌํ„ธ, ๊ฒŒ์‹œํŒ ๋“ฑ)"],"AuditChecklist": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ฑ…์ž„์ž์™€ ๋‹ด๋‹น์ž์˜ ์—ญํ•  ๋ฐ ์ฑ…์ž„์„ ๋ช…ํ™•ํžˆ ์ •์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ฑ…์ž„์ž์™€ ๋‹ด๋‹น์ž์˜ ํ™œ๋™์„ ํ‰๊ฐ€ํ•  ์ˆ˜ ์žˆ๋Š” ์ฒด๊ณ„๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์กฐ์ง ๋ฐ ์กฐ์ง์˜ ๊ตฌ์„ฑ์› ๊ฐ„ ์ƒํ˜ธ ์˜์‚ฌ์†Œํ†ตํ•  ์ˆ˜ ์žˆ๋Š” ์ฒด๊ณ„ ๋ฐ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ์ง€์นจ ๋ฐ ์ง๋ฌด๊ธฐ์ˆ ์„œ์— ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž, ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž ๋ฐ ๊ด€๋ จ ๋‹ด๋‹น์ž์˜ ์—ญํ• ๊ณผ ์ฑ…์ž„์„ ์ •์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์‹ค์ œ ์šด์˜ํ˜„ํ™ฉ๊ณผ ์ผ์น˜ํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ด€๋ จ ๋‹ด๋‹น์ž์˜ ํ™œ๋™์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ํ‰๊ฐ€ํ•  ์ˆ˜ ์žˆ๋Š” ๋ชฉํ‘œ, ๊ธฐ์ค€, ์ง€ํ‘œ ๋“ฑ์˜ ์ฒด๊ณ„๊ฐ€ ๋งˆ๋ จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ๋ถ€์„œ๋ณ„ ์ •๋ณด๋ณดํ˜ธ ๋‹ด๋‹น์ž๋Š” ์ •๋ณด๋ณดํ˜ธ์™€ ๊ด€๋ จ๋œ KPI๋ฅผ ์„ค์ •ํ•˜์—ฌ ์ธ์‚ฌํ‰๊ฐ€ ์‹œ ๋ฐ˜์˜ํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ๋ถ€์„œ๋ณ„ ์ •๋ณด๋ณดํ˜ธ ๋‹ด๋‹น์ž์˜ KPI์— ์ •๋ณด๋ณดํ˜ธ์™€ ๊ด€๋ จ๋œ ์‚ฌํ•ญ์ด ์ „ํ˜€ ๋ฐ˜์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž๊ฐ€ ์ง€์ •๋˜์–ด ์žˆ์œผ๋‚˜, ๊ด€๋ จ ๋ฒ•๋ น์—์„œ ์š”๊ตฌํ•˜๋Š” ์—ญํ•  ๋ฐ ์ฑ…์ž„์ด ๋‚ด๋ถ€ ์ง€์นจ์ด๋‚˜ ์ง๋ฌด๊ธฐ์ˆ ์„œ ๋“ฑ์— ๊ตฌ์ฒด์ ์œผ๋กœ ๋ช…์‹œ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด), ์ œ31์กฐ(๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ์ง€์ •)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ45์กฐ์˜3(์ •๋ณด๋ณดํ˜ธ ์ตœ๊ณ ์ฑ…์ž„์ž์˜ ์ง€์ • ๋“ฑ)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์กฐ์ง์˜ ๊ฐ ๊ตฌ์„ฑ์›์—๊ฒŒ ์ •๋ณด๋ณดํ˜ธ์™€ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์—ญํ•  ๋ฐ ์ฑ…์ž„์„ ํ• ๋‹นํ•˜๊ณ , ๊ทธ ํ™œ๋™์„ ํ‰๊ฐ€ํ•  ์ˆ˜ ์žˆ๋Š” ์ฒด๊ณ„์™€ ์กฐ์ง ๋ฐ ์กฐ์ง์˜ ๊ตฌ์„ฑ์› ๊ฐ„ ์ƒํ˜ธ ์˜์‚ฌ์†Œํ†ตํ•  ์ˆ˜ ์žˆ๋Š” ์ฒด๊ณ„๋ฅผ ์ˆ˜๋ฆฝํ•˜์—ฌ ์šด์˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.1.3": {"name": "์ •๋ณด์ž์‚ฐ ๊ด€๋ฆฌ","checks": {"macie_is_enabled": "PASS","resourceexplorer2_indexes_found": "PASS","config_recorder_all_regions_enabled": null,"account_maintain_current_contact_details": null,"organizations_account_part_of_organizations": null,"organizations_tags_policies_enabled_and_attached": null,"account_security_contact_information_is_registered": null,"account_security_questions_are_registered_in_the_aws_account": null,"account_maintain_different_contact_details_to_security_billing_and_operations": null},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.1.3 ์ •๋ณด์ž์‚ฐ ๊ด€๋ฆฌ","Subdomain": "2.1. 
์ •์ฑ…, ์กฐ์ง, ์ž์‚ฐ ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์ž์‚ฐ ๋ชฉ๋ก(์ฑ…์ž„์ž, ๋‹ด๋‹น์ž ์ง€์ •)","์ •๋ณด์ž์‚ฐ ์ทจ๊ธ‰ ์ ˆ์ฐจ(๋ฌธ์„œ, ์ •๋ณด์‹œ์Šคํ…œ ๋“ฑ)","์ •๋ณด์ž์‚ฐ ๊ด€๋ฆฌ ์‹œ์Šคํ…œ ํ™”๋ฉด","์ •๋ณด์ž์‚ฐ ๋ณด์•ˆ๋“ฑ๊ธ‰ ํ‘œ์‹œ ๋‚ด์—ญ"],"AuditChecklist": ["์ •๋ณด์ž์‚ฐ์˜ ๋ณด์•ˆ๋“ฑ๊ธ‰์— ๋”ฐ๋ฅธ ์ทจ๊ธ‰์ ˆ์ฐจ(์ƒ์„ฑยท๋„์ž…, ์ €์žฅ, ์ด์šฉ, ํŒŒ๊ธฐ) ๋ฐ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ •์˜ํ•˜๊ณ  ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‹๋ณ„๋œ ์ •๋ณด์ž์‚ฐ์— ๋Œ€ํ•˜์—ฌ ์ฑ…์ž„์ž ๋ฐ ๊ด€๋ฆฌ์ž๋ฅผ ์ง€์ •ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ์ง€์นจ์— ๋”ฐ๋ผ ๋ฌธ์„œ์— ๋ณด์•ˆ๋“ฑ๊ธ‰์„ ํ‘œ๊ธฐํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์ด๋ฅผ ํ‘œ์‹œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด์ž์‚ฐ๋ณ„ ๋‹ด๋‹น์ž ๋ฐ ์ฑ…์ž„์ž๋ฅผ ์‹๋ณ„ํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜, ์ž์‚ฐ๋ชฉ๋ก ํ˜„ํ–‰ํ™”๊ฐ€ ๋ฏธํกํ•˜์—ฌ ํ‡ด์ง, ์ „๋ณด ๋“ฑ ์ธ์‚ฌ์ด๋™์ด ๋ฐœ์ƒํ•˜์—ฌ ์ฃผ์š” ์ •๋ณด์ž์‚ฐ์˜ ๋‹ด๋‹น์ž ๋ฐ ์ฑ…์ž„์ž๊ฐ€ ๋ณ€๊ฒฝ๋˜์—ˆ์Œ์—๋„ ์ด๋ฅผ ์‹๋ณ„ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์‹๋ณ„๋œ ์ •๋ณด์ž์‚ฐ์— ๋Œ€ํ•œ ์ค‘์š”๋„ ํ‰๊ฐ€๋ฅผ ์‹ค์‹œํ•˜์—ฌ ๋ณด์•ˆ๋“ฑ๊ธ‰์„ ๋ถ€์—ฌํ•˜๊ณ  ์ •๋ณด ์ž์‚ฐ๋ชฉ๋ก์— ๊ธฐ๋กํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ณด์•ˆ๋“ฑ๊ธ‰์— ๋”ฐ๋ฅธ ์ทจ๊ธ‰์ ˆ์ฐจ๋ฅผ ์ •์˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ •๋ณด์ž์‚ฐ์˜ ์šฉ๋„์™€ ์ค‘์š”๋„์— ๋”ฐ๋ฅธ ์ทจ๊ธ‰ ์ ˆ์ฐจ ๋ฐ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ์ž์‚ฐ๋ณ„ ์ฑ…์ž„์†Œ์žฌ๋ฅผ ๋ช…ํ™•ํžˆ ์ •์˜ํ•˜์—ฌ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 2,"total": 9,"manual": 0}},"2.2.1": {"name": "์ฃผ์š” ์ง๋ฌด์ž ์ง€์ • ๋ฐ ๊ด€๋ฆฌ","checks": {"iam_support_role_created": null,"organizations_delegated_administrators": null,"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.2.1 ์ฃผ์š” ์ง๋ฌด์ž ์ง€์ • ๋ฐ ๊ด€๋ฆฌ","Subdomain": "2.2. 
์ธ์  ๋ณด์•ˆ","AuditEvidence": ["์ฃผ์š” ์ง๋ฌด ๊ธฐ์ค€","์ฃผ์š”์ง๋ฌด์ž ๋ชฉ๋ก","๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๋ชฉ๋ก","์ค‘์š” ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๊ณ„์ • ๋ฐ ๊ถŒํ•œ ๊ด€๋ฆฌ ๋Œ€์žฅ","์ฃผ์š” ์ง๋ฌด์ž์— ๋Œ€ํ•œ ๊ด€๋ฆฌ ํ˜„ํ™ฉ(๊ต์œก ๊ฒฐ๊ณผ, ๋ณด์•ˆ์„œ์•ฝ์„œ ๋“ฑ)"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์˜ ์ทจ๊ธ‰, ์ฃผ์š” ์‹œ์Šคํ…œ ์ ‘๊ทผ ๋“ฑ ์ฃผ์š” ์ง๋ฌด์˜ ๊ธฐ์ค€์„ ๋ช…ํ™•ํžˆ ์ •์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ์š” ์ง๋ฌด๋ฅผ ์ˆ˜ํ–‰ํ•˜๋Š” ์ž„์ง์› ๋ฐ ์™ธ๋ถ€์ž๋ฅผ ์ฃผ์š” ์ง๋ฌด์ž๋กœ ์ง€์ •ํ•˜๊ณ  ๊ทธ ๋ชฉ๋ก์„ ์ตœ์‹ ์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด์ƒ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ทจ๊ธ‰ํ•˜๋Š” ์ž๋ฅผ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๋กœ ์ง€์ •ํ•˜๊ณ  ๋ชฉ๋ก์„ ์ตœ์‹ ์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด ํ•„์š”์„ฑ์— ๋”ฐ๋ผ ์ฃผ์š” ์ง๋ฌด์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ์ง€์ •์„ ์ตœ์†Œํ™”ํ•˜๋Š” ๋“ฑ ๊ด€๋ฆฌ๋ฐฉ์•ˆ์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ฃผ์š” ์ง๋ฌด์ž ๋ช…๋‹จ(๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๋ช…๋‹จ, ๋น„๋ฐ€์ •๋ณด๊ด€๋ฆฌ์ž ๋ช…๋‹จ ๋“ฑ)์„ ์ž‘์„ฑํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋Œ€๋Ÿ‰์˜ ๊ฐœ์ธ์ •๋ณด ๋“ฑ ์ค‘์š”์ •๋ณด๋ฅผ ์ทจ๊ธ‰ํ•˜๋Š” ์ผ๋ถ€ ์ž„์ง์›(DBA, DLP ๊ด€๋ฆฌ์ž ๋“ฑ)์„ ๋ช…๋‹จ์— ๋ˆ„๋ฝํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ฃผ์š” ์ง๋ฌด์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๋ชฉ๋ก์„ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ์œผ๋‚˜, ํ‡ด์‚ฌํ•œ ์ž„์ง์›์ด ํฌํ•จ๋˜์–ด ์žˆ๊ณ  ์ตœ๊ทผ ์‹ ๊ทœ ์ž…์‚ฌํ•œ ์ธ๋ ฅ์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š๋Š” ๋“ฑ ํ˜„ํ–‰ํ™” ๊ด€๋ฆฌ๊ฐ€ ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ถ€์„œ ๋‹จ์œ„๋กœ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๊ถŒํ•œ์„ ์ผ๊ด„ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ์–ด ์‹ค์ œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ทจ๊ธ‰ํ•  ํ•„์š”๊ฐ€ ์—†๋Š” ์ธ์›๊นŒ์ง€ ๊ณผ๋‹คํ•˜๊ฒŒ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๋กœ ์ง€์ •๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ์ฃผ์š” ์ง๋ฌด์ž ๊ถŒํ•œ ๋ถ€์—ฌ ์‹œ์—๋Š” ๋ณด์•ˆํŒ€์˜ ์Šน์ธ์„ ๋ฐ›๊ณ  ์ฃผ์š” ์ง๋ฌด์— ๋”ฐ๋ฅธ ๋ณด์•ˆ์„œ์•ฝ์„œ๋ฅผ ์ž‘์„ฑํ•˜๋„๋ก ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ณด์•ˆํŒ€ ์Šน์ธ ๋ฐ ๋ณด์•ˆ์„œ์•ฝ์„œ ์ž‘์„ฑ ์—†์ด ๋“ฑ๋ก๋œ ์ฃผ์š” ์ง๋ฌด์ž๊ฐ€ ๋‹ค์ˆ˜ ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ28์กฐ(๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž์— ๋Œ€ํ•œ ๊ฐ๋…), ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์˜ ์ทจ๊ธ‰์ด๋‚˜ ์ฃผ์š” ์‹œ์Šคํ…œ ์ ‘๊ทผ ๋“ฑ ์ฃผ์š” ์ง๋ฌด์˜ ๊ธฐ์ค€๊ณผ ๊ด€๋ฆฌ๋ฐฉ์•ˆ์„ ์ˆ˜๋ฆฝํ•˜๊ณ , ์ฃผ์š” ์ง๋ฌด์ž๋ฅผ ์ตœ์†Œํ•œ์œผ๋กœ ์ง€์ •ํ•˜์—ฌ ๊ทธ ๋ชฉ๋ก์„ ์ตœ์‹ ์œผ๋กœ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 3,"manual": 0}},"2.2.2": {"name": "์ง๋ฌด ๋ถ„๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.2.2 ์ง๋ฌด ๋ถ„๋ฆฌ","Subdomain": "2.2. 
์ธ์  ๋ณด์•ˆ","AuditEvidence": ["์ง๋ฌด ๋ถ„๋ฆฌ ๊ด€๋ จ ์ง€์นจ(์ธ์  ๋ณด์•ˆ ์ง€์นจ ๋“ฑ)","์ง๋ฌด๊ธฐ์ˆ ์„œ(์‹œ์Šคํ…œ ์šด์˜ยท๊ด€๋ฆฌ, ๊ฐœ๋ฐœยท์šด์˜ ๋“ฑ)","์ง๋ฌด ๋ฏธ๋ถ„๋ฆฌ ์‹œ ๋ณด์™„ํ†ต์ œ ํ˜„ํ™ฉ"],"AuditChecklist": ["๊ถŒํ•œ ์˜คยท๋‚จ์šฉ ๋“ฑ์œผ๋กœ ์ธํ•œ ์ž ์žฌ์ ์ธ ํ”ผํ•ด ์˜ˆ๋ฐฉ์„ ์œ„ํ•˜์—ฌ ์ง๋ฌด ๋ถ„๋ฆฌ ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ง๋ฌด ๋ถ„๋ฆฌ๊ฐ€ ์–ด๋ ค์šด ๊ฒฝ์šฐ ์ง๋ฌด์ž ๊ฐ„ ์ƒํ˜ธ ๊ฒ€ํ† , ์ƒ์œ„๊ด€๋ฆฌ์ž ์ •๊ธฐ ๋ชจ๋‹ˆํ„ฐ๋ง ๋ฐ ๋ณ€๊ฒฝ์‚ฌํ•ญ ์Šน์ธ, ์ฑ…์ž„์ถ”์ ์„ฑ ํ™•๋ณด ๋ฐฉ์•ˆ ๋“ฑ์˜ ๋ณด์™„ํ†ต์ œ๋ฅผ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์กฐ์ง์˜ ๊ทœ๋ชจ์™€ ์ธ์›์ด ๋‹ด๋‹น์ž๋ณ„ ์ง๋ฌด ๋ถ„๋ฆฌ๊ฐ€ ์ถฉ๋ถ„ํžˆ ๊ฐ€๋Šฅํ•œ ์กฐ์ง์ž„์—๋„ ์—…๋ฌด ํŽธ์˜์„ฑ๋งŒ์„ ์‚ฌ์œ ๋กœ ๋‚ด๋ถ€ ๊ทœ์ •์œผ๋กœ ์ •ํ•œ ์ง๋ฌด ๋ถ„๋ฆฌ ๊ธฐ์ค€์„ ์ค€์ˆ˜ํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์กฐ์ง์˜ ํŠน์„ฑ์ƒ ๊ฒฝ์˜์ง„์˜ ์Šน์ธ์„ ๋ฐ›์€ ํ›„ ๊ฐœ๋ฐœ๊ณผ ์šด์˜ ์ง๋ฌด๋ฅผ ๋ณ‘ํ–‰ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ง๋ฌด์ž ๊ฐ„ ์ƒํ˜ธ ๊ฒ€ํ† , ์ƒ์œ„๊ด€๋ฆฌ์ž์˜ ์ฃผ๊ธฐ์ ์ธ ์ง๋ฌด์ˆ˜ํ–‰ ๋ชจ๋‹ˆํ„ฐ๋ง ๋ฐ ๋ณ€๊ฒฝ ์‚ฌํ•ญ ๊ฒ€ํ† ยท์Šน์ธ, ์ง๋ฌด์ž์˜ ์ฑ…์ž„์ถ”์ ์„ฑ ํ™•๋ณด ๋“ฑ์˜ ๋ณด์™„ํ†ต์ œ ์ ˆ์ฐจ๊ฐ€ ๋งˆ๋ จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๊ถŒํ•œ ์˜คยท๋‚จ์šฉ ๋“ฑ์œผ๋กœ ์ธํ•œ ์ž ์žฌ์ ์ธ ํ”ผํ•ด ์˜ˆ๋ฐฉ์„ ์œ„ํ•˜์—ฌ ์ง๋ฌด ๋ถ„๋ฆฌ ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ๋‹ค๋งŒ ๋ถˆ๊ฐ€ํ”ผํ•˜๊ฒŒ ์ง๋ฌด ๋ถ„๋ฆฌ๊ฐ€ ์–ด๋ ค์šด ๊ฒฝ์šฐ ๋ณ„๋„์˜ ๋ณด์™„๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜์—ฌ ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.3": {"name": "๋ณด์•ˆ ์„œ์•ฝ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.2.3 ๋ณด์•ˆ ์„œ์•ฝ","Subdomain": "2.2. ์ธ์  ๋ณด์•ˆ","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์„œ์•ฝ์„œ(์ž„์ง์›, ์™ธ๋ถ€์ธ๋ ฅ)","๋น„๋ฐ€์œ ์ง€์„œ์•ฝ์„œ(ํ‡ด์ง์ž)"],"AuditChecklist": ["์‹ ๊ทœ ์ธ๋ ฅ ์ฑ„์šฉ ์‹œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ฑ…์ž„์ด ๋ช…์‹œ๋œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์„œ์•ฝ์„œ๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ž„์‹œ์ง์›, ์™ธ์ฃผ์šฉ์—ญ์ง์› ๋“ฑ ์™ธ๋ถ€์ž์—๊ฒŒ ์ •๋ณด์ž์‚ฐ์— ๋Œ€ํ•œ ์ ‘๊ทผ๊ถŒํ•œ์„ ๋ถ€์—ฌํ•  ๊ฒฝ์šฐ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์— ๋Œ€ํ•œ ์ฑ…์ž„, ๋น„๋ฐ€์œ ์ง€ ์˜๋ฌด ๋“ฑ์ด ๋ช…์‹œ๋œ ์„œ์•ฝ์„œ๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ž„์ง์› ํ‡ด์ง ์‹œ ๋ณ„๋„์˜ ๋น„๋ฐ€์œ ์ง€์— ๊ด€๋ จํ•œ ์„œ์•ฝ์„œ๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ, ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๋น„๋ฐ€์œ ์ง€ ์„œ์•ฝ์„œ๋Š” ์•ˆ์ „ํ•˜๊ฒŒ ๋ณด๊ด€ํ•˜๊ณ  ํ•„์š”์‹œ ์‰ฝ๊ฒŒ ์ฐพ์•„๋ณผ ์ˆ˜ ์žˆ๋„๋ก ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์‹ ๊ทœ ์ž…์‚ฌ์ž์— ๋Œ€ํ•ด์„œ๋Š” ์ž…์‚ฌ ์ ˆ์ฐจ์ƒ์— ๋ณด์•ˆ์„œ์•ฝ์„œ๋ฅผ ๋ฐ›๋„๋ก ๊ทœ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ตœ๊ทผ์— ์ž…์‚ฌํ•œ ์ผ๋ถ€ ์ง์›์˜ ๋ณด์•ˆ์„œ์•ฝ์„œ ์ž‘์„ฑ์ด ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ž„์ง์›์— ๋Œ€ํ•ด์„œ๋Š” ๋ณด์•ˆ์„œ์•ฝ์„œ๋ฅผ ๋ฐ›๊ณ  ์žˆ์œผ๋‚˜, ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ์ง์ ‘ ์ ‘์†์ด ๊ฐ€๋Šฅํ•œ ์™ธ์ฃผ ์ธ๋ ฅ์— ๋Œ€ํ•ด์„œ๋Š” ๋ณด์•ˆ์„œ์•ฝ์„œ๋ฅผ ๋ฐ›์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ œ์ถœ๋œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์„œ์•ฝ์„œ๋ฅผ ๋ชจ์•„ ๋†“์€ ๋ฌธ์„œ์ฒ ์ด ๋น„์ธ๊ฐ€์ž๊ฐ€ ์ ‘๊ทผ ๊ฐ€๋Šฅํ•œ ์ƒํƒœ๋กœ ์‚ฌ๋ฌด์‹ค ์ฑ…์ƒ์— ๋ฐฉ์น˜๋˜์–ด ์žˆ๋Š” ๋“ฑ ๊ด€๋ฆฌ๊ฐ€ ๋ฏธํกํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž์— ๋Œ€ํ•˜์—ฌ ๋ณด์•ˆ์„œ์•ฝ์„œ๋งŒ ๋ฐ›๊ณ  ์žˆ์œผ๋‚˜, ๋ณด์•ˆ์„œ์•ฝ์„œ ๋‚ด์— ๋น„๋ฐ€์œ ์ง€์— ๋Œ€ํ•œ ๋‚ด์šฉ๋งŒ ์žˆ๊ณ  ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์— ๊ด€ํ•œ ์ฑ…์ž„ ๋ฐ ๋‚ด์šฉ์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ •๋ณด์ž์‚ฐ์„ ์ทจ๊ธ‰ํ•˜๊ฑฐ๋‚˜ ์ ‘๊ทผ๊ถŒํ•œ์ด ๋ถ€์—ฌ๋œ ์ž„์ง์›ยท์ž„์‹œ์ง์›ยท์™ธ๋ถ€์ž ๋“ฑ์ด ๋‚ด๋ถ€ ์ •์ฑ… ๋ฐ","checks_status": {"fail": 0,"pass": 0,"total": 
0,"manual": 0}},"2.2.4": {"name": "์ธ์‹์ œ๊ณ  ๋ฐ ๊ต์œกํ›ˆ๋ จ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.2.4 ์ธ์‹์ œ๊ณ  ๋ฐ ๊ต์œกํ›ˆ๋ จ","Subdomain": "2.2. ์ธ์  ๋ณด์•ˆ","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๊ณ„ํš์„œ","๊ต์œก ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ","๊ณตํ†ต, ์ง๋ฌด๋ณ„ ๊ต์œก์ž๋ฃŒ","๊ต์œก์ฐธ์„์ž ๋ชฉ๋ก"],"AuditChecklist": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก์˜ ์‹œ๊ธฐ, ๊ธฐ๊ฐ„, ๋Œ€์ƒ, ๋‚ด์šฉ, ๋ฐฉ๋ฒ• ๋“ฑ์˜ ๋‚ด์šฉ์ด ํฌํ•จ๋œ ์—ฐ๊ฐ„ ๊ต์œก ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ๊ฒฝ์˜์ง„์˜ ์Šน์ธ์„ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด ๋ชจ๋“  ์ž„์ง์›๊ณผ ์™ธ๋ถ€์ž๋ฅผ ๋Œ€์ƒ์œผ๋กœ ์—ฐ๊ฐ„ ๊ต์œก ๊ณ„ํš์— ๋”ฐ๋ผ ์—ฐ 1ํšŒ ์ด์ƒ ์ •๊ธฐ์ ์œผ๋กœ ๊ต์œก์„ ์ˆ˜ํ–‰ํ•˜๊ณ , ๊ด€๋ จ ๋ฒ•๊ทœ ๋ฐ ๊ทœ์ •์˜ ์ค‘๋Œ€ํ•œ ๋ณ€๊ฒฝ ์‹œ ์ด์— ๋Œ€ํ•œ ์ถ”๊ฐ€๊ต์œก์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ž„์ง์› ์ฑ„์šฉ ๋ฐ ์™ธ๋ถ€์ž ์‹ ๊ทœ ๊ณ„์•ฝ ์‹œ ์—…๋ฌด ์‹œ์ž‘ ์ „์— ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก์„ ์‹œํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","IT ๋ฐ ์ •๋ณด๋ณดํ˜ธ, ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์กฐ์ง ๋‚ด ์ž„์ง์›์€ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์™€ ๊ด€๋ จํ•˜์—ฌ ์ง๋ฌด๋ณ„ ์ „๋ฌธ์„ฑ ์ œ๊ณ ๋ฅผ ์œ„ํ•œ ๋ณ„๋„์˜ ๊ต์œก์„ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","๊ต์œก์‹œํ–‰์— ๋Œ€ํ•œ ๊ธฐ๋ก์„ ๋‚จ๊ธฐ๊ณ  ๊ต์œก ํšจ๊ณผ์™€ ์ ์ •์„ฑ์„ ํ‰๊ฐ€ํ•˜์—ฌ ๋‹ค์Œ ๊ต์œก ๊ณ„ํš์— ๋ฐ˜์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ „๋…„๋„์—๋Š” ์—ฐ๊ฐ„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ดํ–‰ํ•˜์˜€์œผ๋‚˜, ๋‹นํ•ด ์—ฐ๋„์— ํƒ€๋‹นํ•œ ์‚ฌ์œ  ์—†์ด ์—ฐ๊ฐ„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์—ฐ๊ฐ„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๊ณ„ํš์— ๊ต์œก ์ฃผ๊ธฐ์™€ ๋Œ€์ƒ์€ ๋ช…์‹œํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์‹œํ–‰ ์ผ์ •, ๋‚ด์šฉ ๋ฐ ๋ฐฉ๋ฒ• ๋“ฑ์˜ ๋‚ด์šฉ์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์—ฐ๊ฐ„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๊ณ„ํš์— ์ „ ์ง์›์„ ๋Œ€์ƒ์œผ๋กœ ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ธ์‹ ๊ต์œก์€ ์ผ์ •์‹œ๊ฐ„ ๊ณ„ํš๋˜์–ด ์žˆ์œผ๋‚˜, ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด๋‹ด๋‹น์ž ๋“ฑ ์ง๋ฌด๋ณ„๋กœ ํ•„์š”ํ•œ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๊ต์œก ๊ณ„ํš์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๊ณ„ํš์„œ ๋ฐ ๊ฒฐ๊ณผ ๋ณด๊ณ ์„œ๋ฅผ ํ™•์ธํ•œ ๊ฒฐ๊ณผ, ์ธ์ฆ๋ฒ”์œ„ ๋‚ด์˜ ์ •๋ณด์ž์‚ฐ ๋ฐ ์„ค๋น„์— ์ ‘๊ทผํ•˜๋Š” ์™ธ์ฃผ์šฉ์—ญ์—…์ฒด ์ง์›(์ „์‚ฐ์‹ค ์ถœ์ž… ์ฒญ์†Œ์›, ๊ฒฝ๋น„์›, ์™ธ์ฃผ๊ฐœ๋ฐœ์ž ๋“ฑ)์„ ๊ต์œก ๋Œ€์ƒ์—์„œ ๋ˆ„๋ฝํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๋‹นํ•ด ์—ฐ๋„ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก์„ ์‹ค์‹œํ•˜์˜€์œผ๋‚˜, ๊ต์œก์‹œํ–‰ ๋ฐ ํ‰๊ฐ€์— ๊ด€ํ•œ ๊ธฐ๋ก(๊ต์œก ์ž๋ฃŒ, ์ถœ์„๋ถ€, ํ‰๊ฐ€ ์„ค๋ฌธ์ง€, ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ ๋“ฑ) ์ผ๋ถ€๋ฅผ ๋‚จ๊ธฐ์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ต์œก ๋ฏธ์ด์ˆ˜์ž๋ฅผ ํŒŒ์•…ํ•˜์ง€ ์•Š๊ณ  ์žˆ๊ฑฐ๋‚˜, ํ•ด๋‹น ๋ฏธ์ด์ˆ˜์ž์— ๋Œ€ํ•œ ์ถ”๊ฐ€๊ต์œก ๋ฐฉ๋ฒ•(์ „๋‹ฌ๊ต์œก, ์ถ”๊ฐ€๊ต์œก, ์˜จ๋ผ์ธ๊ต์œก ๋“ฑ)์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ26์กฐ(์—…๋ฌด์œ„ํƒ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ), ์ œ28์กฐ(๊ฐœ์ธ์ •๋ณด ์ทจ๊ธ‰์ž์— ๋Œ€ํ•œ ๊ฐ๋…), ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€)"]}],"description": "์ž„์ง์› ๋ฐ ๊ด€๋ จ ์™ธ๋ถ€์ž๊ฐ€ ์กฐ์ง์˜ ๊ด€๋ฆฌ์ฒด๊ณ„์™€ ์ •์ฑ…์„ ์ดํ•ดํ•˜๊ณ  ์ง๋ฌด๋ณ„ ์ „๋ฌธ์„ฑ์„ ํ™•๋ณดํ•  ์ˆ˜ ์žˆ๋„๋ก ์—ฐ๊ฐ„ ์ธ์‹์ œ๊ณ  ํ™œ๋™ ๋ฐ ๊ต์œกํ›ˆ๋ จ ๊ณ„ํš์„ ์ˆ˜๋ฆฝยท์šด์˜ํ•˜๊ณ , ๊ทธ ๊ฒฐ๊ณผ์— ๋”ฐ๋ฅธ ํšจ๊ณผ์„ฑ์„ ํ‰๊ฐ€ํ•˜์—ฌ ๋‹ค์Œ ๊ณ„ํš์— ๋ฐ˜์˜ํ•˜์—ฌ์•ผ 
ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.5": {"name": "ํ‡ด์ง ๋ฐ ์ง๋ฌด๋ณ€๊ฒฝ ๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.2.5 ํ‡ด์ง ๋ฐ ์ง๋ฌด๋ณ€๊ฒฝ ๊ด€๋ฆฌ","Subdomain": "2.2. ์ธ์  ๋ณด์•ˆ","AuditEvidence": ["ํ‡ด์ง ๋ฐ ์ง๋ฌด๋ณ€๊ฒฝ ์ ˆ์ฐจ์„œ","ํ‡ด์ง ์‹œ ์ž์‚ฐ(๊ณ„์ •) ๋ฐ˜๋‚ฉ๊ด€๋ฆฌ๋Œ€์žฅ","ํ‡ด์ง์ž ๋ณด์•ˆ์ ๊ฒ€ ์ฒดํฌ๋ฆฌ์ŠคํŠธ ๋ฐ ์ ๊ฒ€ ๋‚ด์—ญ"],"AuditChecklist": ["ํ‡ด์ง, ์ง๋ฌด๋ณ€๊ฒฝ, ๋ถ€์„œ์ด๋™, ํœด์ง ๋“ฑ์œผ๋กœ ์ธํ•œ ์ธ์‚ฌ๋ณ€๊ฒฝ ๋‚ด์šฉ์ด ์ธ์‚ฌ๋ถ€์„œ, ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๋ถ€์„œ, ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ์šด์˜๋ถ€์„œ ๊ฐ„ ๊ณต์œ ๋˜๊ณ  ์žˆ๋Š”๊ฐ€?","์กฐ์ง ๋‚ด ์ธ๋ ฅ(์ž„์ง์›, ์ž„์‹œ์ง์›, ์™ธ์ฃผ์šฉ์—ญ์ง์› ๋“ฑ)์˜ ํ‡ด์ง ๋˜๋Š” ์ง๋ฌด๋ณ€๊ฒฝ ์‹œ ์ง€์ฒด ์—†๋Š” ์ •๋ณด์ž์‚ฐ ๋ฐ˜๋‚ฉ, ์ ‘๊ทผ๊ถŒํ•œ ํšŒ์ˆ˜ยท์กฐ์ •, ๊ฒฐ๊ณผ ํ™•์ธ ๋“ฑ์˜ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ง๋ฌด ๋ณ€๋™์— ๋”ฐ๋ผ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž์—์„œ ์ œ์™ธ๋œ ์ธ๋ ฅ์˜ ๊ณ„์ •๊ณผ ๊ถŒํ•œ์ด ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ๊ทธ๋Œ€๋กœ ๋‚จ์•„ ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ตœ๊ทผ์— ํ‡ด์งํ•œ ์ฃผ์š”์ง๋ฌด์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž์— ๋Œ€ํ•˜์—ฌ ์ž์‚ฐ๋ฐ˜๋‚ฉ, ๊ถŒํ•œ ํšŒ์ˆ˜ ๋“ฑ์˜ ํ‡ด์ง์ ˆ์ฐจ ์ดํ–‰ ๊ธฐ๋ก์ด ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ž„์ง์› ํ‡ด์ง ์‹œ ์ž์‚ฐ๋ฐ˜๋‚ฉ ๊ด€๋ฆฌ๋Š” ์ž˜ ์ดํ–‰ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ธ์‚ฌ๊ทœ์ •์—์„œ ์ •ํ•œ ํ‡ด์ง์ž ๋ณด์•ˆ์ ๊ฒ€ ๋ฐ ํ‡ด์งํ™•์ธ์„œ๋ฅผ ์ž‘์„ฑํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ํ‡ด์ง ์‹œ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ์ ‘๊ทผ ๊ถŒํ•œ์€ ์ง€์ฒด ์—†์ด ํšŒ์ˆ˜๋˜์—ˆ์ง€๋งŒ, ์ถœ์ž…ํ†ต์ œ ์‹œ์Šคํ…œ ๋ฐ VPN ๋“ฑ ์ผ๋ถ€ ์‹œ์Šคํ…œ์˜ ์ ‘๊ทผ ๊ถŒํ•œ์ด ํšŒ์ˆ˜๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ)"]}],"description": "ํ‡ด์ง ๋ฐ ์ง๋ฌด๋ณ€๊ฒฝ ์‹œ ์ธ์‚ฌยท์ •๋ณด๋ณดํ˜ธยท๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธยทIT ๋“ฑ ๊ด€๋ จ ๋ถ€์„œ๋ณ„ ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•  ์ž์‚ฐ๋ฐ˜๋‚ฉ, ๊ณ„์ • ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ ํšŒ์ˆ˜ยท์กฐ์ •, ๊ฒฐ๊ณผํ™•์ธ ๋“ฑ์˜ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.2.6": {"name": "๋ณด์•ˆ ์œ„๋ฐ˜ ์‹œ ์กฐ์น˜","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.2.6 ๋ณด์•ˆ ์œ„๋ฐ˜ ์‹œ ์กฐ์น˜","Subdomain": "2.2. 
์ธ์  ๋ณด์•ˆ","AuditEvidence": ["์ธ์‚ฌ ๊ทœ์ •(์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๊ทœ์ • ์œ„๋ฐ˜์— ๋”ฐ๋ฅธ ์ฒ˜๋ฒŒ๊ทœ์ •)","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ง€์นจ ์œ„๋ฐ˜์ž ์ง•๊ณ„ ๋‚ด์—ญ","์‚ฌ๊ณ  ์‚ฌ๋ก€(์ „์‚ฌ ๊ณต์ง€, ๊ต์œก ๋‚ด์šฉ)"],"AuditChecklist": ["์ž„์ง์› ๋ฐ ๊ด€๋ จ ์™ธ๋ถ€์ž๊ฐ€ ๋ฒ•๋ น๊ณผ ๊ทœ์ œ ๋ฐ ๋‚ด๋ถ€์ •์ฑ…์— ๋”ฐ๋ฅธ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ฑ…์ž„๊ณผ ์˜๋ฌด๋ฅผ ์œ„๋ฐ˜ํ•œ ๊ฒฝ์šฐ์— ๋Œ€ํ•œ ์ฒ˜๋ฒŒ ๊ทœ์ •์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ์œ„๋ฐ˜ ์‚ฌํ•ญ์ด ์ ๋ฐœ๋œ ๊ฒฝ์šฐ ๋‚ด๋ถ€ ์ ˆ์ฐจ์— ๋”ฐ๋ฅธ ์กฐ์น˜๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ทœ์ • ์œ„๋ฐ˜์ž์— ๋Œ€ํ•œ ์ฒ˜๋ฆฌ ๊ธฐ์ค€ ๋ฐ ์ ˆ์ฐจ๊ฐ€ ๋‚ด๋ถ€ ๊ทœ์ •์— ์ „ํ˜€ ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ณด์•ˆ์‹œ์Šคํ…œ(DLP, ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ์ œ์–ด์‹œ์Šคํ…œ, ๋‚ด๋ถ€์ •๋ณด์œ ์ถœํ†ต์ œ์‹œ์Šคํ…œ ๋“ฑ)์„ ํ†ตํ•˜์—ฌ ์ •์ฑ… ์œ„๋ฐ˜์ด ํƒ์ง€๋œ ๊ด€๋ จ์ž์—๊ฒŒ ๊ฒฝ๊ณ  ๋ฉ”์‹œ์ง€๋ฅผ ์ „๋‹ฌํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ์†Œ๋ช… ๋ฐ ์ถ”๊ฐ€ ์กฐ์‚ฌ, ์ง•๊ณ„ ์ฒ˜๋ถ„ ๋“ฑ ๋‚ด๋ถ€ ๊ทœ์ •์— ๋”ฐ๋ฅธ ํ›„์† ์กฐ์น˜๊ฐ€ ์ดํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ž„์ง์› ๋ฐ ๊ด€๋ จ ์™ธ๋ถ€์ž๊ฐ€ ๋ฒ•๋ น, ๊ทœ์ œ ๋ฐ ๋‚ด๋ถ€์ •์ฑ…์„ ์œ„๋ฐ˜ํ•œ ๊ฒฝ์šฐ ์ด์— ๋”ฐ๋ฅธ ์กฐ์น˜ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.1": {"name": "์™ธ๋ถ€์ž ํ˜„ํ™ฉ ๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.3.1 ์™ธ๋ถ€์ž ํ˜„ํ™ฉ ๊ด€๋ฆฌ","Subdomain": "2.3. ์™ธ๋ถ€์ž ๋ณด์•ˆ","AuditEvidence": ["์™ธ๋ถ€ ์œ„ํƒ ๋ฐ ์™ธ๋ถ€ ์‹œ์„คยท์„œ๋น„์Šค ํ˜„ํ™ฉ","์™ธ๋ถ€ ์œ„ํƒ ๊ณ„์•ฝ์„œ","์œ„ํ—˜๋ถ„์„ ๋ณด๊ณ ์„œ ๋ฐ ๋ณดํ˜ธ๋Œ€์ฑ…","์œ„ํƒ ๋ณด์•ˆ๊ด€๋ฆฌ ์ง€์นจ, ์ฒดํฌ๋ฆฌ์ŠคํŠธ ๋“ฑ"],"AuditChecklist": ["๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด์—์„œ ๋ฐœ์ƒํ•˜๊ณ  ์žˆ๋Š” ์—…๋ฌด ์œ„ํƒ ๋ฐ ์™ธ๋ถ€ ์‹œ์„คยท์„œ๋น„์Šค์˜ ์ด์šฉ ํ˜„ํ™ฉ์„ ์‹๋ณ„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด ์œ„ํƒ ๋ฐ ์™ธ๋ถ€ ์‹œ์„คยท์„œ๋น„์Šค์˜ ์ด์šฉ์— ๋”ฐ๋ฅธ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ๊ณผ ์œ„ํ—˜์„ ํŒŒ์•…ํ•˜๊ณ  ์ ์ ˆํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ๊ทœ์ •์— ๋”ฐ๋ผ ์™ธ๋ถ€ ์œ„ํƒ ๋ฐ ์™ธ๋ถ€ ์‹œ์„คยท์„œ๋น„์Šค ํ˜„ํ™ฉ์„ ๋ชฉ๋ก์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ช‡ ๊ฐœ์›” ์ „์— ๋ณ€๊ฒฝ๋œ ์œ„ํƒ์—…์ฒด๊ฐ€ ๋ชฉ๋ก์— ๋ฐ˜์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๋“ฑ ํ˜„ํ–‰ํ™” ๊ด€๋ฆฌ๊ฐ€ ๋ฏธํกํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ด€๋ฆฌ์ฒด๊ณ„ ๋ฒ”์œ„ ๋‚ด ์ผ๋ถ€ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์„ ์™ธ๋ถ€ ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค๋กœ ์ด์ „ํ•˜์˜€์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ์‹๋ณ„ ๋ฐ ์œ„ํ—˜ํ‰๊ฐ€๊ฐ€ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ26์กฐ(์—…๋ฌด์œ„ํƒ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ50์กฐ์˜3(์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก์˜ ์œ„ํƒ ๋“ฑ)"]}],"description": "์—…๋ฌด์˜ ์ผ๋ถ€(๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰, ์ •๋ณด๋ณดํ˜ธ, ์ •๋ณด์‹œ์Šคํ…œ ์šด์˜ ๋˜๋Š” ๊ฐœ๋ฐœ ๋“ฑ)๋ฅผ ์™ธ๋ถ€์— ์œ„ํƒํ•˜๊ฑฐ๋‚˜ ์™ธ๋ถ€์˜ ์‹œ์„ค ๋˜๋Š” ์„œ๋น„์Šค(์ง‘์ ์ •๋ณดํ†ต์‹ ์‹œ์„ค, ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค, ์• ํ”Œ๋ฆฌ์ผ€์ด์…˜ ์„œ๋น„์Šค ๋“ฑ)๋ฅผ ์ด์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๊ทธ ํ˜„ํ™ฉ์„ ์‹๋ณ„ํ•˜๊ณ  ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ๋ฐ ์™ธ๋ถ€ ์กฐ์งยท์„œ๋น„์Šค๋กœ๋ถ€ํ„ฐ ๋ฐœ์ƒ๋˜๋Š” ์œ„ํ—˜์„ ํŒŒ์•…ํ•˜์—ฌ ์ ์ ˆํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.2": {"name": "์™ธ๋ถ€์ž ๊ณ„์•ฝ ์‹œ ๋ณด์•ˆ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.3.2 ์™ธ๋ถ€์ž ๊ณ„์•ฝ ์‹œ ๋ณด์•ˆ","Subdomain": "2.3. 
์™ธ๋ถ€์ž ๋ณด์•ˆ","AuditEvidence": ["์œ„ํƒ ๊ณ„์•ฝ์„œ","์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ํ˜‘์•ฝ์„œ(์•ฝ์ •์„œ, ๋ถ€์†ํ•ฉ์˜์„œ)","์œ„ํƒ ๊ด€๋ จ ๋‚ด๋ถ€ ์ง€์นจ","์œ„ํƒ์—…์ฒด ์„ ์ • ๊ด€๋ จ RFP(์ œ์•ˆ์š”์ฒญ์„œ), ํ‰๊ฐ€ํ‘œ"],"AuditChecklist": ["์ค‘์š”์ •๋ณด ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์™€ ๊ด€๋ จ๋œ ์™ธ๋ถ€ ์„œ๋น„์Šค ๋ฐ ์œ„ํƒ ์—…์ฒด๋ฅผ ์„ ์ •ํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ์—ญ๋Ÿ‰์„ ๊ณ ๋ คํ•˜๋„๋ก ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์™ธ๋ถ€ ์„œ๋น„์Šค ์ด์šฉ ๋ฐ ์—…๋ฌด ์œ„ํƒ์— ๋”ฐ๋ฅธ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์š”๊ตฌ์‚ฌํ•ญ์„ ์‹๋ณ„ํ•˜๊ณ  ์ด๋ฅผ ๊ณ„์•ฝ์„œ ๋˜๋Š” ํ˜‘์ •์„œ์— ๋ช…์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๊ฐœ๋ฐœ์„ ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ ๊ฐœ๋ฐœ ์‹œ ์ค€์ˆ˜ํ•˜์—ฌ์•ผ ํ•  ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์š”๊ตฌ์‚ฌํ•ญ์„ ๊ณ„์•ฝ์„œ์— ๋ช…์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : IT ์šด์˜, ๊ฐœ๋ฐœ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๋Š” ์™ธ์ฃผ์šฉ์—ญ์—…์ฒด์— ๋Œ€ํ•œ ์œ„ํƒ๊ณ„์•ฝ์„œ๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๋Š” ์™ธ๋ถ€์—…์ฒด์™€์˜ ์œ„ํƒ๊ณ„์•ฝ์„œ์ƒ์— ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ๋“ฑ ๋ฒ•๋ น์—์„œ ์š”๊ตฌํ•˜๋Š” ์ผ๋ถ€ ํ•ญ๋ชฉ(๊ด€๋ฆฌยท๊ฐ๋…์— ๊ด€ํ•œ ์‚ฌํ•ญ ๋“ฑ)์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ธํ”„๋ผ ์šด์˜๊ณผ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์ผ๋ถ€๋ฅผ ์™ธ๋ถ€์—…์ฒด์— ์œ„ํƒํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ณ„์•ฝ์„œ ๋“ฑ์—๋Š” ์œ„ํƒ์—…๋ฌด์˜ ํŠน์„ฑ์— ๋”ฐ๋ฅธ ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ์„ ์‹๋ณ„ยท๋ฐ˜์˜ํ•˜์ง€ ์•Š๊ณ  ๋น„๋ฐ€์œ ์ง€ ๋ฐ ์†ํ•ด๋ฐฐ์ƒ์— ๊ด€ํ•œ ์ผ๋ฐ˜ ์‚ฌํ•ญ๋งŒ ๊ทœ์ •ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ26์กฐ(์—…๋ฌด์œ„ํƒ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ)"]}],"description": "์™ธ๋ถ€ ์„œ๋น„์Šค๋ฅผ ์ด์šฉํ•˜๊ฑฐ๋‚˜ ์™ธ๋ถ€์ž์—๊ฒŒ ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ ์ด์— ๋”ฐ๋ฅธ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์š”๊ตฌ์‚ฌํ•ญ์„ ์‹๋ณ„ํ•˜๊ณ , ๊ด€๋ จ ๋‚ด์šฉ์„ ๊ณ„์•ฝ์„œ ๋˜๋Š” ํ˜‘์ •์„œ ๋“ฑ์— ๋ช…์‹œํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.3": {"name": "์™ธ๋ถ€์ž ๋ณด์•ˆ ์ดํ–‰ ๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.3.3 ์™ธ๋ถ€์ž ๋ณด์•ˆ ์ดํ–‰ ๊ด€๋ฆฌ","Subdomain": "2.3. 
์™ธ๋ถ€์ž ๋ณด์•ˆ","AuditEvidence": ["์™ธ๋ถ€์ž ๋ฐ ์ˆ˜ํƒ์ž ๋ณด์•ˆ์ ๊ฒ€ ๊ฒฐ๊ณผ","์™ธ๋ถ€์ž ๋ฐ ์ˆ˜ํƒ์ž ๊ต์œก ๋‚ด์—ญ(๊ต์œก ๊ฒฐ๊ณผ, ์ฐธ์„์ž ๋ช…๋‹จ, ๊ต์œก๊ต์žฌ ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ์œ„ํƒ ๊ณ„์•ฝ์„œ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์žฌ์œ„ํƒ ์‹œ ์œ„ํƒ์ž ๋™์˜ ์ฆ๊ฑฐ์ž๋ฃŒ"],"AuditChecklist": ["์™ธ๋ถ€์ž๊ฐ€ ๊ณ„์•ฝ์„œ, ํ˜‘์ •์„œ, ๋‚ด๋ถ€์ •์ฑ…์— ๋ช…์‹œ๋œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์š”๊ตฌ์‚ฌํ•ญ์„ ์ค€์ˆ˜ํ•˜๊ณ  ์žˆ๋Š”์ง€ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ ๋˜๋Š” ๊ฐ์‚ฌ๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์™ธ๋ถ€์ž์— ๋Œ€ํ•œ ์ ๊ฒ€ ๋˜๋Š” ๊ฐ์‚ฌ ์‹œ ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์— ๋Œ€ํ•˜์—ฌ ๊ฐœ์„ ๊ณ„ํš์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด๋ฅผ ์œ„ํƒ๋ฐ›์€ ์ˆ˜ํƒ์ž๊ฐ€ ๊ด€๋ จ ์—…๋ฌด๋ฅผ ์ œ3์ž์—๊ฒŒ ์žฌ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ ์œ„ํƒ์ž์˜ ๋™์˜๋ฅผ ๋ฐ›๋„๋ก ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํšŒ์‚ฌ ๋‚ด์— ์ƒ์ฃผํ•˜์—ฌ IT ๊ฐœ๋ฐœ ๋ฐ ์šด์˜ ์—…๋ฌด๋ฅผ ์ˆ˜ํ–‰ํ•˜๋Š” ์™ธ์ฃผ์—…์ฒด์— ๋Œ€ํ•ด์„œ๋Š” ์ •๊ธฐ์ ์œผ๋กœ ๋ณด์•ˆ์ ๊ฒ€์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ์ˆ˜ํƒ์ž์— ๋Œ€ํ•˜์—ฌ ๋ณด์•ˆ๊ต์œก์„ ์‹ค์‹œํ•˜๋ผ๋Š” ๊ณต๋ฌธ์„ ๋ฐœ์†กํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ต์œก ์ˆ˜ํ–‰ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ˆ˜ํƒ์ž๊ฐ€ ์ž์ฒด์ ์œผ๋กœ ๋ณด์•ˆ์ ๊ฒ€์„ ์ˆ˜ํ–‰ํ•œ ํ›„ ๊ทธ ๊ฒฐ๊ณผ๋ฅผ ํ†ต์ง€ํ•˜๋„๋ก ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ˆ˜ํƒ์ž๊ฐ€ ๋ณด์•ˆ ์ ๊ฒ€์„ ์ถฉ์‹คํžˆ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”์ง€ ์—ฌ๋ถ€์— ๋Œ€ํ•˜์—ฌ ํ™•์ธํ•˜๋Š” ์ ˆ์ฐจ๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š์•„ ๋ณด์•ˆ์ ๊ฒ€ ๊ฒฐ๊ณผ์˜ ์‹ ๋ขฐ์„ฑ์ด ๋งค์šฐ ๋–จ์–ด์ง€๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์ˆ˜ํƒ์ž ์ค‘ ์ผ๋ถ€๊ฐ€ ์œ„ํƒ์ž์˜ ๋™์˜ ์—†์ด ํ•ด๋‹น ์—…๋ฌด๋ฅผ ์ œ3์ž์—๊ฒŒ ์žฌ์œ„ํƒํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์˜๋ฆฌ ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด์ „์†ก ์—…๋ฌด๋ฅผ ํƒ€์ธ์—๊ฒŒ ์œ„ํƒํ•˜๋ฉด์„œ ์ˆ˜ํƒ์ž์— ๋Œ€ํ•œ ๊ด€๋ฆฌยท๊ฐ๋…์„ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ26์กฐ(์—…๋ฌด์œ„ํƒ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ50์กฐ์˜3(์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก์˜ ์œ„ํƒ ๋“ฑ)"]}],"description": "๊ณ„์•ฝ์„œ, ํ˜‘์ •์„œ, ๋‚ด๋ถ€์ •์ฑ…์— ๋ช…์‹œ๋œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์š”๊ตฌ์‚ฌํ•ญ์— ๋”ฐ๋ผ ์™ธ๋ถ€์ž์˜ ๋ณดํ˜ธ๋Œ€์ฑ… ์ดํ–‰ ์—ฌ๋ถ€๋ฅผ ์ฃผ๊ธฐ์ ์ธ ์ ๊ฒ€ ๋˜๋Š” ๊ฐ์‚ฌ ๋“ฑ ๊ด€๋ฆฌยท๊ฐ๋…ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.3.4": {"name": "์™ธ๋ถ€์ž ๊ณ„์•ฝ ๋ณ€๊ฒฝ ๋ฐ ๋งŒ๋ฃŒ ์‹œ ๋ณด์•ˆ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.3.4 ์™ธ๋ถ€์ž ๊ณ„์•ฝ ๋ณ€๊ฒฝ ๋ฐ ๋งŒ๋ฃŒ ์‹œ ๋ณด์•ˆ","Subdomain": "2.3. 
์™ธ๋ถ€์ž ๋ณด์•ˆ","AuditEvidence": ["์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์„œ์•ฝ์„œ","๋น„๋ฐ€์œ ์ง€ ํ™•์•ฝ์„œ","์ •๋ณด ๋ฐ ๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ ํ™•์•ฝ์„œ","์™ธ๋ถ€์ž ๊ณ„์•ฝ ์ข…๋ฃŒ์™€ ๊ด€๋ จ๋œ ๋‚ด๋ถ€ ์ •์ฑ…, ์ง€์นจ"],"AuditChecklist": ["์™ธ๋ถ€์ž ๊ณ„์•ฝ๋งŒ๋ฃŒ, ์—…๋ฌด ์ข…๋ฃŒ, ๋‹ด๋‹น์ž ๋ณ€๊ฒฝ ์‹œ ๊ณต์‹์ ์ธ ์ ˆ์ฐจ์— ๋”ฐ๋ฅธ ์ •๋ณด์ž์‚ฐ ๋ฐ˜๋‚ฉ, ์ •๋ณด์‹œ์Šคํ…œ ์ ‘๊ทผ๊ณ„์ • ์‚ญ์ œ, ๋น„๋ฐ€์œ ์ง€ ํ™•์•ฝ์„œ ์ง•๊ตฌ ๋“ฑ์ด ์ด๋ฃจ์–ด์งˆ ์ˆ˜ ์žˆ๋„๋ก ๋ณด์•ˆ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์™ธ๋ถ€์ž ๊ณ„์•ฝ ๋งŒ๋ฃŒ ์‹œ ์œ„ํƒ ์—…๋ฌด์™€ ๊ด€๋ จํ•˜์—ฌ ์™ธ๋ถ€์ž๊ฐ€ ์ค‘์š”์ •๋ณด ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ณด์œ ํ•˜๊ณ  ์žˆ๋Š”์ง€ ํ™•์ธํ•˜๊ณ  ์ด๋ฅผ ํšŒ์ˆ˜ยทํŒŒ๊ธฐํ•  ์ˆ˜ ์žˆ๋„๋ก ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ผ๋ถ€ ์ •๋ณด์‹œ์Šคํ…œ์—์„œ ๊ณ„์•ฝ ๋งŒ๋ฃŒ๋œ ์™ธ๋ถ€์ž์˜ ๊ณ„์ • ๋ฐ ๊ถŒํ•œ์ด ์‚ญ์ œ๋˜์ง€ ์•Š๊ณ  ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์™ธ์ฃผ์šฉ์—ญ์‚ฌ์—… ์ˆ˜ํ–‰๊ณผ์ •์—์„œ ์ผ๋ถ€ ์šฉ์—ญ์—…์ฒด ๋‹ด๋‹น์ž๊ฐ€ ๊ต์ฒด๋˜๊ฑฐ๋‚˜ ๊ณ„์•ฝ ๋งŒ๋ฃŒ๋กœ ํ‡ด์งํ•˜์˜€์œผ๋‚˜, ๊ด€๋ จ ์ธ๋ ฅ๋“ค์— ๋Œ€ํ•œ ํ‡ด์‚ฌ ์‹œ ๋ณด์•ˆ์„œ์•ฝ์„œ ๋“ฑ ๋‚ด๋ถ€ ๊ทœ์ •์— ๋”ฐ๋ฅธ ์กฐ์น˜๊ฐ€ ์ดํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ์œ„ํƒํ•œ ์—…์ฒด์™€ ๊ณ„์•ฝ ์ข…๋ฃŒ ์ดํ›„ ๋ณด์œ ํ•˜๊ณ  ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜์˜€๋Š”์ง€ ์—ฌ๋ถ€๋ฅผ ํ™•์ธยท์ ๊ฒ€ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ26์กฐ(์—…๋ฌด์œ„ํƒ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ50์กฐ์˜3(์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก์˜ ์œ„ํƒ ๋“ฑ)"]}],"description": "์™ธ๋ถ€์ž ๊ณ„์•ฝ๋งŒ๋ฃŒ, ์—…๋ฌด์ข…๋ฃŒ, ๋‹ด๋‹น์ž ๋ณ€๊ฒฝ ์‹œ์—๋Š” ์ œ๊ณตํ•œ ์ •๋ณด์ž์‚ฐ ๋ฐ˜๋‚ฉ, ์ •๋ณด์‹œ์Šคํ…œ ์ ‘๊ทผ๊ณ„์ • ์‚ญ์ œ, ์ค‘์š”์ •๋ณด ํŒŒ๊ธฐ, ์—…๋ฌด ์ˆ˜ํ–‰ ์ค‘ ์ทจ๋“์ •๋ณด์˜ ๋น„๋ฐ€์œ ์ง€ ํ™•์•ฝ์„œ ์ง•๊ตฌ ๋“ฑ์˜ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.1": {"name": "๋ณดํ˜ธ๊ตฌ์—ญ ์ง€์ •","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.1 ๋ณดํ˜ธ๊ตฌ์—ญ ์ง€์ •","Subdomain": "2.4. 
๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["๋ฌผ๋ฆฌ์  ๋ณด์•ˆ ์ง€์นจ(๋ณดํ˜ธ๊ตฌ์—ญ ์ง€์ • ๊ธฐ์ค€)","๋ณดํ˜ธ๊ตฌ์—ญ ์ง€์ • ํ˜„ํ™ฉ","๋ณดํ˜ธ๊ตฌ์—ญ ํ‘œ์‹œ","๋ณดํ˜ธ๊ตฌ์—ญ๋ณ„ ๋ณดํ˜ธ๋Œ€์ฑ… ํ˜„ํ™ฉ"],"AuditChecklist": ["๋ฌผ๋ฆฌ์ ยทํ™˜๊ฒฝ์  ์œ„ํ˜‘์œผ๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด, ๋ฌธ์„œ, ์ €์žฅ๋งค์ฒด, ์ฃผ์š” ์„ค๋น„ ๋ฐ ์‹œ์Šคํ…œ ๋“ฑ์„ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ํ†ต์ œ๊ตฌ์—ญ, ์ œํ•œ๊ตฌ์—ญ, ์ ‘๊ฒฌ๊ตฌ์—ญ ๋“ฑ ๋ฌผ๋ฆฌ์  ๋ณดํ˜ธ๊ตฌ์—ญ ์ง€์ •๊ธฐ์ค€์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฌผ๋ฆฌ์  ๋ณดํ˜ธ๊ตฌ์—ญ ์ง€์ •๊ธฐ์ค€์— ๋”ฐ๋ผ ๋ณดํ˜ธ๊ตฌ์—ญ์„ ์ง€์ •ํ•˜๊ณ  ๊ตฌ์—ญ๋ณ„ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ๋ฌผ๋ฆฌ๋ณด์•ˆ ์ง€์นจ์—๋Š” ๊ฐœ์ธ์ •๋ณด ๋ณด๊ด€์‹œ์„ค ๋ฐ ์‹œ์Šคํ…œ ๊ตฌ์—ญ์„ ํ†ต์ œ๊ตฌ์—ญ์œผ๋กœ ์ง€์ •ํ•œ๋‹ค๊ณ  ๋ช…์‹œ๋˜์–ด ์žˆ์œผ๋‚˜, ๋ฉค๋ฒ„์‹ญ ๊ฐ€์ž…์‹ ์ฒญ ์„œ๋ฅ˜๊ฐ€ ๋ณด๊ด€๋˜์–ด ์žˆ๋Š” ๋ฌธ์„œ๊ณ  ๋“ฑ ์ผ๋ถ€ ๋Œ€์ƒ ๊ตฌ์—ญ์ด ํ†ต์ œ๊ตฌ์—ญ์—์„œ ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ๋ฌผ๋ฆฌ๋ณด์•ˆ ์ง€์นจ์— ํ†ต์ œ๊ตฌ์—ญ์— ๋Œ€ํ•ด์„œ๋Š” ์ง€์ •๋œ ์–‘์‹์˜ ํ†ต์ œ๊ตฌ์—ญ ํ‘œ์ง€ํŒ์„ ์„ค์น˜ํ•˜๋„๋ก ๋ช…์‹œํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ผ๋ถ€ ํ†ต์ œ๊ตฌ์—ญ์— ํ‘œ์‹œํŒ์„ ์„ค์น˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ10์กฐ(๋ฌผ๋ฆฌ์  ์•ˆ์ „์กฐ์น˜)"]}],"description": "๋ฌผ๋ฆฌ์ ยทํ™˜๊ฒฝ์  ์œ„ํ˜‘์œผ๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด, ๋ฌธ์„œ, ์ €์žฅ๋งค์ฒด, ์ฃผ์š” ์„ค๋น„ ๋ฐ ์‹œ์Šคํ…œ ๋“ฑ์„ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ํ†ต์ œ๊ตฌ์—ญยท์ œํ•œ๊ตฌ์—ญยท์ ‘๊ฒฌ๊ตฌ์—ญ ๋“ฑ ๋ฌผ๋ฆฌ์  ๋ณดํ˜ธ๊ตฌ์—ญ์„ ์ง€์ •ํ•˜๊ณ  ๊ตฌ์—ญ๋ณ„ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.2": {"name": "์ถœ์ž…ํ†ต์ œ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.2 ์ถœ์ž…ํ†ต์ œ","Subdomain": "2.4. ๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["์ถœ์ž… ๊ด€๋ฆฌ๋Œ€์žฅ ๋ฐ ์ถœ์ž…๋กœ๊ทธ","์ถœ์ž… ๋“ฑ๋ก ์‹ ์ฒญ์„œ ๋ฐ ์Šน์ธ ๋‚ด์—ญ","์ถœ์ž…๊ธฐ๋ก ๊ฒ€ํ† ์„œ","์ถœ์ž…ํ†ต์ œ์‹œ์Šคํ…œ ๊ด€๋ฆฌํ™”๋ฉด(์ถœ์ž…์ž ๋“ฑ๋ก ํ˜„ํ™ฉ ๋“ฑ)"],"AuditChecklist": ["๋ณดํ˜ธ๊ตฌ์—ญ์€ ์ถœ์ž…์ ˆ์ฐจ์— ๋”ฐ๋ผ ์ถœ์ž…์ด ํ—ˆ๊ฐ€๋œ ์ž๋งŒ ์ถœ์ž…ํ•˜๋„๋ก ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐ ๋ณดํ˜ธ๊ตฌ์—ญ์— ๋Œ€ํ•œ ๋‚ดยท์™ธ๋ถ€์ž ์ถœ์ž…๊ธฐ๋ก์„ ์ผ์ •๊ธฐ๊ฐ„ ๋ณด์กดํ•˜๊ณ  ์ถœ์ž…๊ธฐ๋ก ๋ฐ ์ถœ์ž…๊ถŒํ•œ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํ†ต์ œ๊ตฌ์—ญ์„ ์ •์˜ํ•˜์—ฌ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์ถœ์ž… ๊ฐ€๋Šฅํ•œ ์ž„์ง์›์„ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ถœ์ž…๊ธฐ๋ก์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์ง€ ์•Š์•„ ํ‡ด์ง, ์ „๋ฐฐ ๋“ฑ์— ๋”ฐ๋ฅธ ์žฅ๊ธฐ ๋ฏธ์ถœ์ž…์ž๊ฐ€ ๋‹ค์ˆ˜ ์กด์žฌํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ „์‚ฐ์‹ค, ๋ฌธ์„œ๊ณ  ๋“ฑ ํ†ต์ œ๊ตฌ์—ญ์— ์ถœ์ž…ํ†ต์ œ ์žฅ์น˜๊ฐ€ ์„ค์น˜๋˜์–ด ์žˆ์œผ๋‚˜, ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋˜๋Š” ์Šน์ธ ์—†์ด ์žฅ์‹œ๊ฐ„ ๊ฐœ๋ฐฉ ์ƒํƒœ๋กœ ์œ ์ง€ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ผ๋ถ€ ์™ธ๋ถ€ ํ˜‘๋ ฅ์—…์ฒด ์ง์›์—๊ฒŒ ๊ณผ๋„ํ•˜๊ฒŒ ์ „ ๊ตฌ์—ญ์„ ์ƒ์‹œ ์ถœ์ž…ํ•  ์ˆ˜ ์žˆ๋Š” ์ถœ์ž…์นด๋“œ๋ฅผ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ10์กฐ(๋ฌผ๋ฆฌ์  ์•ˆ์ „์กฐ์น˜)"]}],"description": "๋ณดํ˜ธ๊ตฌ์—ญ์€ ์ธ๊ฐ€๋œ ์‚ฌ๋žŒ๋งŒ์ด ์ถœ์ž…ํ•˜๋„๋ก ํ†ต์ œํ•˜๊ณ  ์ฑ…์ž„์ถ”์ ์„ฑ์„ ํ™•๋ณดํ•  ์ˆ˜ ์žˆ๋„๋ก ์ถœ์ž… ๋ฐ ์ ‘๊ทผ ์ด๋ ฅ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.3": {"name": "์ •๋ณด์‹œ์Šคํ…œ ๋ณดํ˜ธ","checks": {},"status": "PASS","attributes": [{"Domain": "2. 
๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.3 ์ •๋ณด์‹œ์Šคํ…œ ๋ณดํ˜ธ","Subdomain": "2.4. ๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["์ •๋ณด์ฒ˜๋ฆฌ์‹œ์„ค ๋„๋ฉด","์ •๋ณด์‹œ์Šคํ…œ ๋ฐฐ์น˜๋„","์ž์‚ฐ๋ชฉ๋ก"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์˜ ์ค‘์š”๋„, ์šฉ๋„, ํŠน์„ฑ ๋“ฑ์„ ๊ณ ๋ คํ•˜์—ฌ ๋ฐฐ์น˜ ์žฅ์†Œ๋ฅผ ๋ถ„๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์˜ ์‹ค์ œ ๋ฌผ๋ฆฌ์  ์œ„์น˜๋ฅผ ์†์‰ฝ๊ฒŒ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ์•ˆ์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ „๋ ฅ ๋ฐ ํ†ต์‹ ์ผ€์ด๋ธ”์„ ์™ธ๋ถ€๋กœ๋ถ€ํ„ฐ์˜ ๋ฌผ๋ฆฌ์  ์†์ƒ ๋ฐ ์ „๊ธฐ์  ์˜ํ–ฅ์œผ๋กœ๋ถ€ํ„ฐ ์•ˆ์ „ํ•˜๊ฒŒ ๋ณดํ˜ธํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์‹œ์Šคํ…œ ๋ฐฐ์น˜๋„๊ฐ€ ์ตœ์‹  ๋ณ€๊ฒฝ์‚ฌํ•ญ์„ ๋ฐ˜์˜ํ•˜์—ฌ ์—…๋ฐ์ดํŠธ๋˜์ง€ ์•Š์•„ ์žฅ์• ๊ฐ€ ๋ฐœ์ƒ๋œ ์ •๋ณด์‹œ์Šคํ…œ์„ ์‹ ์†ํ•˜๊ฒŒ ํ™•์ธํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์„œ๋ฒ„์‹ค ๋ฐ”๋‹ฅ ๋˜๋Š” ๋ž™์— ๋งŽ์€ ์ผ€์ด๋ธ”์ด ์ •๋ฆฌ๋˜์ง€ ์•Š๊ณ  ๋’ค์—‰์ผœ ์žˆ์–ด ์ „๊ธฐ์ ์œผ๋กœ ๊ฐ„์„ญ, ์†์ƒ, ๋ˆ„์ˆ˜, ๋ถ€์ฃผ์˜ ๋“ฑ์— ์˜ํ•œ ์žฅ์•  ๋ฐœ์ƒ์ด ์šฐ๋ ค๋˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ •๋ณด์‹œ์Šคํ…œ์€ ํ™˜๊ฒฝ์  ์œ„ํ˜‘๊ณผ ์œ ํ•ด์š”์†Œ, ๋น„์ธ๊ฐ€ ์ ‘๊ทผ ๊ฐ€๋Šฅ์„ฑ์„ ๊ฐ์†Œ์‹œํ‚ฌ ์ˆ˜ ์žˆ๋„๋ก ์ค‘์š”๋„์™€ ํŠน์„ฑ์„ ๊ณ ๋ คํ•˜์—ฌ ๋ฐฐ์น˜ํ•˜๊ณ , ํ†ต์‹  ๋ฐ ์ „๋ ฅ ์ผ€์ด๋ธ”์ด ์†์ƒ์„ ์ž…์ง€ ์•Š๋„๋ก ๋ณดํ˜ธํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.4": {"name": "๋ณดํ˜ธ์„ค๋น„ ์šด์˜","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.4 ๋ณดํ˜ธ์„ค๋น„ ์šด์˜","Subdomain": "2.4. ๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["๋ฌผ๋ฆฌ์  ๋ณด์•ˆ ์ง€์นจ(๋ณดํ˜ธ์„ค๋น„ ๊ด€๋ จ)","์ „์‚ฐ์‹ค ์„ค๋น„ ํ˜„ํ™ฉ ๋ฐ ์ ๊ฒ€ํ‘œ","IDC ์œ„ํƒ์šด์˜ ๊ณ„์•ฝ์„œ, SLA ๋“ฑ"],"AuditChecklist": ["๊ฐ ๋ณดํ˜ธ๊ตฌ์—ญ์˜ ์ค‘์š”๋„ ๋ฐ ํŠน์„ฑ์— ๋”ฐ๋ผ ํ™”์žฌ, ์ˆ˜ํ•ด, ์ „๋ ฅ ์ด์ƒ ๋“ฑ ์ธ์žฌ ๋ฐ ์ž์—ฐ์žฌํ•ด ๋“ฑ์— ๋Œ€๋น„ํ•˜์—ฌ ํ•„์š”ํ•œ ์„ค๋น„๋ฅผ ๊ฐ–์ถ”๊ณ  ์šด์˜์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์™ธ๋ถ€ ์ง‘์ ์ •๋ณดํ†ต์‹ ์‹œ์„ค(IDC)์— ์œ„ํƒ ์šด์˜ํ•˜๋Š” ๊ฒฝ์šฐ ๋ฌผ๋ฆฌ์  ๋ณดํ˜ธ์— ํ•„์š”ํ•œ ์š”๊ตฌ์‚ฌํ•ญ์„ ๊ณ„์•ฝ์„œ์— ๋ฐ˜์˜ํ•˜๊ณ  ์šด์˜์ƒํƒœ๋ฅผ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋ณธ์‚ฌ ์ „์‚ฐ์‹ค ๋“ฑ ์ผ๋ถ€ ๋ณดํ˜ธ๊ตฌ์—ญ์— ๋‚ด๋ถ€ ์ง€์นจ์— ์ •ํ•œ ๋ณดํ˜ธ์„ค๋น„๋ฅผ ๊ฐ–์ถ”๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ „์‚ฐ์‹ค ๋‚ด์— UPS, ์†Œํ™”์„ค๋น„ ๋“ฑ์˜ ๋ณดํ˜ธ์„ค๋น„๋Š” ๊ฐ–์ถ”๊ณ  ์žˆ์œผ๋‚˜, ๊ด€๋ จ ์„ค๋น„์— ๋Œ€ํ•œ ์šด์˜ ๋ฐ ์ ๊ฒ€ ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์šด์˜์ง€์นจ์— ๋”ฐ๋ผ ์ „์‚ฐ์‹ค ๋‚ด์— ์˜จยท์Šต๋„ ์กฐ์ ˆ๊ธฐ๋ฅผ ์„ค์น˜ํ•˜์˜€์œผ๋‚˜, ์šฉ๋Ÿ‰ ๋ถ€์กฑ์œผ๋กœ ์ธํ•˜์—ฌ ํ‘œ์ค€ ์˜จยท์Šต๋„๋ฅผ ์œ ์ง€ํ•˜์ง€ ๋ชปํ•˜์—ฌ ์žฅ์• ๋ฐœ์ƒ ๊ฐ€๋Šฅ์„ฑ์ด ๋†’์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ46์กฐ(์ง‘์ ๋œ ์ •๋ณดํ†ต์‹ ์‹œ์„ค์˜ ๋ณดํ˜ธ)","์ง‘์ ์ •๋ณด ํ†ต์‹ ์‹œ์„ค ๋ณดํ˜ธ์ง€์นจ","์†Œ๋ฐฉ์‹œ์„ค ์„ค์น˜ ๋ฐ ๊ด€๋ฆฌ์— ๊ด€ํ•œ ๋ฒ•๋ฅ (์†Œ๋ฐฉ์‹œ์„ค๋ฒ•) ์ œ12์กฐ(ํŠน์ •์†Œ๋ฐฉ๋Œ€์ƒ๋ฌผ์— ์„ค์น˜ํ•˜๋Š” ์†Œ๋ฐฉ์‹œ์„ค์˜ ๊ด€๋ฆฌ ๋“ฑ), ์ œ16์กฐ(ํ”ผ๋‚œ์‹œ์„ค, ๋ฐฉํ™”๊ตฌ์—ญ ๋ฐ ๋ฐฉํ™”์‹œ์„ค์˜ ๊ด€๋ฆฌ)"]}],"description": "๋ณดํ˜ธ๊ตฌ์—ญ์— ์œ„์น˜ํ•œ ์ •๋ณด์‹œ์Šคํ…œ์˜ ์ค‘์š”๋„ ๋ฐ ํŠน์„ฑ์— ๋”ฐ๋ผ ์˜จยท์Šต๋„ ์กฐ์ ˆ, ํ™”์žฌ๊ฐ์ง€, ์†Œํ™”์„ค๋น„, ๋ˆ„์ˆ˜๊ฐ์ง€, UPS, ๋น„์ƒ๋ฐœ์ „๊ธฐ, ์ด์ค‘์ „์›์„  ๋“ฑ์˜ ๋ณดํ˜ธ์„ค๋น„๋ฅผ ๊ฐ–์ถ”๊ณ  ์šด์˜์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์šด์˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.5": {"name": "๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ž‘์—…","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.5 ๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ž‘์—…","Subdomain": "2.4. 
๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["์ž‘์—… ์‹ ์ฒญ์„œ, ์ž‘์—… ์ผ์ง€","ํ†ต์ œ๊ตฌ์—ญ ์ถœ์ž… ๋Œ€์žฅ","ํ†ต์ œ๊ตฌ์—ญ์— ๋Œ€ํ•œ ์ถœ์ž…๊ธฐ๋ก ๋ฐ ์ž‘์—… ๊ธฐ๋ก ๊ฒ€ํ†  ๋‚ด์—ญ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ ๋„์ž…, ์œ ์ง€๋ณด์ˆ˜ ๋“ฑ์œผ๋กœ ๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ž‘์—…์ด ํ•„์š”ํ•œ ๊ฒฝ์šฐ์— ๋Œ€ํ•œ ๊ณต์‹์ ์ธ ์ž‘์—…์‹ ์ฒญ ๋ฐ ์ˆ˜ํ–‰ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ž‘์—…์ด ํ†ต์ œ ์ ˆ์ฐจ์— ๋”ฐ๋ผ ์ ์ ˆํžˆ ์ˆ˜ํ–‰๋˜์—ˆ๋Š”์ง€ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ž‘์—… ๊ธฐ๋ก์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ „์‚ฐ์‹ค ์ถœ์ž…๋กœ๊ทธ์—๋Š” ์™ธ๋ถ€ ์œ ์ง€๋ณด์ˆ˜ ์—…์ฒด ์ง์›์˜ ์ถœ์ž…๊ธฐ๋ก์ด ๋‚จ์•„ ์žˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ๋ณดํ˜ธ๊ตฌ์—ญ ์ž‘์—… ์‹ ์ฒญ ๋ฐ ์Šน์ธ ๋‚ด์—ญ์ด ์กด์žฌํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ(๋‚ด๋ถ€ ๊ทœ์ •์— ๋”ฐ๋ฅธ ๋ณดํ˜ธ๊ตฌ์—ญ ์ž‘์—… ์‹ ์ฒญ ์—†์ด ๋ณดํ˜ธ๊ตฌ์—ญ ์ถœ์ž… ๋ฐ ์ž‘์—…์ด ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ)","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ž‘์—… ๊ธฐ๋ก์— ๋Œ€ํ•˜์—ฌ ๋ถ„๊ธฐ๋ณ„ 1ํšŒ ์ด์ƒ ์ ๊ฒ€ํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ํŠน๋ณ„ํ•œ ์‚ฌ์œ  ์—†์ด ์žฅ๊ธฐ๊ฐ„ ๋™์•ˆ ๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ž‘์—… ๊ธฐ๋ก์— ๋Œ€ํ•œ ์ ๊ฒ€์ด ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด์—์„œ์˜ ๋น„์ธ๊ฐ€ํ–‰์œ„ ๋ฐ ๊ถŒํ•œ ์˜คยท๋‚จ์šฉ ๋“ฑ์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•œ ์ž‘์—… ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝ ๋ฐ์ดํ–‰ํ•˜๊ณ , ์ž‘์—… ๊ธฐ๋ก์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.6": {"name": "๋ฐ˜์ถœ์ž… ๊ธฐ๊ธฐ ํ†ต์ œ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.6 ๋ฐ˜์ถœ์ž… ๊ธฐ๊ธฐ ํ†ต์ œ","Subdomain": "2.4. ๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ๋ฐ˜์ถœ์ž… ์‹ ์ฒญ์„œ","๋ฐ˜์ถœ์ž… ๊ด€๋ฆฌ๋Œ€์žฅ","๋ฐ˜์ถœ์ž… ์ด๋ ฅ ๊ฒ€ํ†  ๊ฒฐ๊ณผ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ, ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ, ์ €์žฅ๋งค์ฒด ๋“ฑ์„ ๋ณดํ˜ธ๊ตฌ์—ญ์— ๋ฐ˜์ž…ํ•˜๊ฑฐ๋‚˜ ๋ฐ˜์ถœํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด์œ ์ถœ, ์•…์„ฑ์ฝ”๋“œ ๊ฐ์—ผ ๋“ฑ ๋ณด์•ˆ์‚ฌ๊ณ  ์˜ˆ๋ฐฉ์„ ์œ„ํ•œ ํ†ต์ œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฐ˜์ถœ์ž… ํ†ต์ œ์ ˆ์ฐจ์— ๋”ฐ๋ฅธ ๊ธฐ๋ก์„ ์œ ์ง€ยท๊ด€๋ฆฌํ•˜๊ณ , ์ ˆ์ฐจ ์ค€์ˆ˜ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ฐ˜์ถœ์ž… ์ด๋ ฅ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ด๋™์ปดํ“จํŒ…๊ธฐ๊ธฐ ๋ฐ˜์ถœ์ž…์— ๋Œ€ํ•œ ํ†ต์ œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ์œผ๋‚˜, ํ†ต์ œ๊ตฌ์—ญ ๋‚ด ์ด๋™์ปดํ“จํŒ…๊ธฐ๊ธฐ ๋ฐ˜์ž…์— ๋Œ€ํ•œ ํ†ต์ œ๋ฅผ ํ•˜๊ณ  ์žˆ์ง€ ์•Š์•„ ์ถœ์ž…์ด ํ—ˆ์šฉ๋œ ๋‚ดยท์™ธ๋ถ€์ธ์ด ์ด๋™์ปดํ“จํŒ…๊ธฐ๊ธฐ๋ฅผ ์ œ์•ฝ ์—†์ด ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ์ง€์นจ์— ๋”ฐ๋ผ ์ „์‚ฐ์žฅ๋น„ ๋ฐ˜์ถœ์ž…์ด ์žˆ๋Š” ๊ฒฝ์šฐ ์ž‘์—…๊ณ„ํš์„œ์— ๋ฐ˜์ถœ์ž… ๋‚ด์šฉ์„ ๊ธฐ์žฌํ•˜๊ณ  ๊ด€๋ฆฌ ์ฑ…์ž„์ž์˜ ์„œ๋ช…์„ ๋ฐ›๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์ž‘์—…๊ณ„ํš์„œ์˜ ๋ฐ˜์ถœ์ž… ๊ธฐ๋ก์— ๊ด€๋ฆฌ์ฑ…์ž„์ž์˜ ์„œ๋ช…์ด ๋‹ค์ˆ˜ ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ10์กฐ(๋ฌผ๋ฆฌ์  ์•ˆ์ „์กฐ์น˜)"]}],"description": "๋ณดํ˜ธ๊ตฌ์—ญ ๋‚ด ์ •๋ณด์‹œ์Šคํ…œ, ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ, ์ €์žฅ๋งค์ฒด ๋“ฑ์— ๋Œ€ํ•œ ๋ฐ˜์ถœ์ž… ํ†ต์ œ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝ ๋ฐ์ดํ–‰ํ•˜๊ณ  ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.4.7": {"name": "์—…๋ฌดํ™˜๊ฒฝ ๋ณด์•ˆ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.4.7 ์—…๋ฌดํ™˜๊ฒฝ ๋ณด์•ˆ","Subdomain": "2.4. 
๋ฌผ๋ฆฌ ๋ณด์•ˆ","AuditEvidence": ["์‚ฌ๋ฌด์‹ค ๋ฐ ๊ณต์šฉ๊ณต๊ฐ„ ๋ณด์•ˆ์ ๊ฒ€ ๋ณด๊ณ ์„œ","์‚ฌ๋ฌด์‹ค ๋ฐ ๊ณต์šฉ๊ณต๊ฐ„ ๋ณด์•ˆ์ ๊ฒ€ํ‘œ","๋ฏธ์ค€์ˆ˜์ž์— ๋Œ€ํ•œ ์กฐ์น˜ ์‚ฌํ•ญ(๊ต์œก, ์ƒ๋ฒŒ ๋“ฑ)","์ถœ๋ ฅยท๋ณต์‚ฌ๋ฌผ ๋ณดํ˜ธ์กฐ์น˜ ํ˜„ํ™ฉ"],"AuditChecklist": ["๋ฌธ์„œ๊ณ , ๊ณต์šฉ PC, ๋ณตํ•ฉ๊ธฐ, ํŒŒ์ผ์„œ๋ฒ„ ๋“ฑ ๊ณต์šฉ์œผ๋กœ ์‚ฌ์šฉํ•˜๋Š” ์‹œ์„ค ๋ฐ ์‚ฌ๋ฌด์šฉ ๊ธฐ๊ธฐ์— ๋Œ€ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด์šฉ PC, ์ฑ…์ƒ, ์„œ๋ž ๋“ฑ ๊ฐœ์ธ์—…๋ฌด ํ™˜๊ฒฝ์„ ํ†ตํ•œ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์˜ ์œ ยท๋…ธ์ถœ์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๊ฐ€ ํฌํ•จ๋œ ์ข…์ด ์ธ์‡„๋ฌผ ๋“ฑ ๊ฐœ์ธ์ •๋ณด์˜ ์ถœ๋ ฅยท๋ณต์‚ฌ๋ฌผ์„ ์•ˆ์ „ํ•˜๊ฒŒ ๊ด€๋ฆฌํ•˜๊ธฐ ์œ„ํ•ด ํ•„์š”ํ•œ ๋ณดํ˜ธ์กฐ์น˜๋ฅผ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ ๋ฐ ๊ณต์šฉ์—…๋ฌด ํ™˜๊ฒฝ์—์„œ์˜ ์ •๋ณด๋ณดํ˜ธ ์ค€์ˆ˜ ์—ฌ๋ถ€๋ฅผ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด ๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์„œ ๋‚ด ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ๋ฅผ ์œ„ํ•œ ์ƒํ™œ๋ณด์•ˆ ์ ๊ฒ€(ํด๋ฆฐ๋ฐ์Šคํฌ ์šด์˜ ๋“ฑ)์„ ์ •๊ธฐ์ ์œผ๋กœ ์ˆ˜ํ–‰ํ•˜๋„๋ก ๋ช…์‹œํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ด๋ฅผ ์ดํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ฉค๋ฒ„์‹ญ ๊ฐ€์ž…์‹ ์ฒญ์„œ ๋“ฑ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ํฌํ•จ๋œ ์„œ๋ฅ˜๋ฅผ ์ž ๊ธˆ์žฅ์น˜๊ฐ€ ์—†๋Š” ์‚ฌ๋ฌด์‹ค ๋ฌธ์„œํ•จ์— ๋ณด๊ด€ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ง์›๋“ค์˜ ์ปดํ“จํ„ฐ ํ™”๋ฉด๋ณดํ˜ธ๊ธฐ ๋ฐ ํŒจ์Šค์›Œ๋“œ๊ฐ€ ์„ค์ •๋˜์–ด ์žˆ์ง€ ์•Š๊ณ , ํœด๊ฐ€์ž ์ฑ…์ƒ ์œ„์— ์ค‘์š”๋ฌธ์„œ๊ฐ€ ์žฅ๊ธฐ๊ฐ„ ๋ฐฉ์น˜๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ํšŒ์˜์‹ค ๋“ฑ ๊ณต์šฉ ์‚ฌ๋ฌด ๊ณต๊ฐ„์— ์„ค์น˜๋œ ๊ณต์šฉPC์— ๋Œ€ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์•„ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ํฌํ•จ๋œ ํŒŒ์ผ์ด ์•”ํ˜ธํ™”๋˜์ง€ ์•Š์€ ์ฑ„๋กœ ์ €์žฅ๋˜์–ด ์žˆ๊ฑฐ๋‚˜, ๋ณด์•ˆ ์—…๋ฐ์ดํŠธ ๋ฏธ์ ์šฉ, ๋ฐฑ์‹  ๋ฏธ์„ค์น˜ ๋“ฑ ์ทจ์•ฝํ•œ ์ƒํƒœ๋กœ ์œ ์ง€ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ10์กฐ(๋ฌผ๋ฆฌ์  ์•ˆ์ „์กฐ์น˜), ์ œ12์กฐ(์ถœ๋ ฅยท๋ณต์‚ฌ์‹œ ์•ˆ์ „์กฐ์น˜)"]}],"description": "๊ณต์šฉ์œผ๋กœ ์‚ฌ์šฉํ•˜๋Š” ์‚ฌ๋ฌด์šฉ ๊ธฐ๊ธฐ(๋ฌธ์„œ๊ณ , ๊ณต์šฉ PC, ๋ณตํ•ฉ๊ธฐ, ํŒŒ์ผ์„œ๋ฒ„ ๋“ฑ) ๋ฐ ๊ฐœ์ธ ์—…๋ฌดํ™˜๊ฒฝ(์—…๋ฌด์šฉ PC, ์ฑ…์ƒ ๋“ฑ)์„ ํ†ตํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๊ฐ€ ๋น„์ธ๊ฐ€์ž์—๊ฒŒ ๋…ธ์ถœ ๋˜๋Š” ์œ ์ถœ๋˜์ง€ ์•Š๋„๋ก ํด๋ฆฐ๋ฐ์Šคํฌ, ์ •๊ธฐ์ ๊ฒ€ ๋“ฑ ์—…๋ฌดํ™˜๊ฒฝ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.5.1": {"name": "์‚ฌ์šฉ์ž ๊ณ„์ • ๊ด€๋ฆฌ","checks": {"iam_user_accesskey_unused": null,"iam_securityaudit_role_created": null,"iam_user_console_access_unused": null,"iam_policy_no_full_access_to_kms": null,"iam_role_administratoraccess_policy": null,"iam_user_administrator_access_policy": null,"organizations_scp_check_deny_regions": null,"iam_group_administrator_access_policy": null,"iam_policy_allows_privilege_escalation": null,"iam_inline_policy_no_full_access_to_kms": null,"iam_policy_no_full_access_to_cloudtrail": null,"iam_policy_attached_only_to_group_or_roles": null,"cognito_user_pool_self_registration_disabled": null,"iam_role_cross_account_readonlyaccess_policy": null,"iam_inline_policy_allows_privilege_escalation": null,"iam_inline_policy_no_administrative_privileges": null,"iam_inline_policy_no_full_access_to_cloudtrail": null,"iam_no_custom_policy_permissive_role_assumption": null,"iam_role_cross_service_confused_deputy_prevention": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"iam_customer_unattached_policy_no_administrative_privileges": null},"status": 
"PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.5.1 ์‚ฌ์šฉ์ž ๊ณ„์ • ๊ด€๋ฆฌ","Subdomain": "2.5. ์ธ์ฆ ๋ฐ ๊ถŒํ•œ๊ด€๋ฆฌ","AuditEvidence": ["์‚ฌ์šฉ์ž ๊ณ„์ • ๋ฐ ๊ถŒํ•œ ์‹ ์ฒญ์„œ","์‚ฌ์šฉ์ž ๊ณ„์ • ๋ฐ ๊ถŒํ•œ ๊ด€๋ฆฌ๋Œ€์žฅ ๋˜๋Š” ํ™”๋ฉด","์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ๋ณ„ ์ ‘๊ทผ๊ถŒํ•œ ๋ถ„๋ฅ˜ํ‘œ","์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ๋ณ„ ์‚ฌ์šฉ์ž, ๊ด€๋ฆฌ์ž, ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๋ชฉ๋ก"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ์‚ฌ์šฉ์ž ๊ณ„์ • ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ์˜ ๋“ฑ๋กยท๋ณ€๊ฒฝยท์‚ญ์ œ์— ๊ด€ํ•œ ๊ณต์‹์ ์ธ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ์‚ฌ์šฉ์ž ๊ณ„์ • ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ ์ƒ์„ฑ ๋ฐ ๋“ฑ๋กยท๋ณ€๊ฒฝ ์‹œ ์ง๋ฌด๋ณ„ ์ ‘๊ทผ๊ถŒํ•œ ๋ถ„๋ฅ˜ ์ฒด๊ณ„์— ๋”ฐ๋ผ ์—…๋ฌด์ƒ ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ๊ถŒํ•œ๋งŒ์„ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‚ฌ์šฉ์ž์—๊ฒŒ ๊ณ„์ • ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ์„ ๋ถ€์—ฌํ•˜๋Š” ๊ฒฝ์šฐ ํ•ด๋‹น ๊ณ„์ •์— ๋Œ€ํ•œ ๋ณด์•ˆ์ฑ…์ž„์ด ๋ณธ์ธ์—๊ฒŒ ์žˆ์Œ์„ ๋ช…ํ™•ํžˆ ์ธ์‹์‹œํ‚ค๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์‚ฌ์šฉ์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž์— ๋Œ€ํ•œ ๊ณ„์ •ยท๊ถŒํ•œ์— ๋Œ€ํ•œ ์‚ฌ์šฉ์ž ๋“ฑ๋ก, ํ•ด์ง€ ๋ฐ ์Šน์ธ์ ˆ์ฐจ ์—†์ด ๊ตฌ๋‘ ์š”์ฒญ, ์ด๋ฉ”์ผ ๋“ฑ์œผ๋กœ ์ฒ˜๋ฆฌํ•˜์—ฌ ์ด์— ๋Œ€ํ•œ ์Šน์ธ ๋ฐ ์ฒ˜๋ฆฌ ์ด๋ ฅ์ด ํ™•์ธ๋˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๊ฐ€ ํœด๊ฐ€, ์ถœ์žฅ, ๊ณต๊ฐ€ ๋“ฑ์— ๋”ฐ๋ฅธ ์—…๋ฌด ๋ฐฑ์—…์„ ์‚ฌ์œ ๋กœ ๊ณต์‹์ ์ธ ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜์ง€ ์•Š๊ณ  ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๋กœ ์ง€์ •๋˜์ง€ ์•Š์€ ์ธ์›์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๊ณ„์ •์„ ์•Œ๋ ค์ฃผ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ •๋ณด์‹œ์Šคํ…œ ๋˜๋Š” ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ์‚ฌ์šฉ์ž์—๊ฒŒ ํ•„์š” ์ด์ƒ์˜ ๊ณผ๋„ํ•œ ๊ถŒํ•œ์„ ๋ถ€์—ฌํ•˜์—ฌ ์—…๋ฌด์ƒ ๋ถˆํ•„์š”ํ•œ ์ •๋ณด ๋˜๋Š” ๊ฐœ์ธ์ •๋ณด์— ์ ‘๊ทผ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ๋Œ€ํ•œ ๋น„์ธ๊ฐ€ ์ ‘๊ทผ์„ ํ†ต์ œํ•˜๊ณ  ์—…๋ฌด ๋ชฉ์ ์— ๋”ฐ๋ฅธ ์ ‘๊ทผ๊ถŒํ•œ์„ ์ตœ์†Œํ•œ์œผ๋กœ ๋ถ€์—ฌํ•  ์ˆ˜ ์žˆ๋„๋ก ์‚ฌ์šฉ์ž ๋“ฑ๋กยทํ•ด์ง€ ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ ๋ถ€์—ฌยท๋ณ€๊ฒฝยท๋ง์†Œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ์‚ฌ์šฉ์ž ๋“ฑ๋ก ๋ฐ ๊ถŒํ•œ๋ถ€์—ฌ ์‹œ ์‚ฌ์šฉ์ž์—๊ฒŒ ๋ณด์•ˆ์ฑ…์ž„์ด ์žˆ์Œ์„ ๊ทœ์ •ํ™”ํ•˜๊ณ  ์ธ์‹์‹œ์ผœ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 22,"manual": 0}},"2.5.2": {"name": "์‚ฌ์šฉ์ž ์‹๋ณ„","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.5.2 ์‚ฌ์šฉ์ž ์‹๋ณ„","Subdomain": "2.5. 
์ธ์ฆ ๋ฐ ๊ถŒํ•œ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๋กœ๊ทธ์ธ ํ™”๋ฉด","์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๊ด€๋ฆฌ์ž, ์‚ฌ์šฉ์ž, ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๊ณ„์ • ๋ชฉ๋ก","์˜ˆ์™ธ ์ฒ˜๋ฆฌ์— ๋Œ€ํ•œ ์Šน์ธ ๋‚ด์—ญ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์—์„œ ์‚ฌ์šฉ์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๋ฅผ ์œ ์ผํ•˜๊ฒŒ ๊ตฌ๋ถ„ํ•  ์ˆ˜ ์žˆ๋Š” ์‹๋ณ„์ž๋ฅผ ํ• ๋‹นํ•˜๊ณ  ์ถ”์ธก ๊ฐ€๋Šฅํ•œ ์‹๋ณ„์ž์˜ ์‚ฌ์šฉ์„ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ถˆ๊ฐ€ํ”ผํ•œ ์‚ฌ์œ ๋กœ ๋™์ผํ•œ ์‹๋ณ„์ž๋ฅผ ๊ณต์œ ํ•˜์—ฌ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๊ทธ ์‚ฌ์œ ์™€ ํƒ€๋‹น์„ฑ์„ ๊ฒ€ํ† ํ•˜๊ณ  ๋ณด์™„๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜์—ฌ ์ฑ…์ž„์ž์˜ ์Šน์ธ์„ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด์‹œ์Šคํ…œ(์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ, ์นจ์ž…์ฐจ๋‹จ์‹œ์Šคํ…œ, DBMS ๋“ฑ)์˜ ๊ณ„์ • ํ˜„ํ™ฉ์„ ํ™•์ธํ•œ ๊ฒฐ๊ณผ, ์ œ์กฐ์‚ฌ์—์„œ ์ œ๊ณตํ•˜๋Š” ๊ธฐ๋ณธ ๊ด€๋ฆฌ์ž ๊ณ„์ •์„ ๊ธฐ์ˆ ์ ์œผ๋กœ ๋ณ€๊ฒฝ ๊ฐ€๋Šฅํ•จ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ๋ณ€๊ฒฝํ•˜์ง€ ์•Š๊ณ  ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ๋ฐœ์ž๊ฐ€ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๊ณ„์ •์„ ๊ณต์šฉ์œผ๋กœ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ์œผ๋‚˜, ํƒ€๋‹น์„ฑ ๊ฒ€ํ†  ๋˜๋Š” ์ฑ…์ž„์ž์˜ ์Šน์ธ ๋“ฑ์ด ์—†์ด ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์™ธ๋ถ€์ง์›์ด ์œ ์ง€๋ณด์ˆ˜ํ•˜๊ณ  ์žˆ๋Š” ์ •๋ณด์‹œ์Šคํ…œ์˜ ์šด์˜๊ณ„์ •์„ ๋ณ„๋„์˜ ์Šน์ธ ์ ˆ์ฐจ ์—†์ด ๊ฐœ์ธ ๊ณ„์ •์ฒ˜๋Ÿผ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ)"]}],"description": "์‚ฌ์šฉ์ž ๊ณ„์ •์€ ์‚ฌ์šฉ์ž๋ณ„๋กœ ์œ ์ผํ•˜๊ฒŒ ๊ตฌ๋ถ„ํ•  ์ˆ˜ ์žˆ๋„๋ก ์‹๋ณ„์ž๋ฅผ ํ• ๋‹นํ•˜๊ณ  ์ถ”์ธก ๊ฐ€๋Šฅํ•œ ์‹๋ณ„์ž ์‚ฌ์šฉ์„ ์ œํ•œํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ๋™์ผํ•œ ์‹๋ณ„์ž๋ฅผ ๊ณต์œ ํ•˜์—ฌ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๊ทธ ์‚ฌ์œ ์™€ ํƒ€๋‹น์„ฑ์„ ๊ฒ€ํ† ํ•˜์—ฌ ์ฑ…์ž„์ž์˜ ์Šน์ธ ๋ฐ ์ฑ…์ž„์ถ”์ ์„ฑ ํ™•๋ณด ๋“ฑ ๋ณด์™„๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.5.3": {"name": "์‚ฌ์šฉ์ž ์ธ์ฆ","checks": {"iam_root_mfa_enabled": null,"iam_user_accesskey_unused": null,"iam_check_saml_providers_sts": null,"cognito_user_pool_mfa_enabled": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_hardware_mfa_enabled": null,"iam_user_two_active_access_key": null,"iam_administrator_access_with_mfa": null,"iam_user_mfa_enabled_console_access": null,"iam_user_with_temporary_credentials": null,"apigatewayv2_api_authorizers_enabled": "FAIL","iam_user_no_setup_initial_access_key": null,"apigateway_restapi_authorizers_enabled": "PASS","rds_cluster_iam_authentication_enabled": "FAIL","rds_instance_iam_authentication_enabled": "FAIL","kafka_cluster_unrestricted_access_disabled": null,"cognito_identity_pool_guest_access_disabled": "FAIL","cognito_user_pool_advanced_security_enabled": null,"cognito_user_pool_self_registration_disabled": null,"directoryservice_supported_mfa_radius_enabled": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cognito_user_pool_client_token_revocation_enabled": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cognito_user_pool_client_prevent_user_existence_errors": null,"opensearch_service_domains_internal_user_database_enabled": null,"cognito_user_pool_blocks_potential_malicious_sign_in_attempts": null,"opensearch_service_domains_use_cognito_authentication_for_kibana": null,"cognito_user_pool_blocks_compromised_credentials_sign_in_attempts": null},"status": "FAIL","attributes": [{"Domain": "2. 
๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.5.3 ์‚ฌ์šฉ์ž ์ธ์ฆ","Subdomain": "2.5. ์ธ์ฆ ๋ฐ ๊ถŒํ•œ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๋กœ๊ทธ์ธ ํ™”๋ฉด","๋กœ๊ทธ์ธ ํšŸ์ˆ˜ ์ œํ•œ ์„ค์ • ํ™”๋ฉด","๋กœ๊ทธ์ธ ์‹คํŒจ ๋ฉ”์‹œ์ง€ ํ™”๋ฉด","์™ธ๋ถ€ ์ ‘์† ์‹œ ์ ˆ์ฐจ(์™ธ๋ถ€์ ‘์† ์‹ ์ฒญ์„œ, ์™ธ๋ถ€์ ‘์†์ž ํ˜„ํ™ฉ ๋“ฑ)"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์ ‘๊ทผ์€ ์‚ฌ์šฉ์ž ์ธ์ฆ, ๋กœ๊ทธ์ธ ํšŸ์ˆ˜ ์ œํ•œ, ๋ถˆ๋ฒ• ๋กœ๊ทธ์ธ ์‹œ๋„ ๊ฒฝ๊ณ  ๋“ฑ ์•ˆ์ „ํ•œ ์‚ฌ์šฉ์ž ์ธ์ฆ ์ ˆ์ฐจ์— ๋”ฐ๋ผ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณดํ†ต์‹ ๋ง์„ ํ†ตํ•˜์—ฌ ์™ธ๋ถ€์—์„œ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ์ ‘์†ํ•˜๋ ค๋Š” ๊ฒฝ์šฐ์—๋Š” ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์— ๋”ฐ๋ผ ์•ˆ์ „ํ•œ ์ธ์ฆ์ˆ˜๋‹จ ๋˜๋Š” ์•ˆ์ „ํ•œ ์ ‘์†์ˆ˜๋‹จ์„ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๊ฐ€ ๊ณต๊ฐœ๋œ ์™ธ๋ถ€ ์ธํ„ฐ๋„ท๋ง์„ ํ†ตํ•˜์—ฌ ์ด์šฉ์ž์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ ์‹œ์Šคํ…œ์— ์ ‘๊ทผ ์‹œ ์•ˆ์ „ํ•œ ์ธ์ฆ์ˆ˜๋‹จ์„ ์ ์šฉํ•˜์ง€ ์•Š๊ณ  IDยท๋น„๋ฐ€๋ฒˆํ˜ธ ๋ฐฉ์‹์œผ๋กœ๋งŒ ์ธ์ฆํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๋กœ๊ทธ์ธ ์‹คํŒจ ์‹œ ํ•ด๋‹น ID๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š๊ฑฐ๋‚˜ ๋น„๋ฐ€๋ฒˆํ˜ธ๊ฐ€ ํ‹€๋ฆผ์„ ์ž์„ธํžˆ ํ‘œ์‹œํ•ด ์ฃผ๊ณ  ์žˆ์œผ๋ฉฐ, ๋กœ๊ทธ์ธ ์‹คํŒจํšŸ์ˆ˜์— ๋Œ€ํ•œ ์ œํ•œ์ด ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ), ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ๋Œ€ํ•œ ์‚ฌ์šฉ์ž์˜ ์ ‘๊ทผ์€ ์•ˆ์ „ํ•œ ์ธ์ฆ์ ˆ์ฐจ์™€ ํ•„์š”์— ๋”ฐ๋ผ ๊ฐ•ํ™”๋œ ์ธ์ฆ๋ฐฉ์‹์„ ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ๋˜ํ•œ ๋กœ๊ทธ์ธ ํšŸ์ˆ˜ ์ œํ•œ, ๋ถˆ๋ฒ• ๋กœ๊ทธ์ธ ์‹œ๋„ ๊ฒฝ๊ณ  ๋“ฑ ๋น„์ธ๊ฐ€์ž ์ ‘๊ทผ ํ†ต์ œ๋ฐฉ์•ˆ์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 4,"pass": 1,"total": 29,"manual": 0}},"2.5.4": {"name": "๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ฆฌ","checks": {"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_reuse_24": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_password_policy_minimum_length_14": null,"cognito_user_pool_password_policy_number": null,"cognito_user_pool_password_policy_symbol": null,"cognito_user_pool_password_policy_lowercase": null,"cognito_user_pool_password_policy_uppercase": null,"cognito_user_pool_temporary_password_expiration": null,"cognito_user_pool_password_policy_minimum_length_14": null,"iam_password_policy_expires_passwords_within_90_days_or_less": null},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.5.4 ๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ฆฌ","Subdomain": "2.5. 
์ธ์ฆ ๋ฐ ๊ถŒํ•œ๊ด€๋ฆฌ","AuditEvidence": ["์›นํŽ˜์ด์ง€, ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ๋น„๋ฐ€๋ฒˆํ˜ธ ์„ค์ • ํ™”๋ฉด","๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ฆฌ ์ •์ฑ… ๋ฐ ์ ˆ์ฐจ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์•ˆ์ „ํ•œ ์‚ฌ์šฉ์ž ๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ฆฌ์ ˆ์ฐจ ๋ฐ ์ž‘์„ฑ๊ทœ์น™์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)๊ฐ€ ์•ˆ์ „ํ•œ ๋น„๋ฐ€๋ฒˆํ˜ธ๋ฅผ ์ด์šฉํ•  ์ˆ˜ ์žˆ๋„๋ก ๋น„๋ฐ€๋ฒˆํ˜ธ ์ž‘์„ฑ๊ทœ์น™์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๋˜๋Š” ์ •๋ณด์ฃผ์ฒด์˜ ์ธ์ฆ์ˆ˜๋‹จ์„ ์•ˆ์ „ํ•˜๊ฒŒ ์ ์šฉํ•˜๊ณ  ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ์ •์ฑ…, ์ง€์นจ ๋“ฑ์—์„œ ๋น„๋ฐ€๋ฒˆํ˜ธ ์ƒ์„ฑ๊ทœ์น™์˜ ๊ธฐ์ค€์„ ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ผ๋ถ€ ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์—์„œ ๋‚ด๋ถ€ ์ง€์นจ๊ณผ ์ƒ์ดํ•œ ๋น„๋ฐ€๋ฒˆํ˜ธ๋ฅผ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ จ ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ๋น„๋ฐ€๋ฒˆํ˜ธ๋ฅผ ์ดˆ๊ธฐํ™” ์‹œ ์ž„์‹œ ๋น„๋ฐ€๋ฒˆํ˜ธ๋ฅผ ๋ถ€์—ฌ๋ฐ›๊ณ  ๊ฐ•์ œ์ ์œผ๋กœ ๋ณ€๊ฒฝํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์‹ค์ œ๋กœ๋Š” ์ž„์‹œ ๋น„๋ฐ€๋ฒˆํ˜ธ๋ฅผ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ จ ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ์‚ฌ์šฉ์ž ๋ฐ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž์˜ ๋น„๋ฐ€๋ฒˆํ˜ธ ๋ณ€๊ฒฝ์ฃผ๊ธฐ๋ฅผ ์ •ํ•˜๊ณ  ์ดํ–‰ํ•˜๋„๋ก ํ•˜๊ณ  ์žˆ์Œ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ๋ณ€๊ฒฝํ•˜์ง€ ์•Š๊ณ  ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ)"]}],"description": "๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ, ์™ธ๋ถ€ ์œ„ํ˜‘์š”์ธ ๋“ฑ์„ ๊ณ ๋ คํ•˜์—ฌ ์ •๋ณด์‹œ์Šคํ…œ ์‚ฌ์šฉ์ž ๋ฐ ๊ณ ๊ฐ, ํšŒ์› ๋“ฑ ์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)๊ฐ€ ์‚ฌ์šฉํ•˜๋Š” ๋น„๋ฐ€๋ฒˆํ˜ธ ๊ด€๋ฆฌ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 13,"manual": 0}},"2.5.5": {"name": "ํŠน์ˆ˜ ๊ณ„์ • ๋ฐ ๊ถŒํ•œ ๊ด€๋ฆฌ","checks": {"iam_avoid_root_usage": null,"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_support_role_created": null,"rds_cluster_default_admin": "FAIL","rds_instance_default_admin": "FAIL","ec2_instance_profile_attached": "PASS","iam_root_hardware_mfa_enabled": null,"organizations_delegated_administrators": null,"cloudwatch_log_metric_filter_root_usage": null,"sagemaker_notebook_instance_root_access_disabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.5.5 ํŠน์ˆ˜ ๊ณ„์ • ๋ฐ ๊ถŒํ•œ ๊ด€๋ฆฌ","Subdomain": "2.5. 
์ธ์ฆ ๋ฐ ๊ถŒํ•œ๊ด€๋ฆฌ","AuditEvidence": ["ํŠน์ˆ˜๊ถŒํ•œ ๊ด€๋ จ ์ง€์นจ","ํŠน์ˆ˜๊ถŒํ•œ ์‹ ์ฒญยท์Šน์ธ ๋‚ด์—ญ","ํŠน์ˆ˜๊ถŒํ•œ์ž ๋ชฉ๋ก","ํŠน์ˆ˜๊ถŒํ•œ ๊ฒ€ํ†  ๋‚ด์šฉ"],"AuditChecklist": ["๊ด€๋ฆฌ์ž ๊ถŒํ•œ ๋“ฑ ํŠน์ˆ˜๊ถŒํ•œ์€ ์ตœ์†Œํ•œ์˜ ์ธ์›์—๊ฒŒ๋งŒ ๋ถ€์—ฌ๋  ์ˆ˜ ์žˆ๋„๋ก ๊ณต์‹์ ์ธ ๊ถŒํ•œ ์‹ ์ฒญ ๋ฐ ์Šน์ธ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","ํŠน์ˆ˜ ๋ชฉ์ ์„ ์œ„ํ•˜์—ฌ ๋ถ€์—ฌํ•œ ๊ณ„์ • ๋ฐ ๊ถŒํ•œ์„ ์‹๋ณ„ํ•˜๊ณ  ๋ณ„๋„ ๋ชฉ๋ก์œผ๋กœ ๊ด€๋ฆฌํ•˜๋Š” ๋“ฑ ํ†ต์ œ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ๊ด€๋ฆฌ์ž ๋ฐ ํŠน์ˆ˜๊ถŒํ•œ ๋ถ€์—ฌ ๋“ฑ์˜ ์Šน์ธ ์ด๋ ฅ์ด ์‹œ์Šคํ…œ์ด๋‚˜ ๋ฌธ์„œ์ƒ์œผ๋กœ ํ™•์ธ์ด ๋˜์ง€ ์•Š๊ฑฐ๋‚˜, ์Šน์ธ ์ด๋ ฅ๊ณผ ํŠน์ˆ˜๊ถŒํ•œ ๋‚ด์—ญ์ด ์„œ๋กœ ์ผ์น˜๋˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ๊ฐœ์ธ์ •๋ณด ๊ด€๋ฆฌ์ž ๋ฐ ํŠน์ˆ˜๊ถŒํ•œ ๋ณด์œ ์ž๋ฅผ ๋ชฉ๋ก์œผ๋กœ ์ž‘์„ฑยท๊ด€๋ฆฌํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜ ์ด๋ฅผ ์ž‘์„ฑยท๊ด€๋ฆฌํ•˜๊ณ  ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜, ๋ณด์•ˆ์‹œ์Šคํ…œ ๊ด€๋ฆฌ์ž ๋“ฑ ์ผ๋ถ€ ํŠน์ˆ˜๊ถŒํ•œ์ด ์‹๋ณ„ยท๊ด€๋ฆฌ๋˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ์œ ์ง€๋ณด์ˆ˜๋ฅผ ์œ„ํ•˜์—ฌ ๋ถ„๊ธฐ 1ํšŒ์— ๋ฐฉ๋ฌธํ•˜๋Š” ์œ ์ง€๋ณด์ˆ˜์šฉ ํŠน์ˆ˜ ๊ณ„์ •์ด ์‚ฌ์šฉ๊ธฐ๊ฐ„ ์ œํ•œ์—†์ด ์ƒ์‹œ๋กœ ํ™œ์„ฑํ™”๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ด€๋ฆฌ์ž ๋ฐ ํŠน์ˆ˜๊ถŒํ•œ์˜ ์‚ฌ์šฉ ์—ฌ๋ถ€๋ฅผ ์ •๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์ง€ ์•Š์•„ ์ผ๋ถ€ ํŠน์ˆ˜๊ถŒํ•œ์ž์˜ ์—…๋ฌด๊ฐ€ ๋ณ€๊ฒฝ๋˜์—ˆ์Œ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ๊ธฐ์กด ๊ด€๋ฆฌ์ž ๋ฐ ํŠน์ˆ˜๊ถŒํ•œ์„ ๊ณ„์† ๋ณด์œ ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ ๊ด€๋ฆฌ, ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด ๊ด€๋ฆฌ ๋“ฑ ํŠน์ˆ˜ ๋ชฉ์ ์„ ์œ„ํ•˜์—ฌ ์‚ฌ์šฉํ•˜๋Š” ๊ณ„์ • ๋ฐ ๊ถŒํ•œ์€ ์ตœ์†Œํ•œ์œผ๋กœ ๋ถ€์—ฌํ•˜๊ณ  ๋ณ„๋„๋กœ ์‹๋ณ„ํ•˜์—ฌ ํ†ต์ œํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 2,"pass": 1,"total": 11,"manual": 0}},"2.5.6": {"name": "์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ† ","checks": {"accessanalyzer_enabled": "PASS","cloudtrail_insights_exist": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_cross_account_sharing_disabled": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cloudtrail_multi_region_enabled_logging_management_events": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.5.6 ์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ† ","Subdomain": "2.5. 
์ธ์ฆ ๋ฐ ๊ถŒํ•œ๊ด€๋ฆฌ","AuditEvidence": ["์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ†  ๊ธฐ์ค€ ๋ฐ ์ ˆ์ฐจ","์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ†  ์ด๋ ฅ","์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ†  ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ ๋ฐ ํ›„์†์กฐ์น˜ ๋‚ด์—ญ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ๋Œ€ํ•œ ์‚ฌ์šฉ์ž ๊ณ„์ • ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ ์ƒ์„ฑยท๋“ฑ๋กยท๋ถ€์—ฌ ๋ฐ ์ด์šฉยท๋ณ€๊ฒฝยท๋ง์†Œ ๋“ฑ์˜ ์ด๋ ฅ์„ ๋‚จ๊ธฐ๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ๋Œ€ํ•œ ์‚ฌ์šฉ์ž ๊ณ„์ • ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ์˜ ์ ์ •์„ฑ ๊ฒ€ํ†  ๊ธฐ์ค€, ๊ฒ€ํ† ์ฃผ์ฒด, ๊ฒ€ํ† ๋ฐฉ๋ฒ•, ์ฃผ๊ธฐ ๋“ฑ์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ •๊ธฐ์  ๊ฒ€ํ† ๋ฅผ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ†  ๊ฒฐ๊ณผ ์ ‘๊ทผ๊ถŒํ•œ ๊ณผ๋‹ค ๋ถ€์—ฌ, ๊ถŒํ•œ๋ถ€์—ฌ ์ ˆ์ฐจ ๋ฏธ์ค€์ˆ˜, ๊ถŒํ•œ ์˜คยท๋‚จ์šฉ ๋“ฑ ๋ฌธ์ œ์ ์ด ๋ฐœ๊ฒฌ๋œ ๊ฒฝ์šฐ ๊ทธ์— ๋”ฐ๋ฅธ ์กฐ์น˜์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ† ์™€ ๊ด€๋ จ๋œ ๋ฐฉ๋ฒ•, ์ ๊ฒ€์ฃผ๊ธฐ, ๋ณด๊ณ ์ฒด๊ณ„, ์˜คยท๋‚จ์šฉ ๊ธฐ์ค€ ๋“ฑ์ด ๊ด€๋ จ ์ง€์นจ์— ๊ตฌ์ฒด์ ์œผ๋กœ ์ •์˜๋˜์–ด ์žˆ์ง€ ์•Š์•„ ์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ† ๊ฐ€ ์ •๊ธฐ์ ์œผ๋กœ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ์ •์ฑ…, ์ง€์นจ ๋“ฑ์— ์žฅ๊ธฐ ๋ฏธ์‚ฌ์šฉ์ž ๊ณ„์ •์— ๋Œ€ํ•œ ์ž ๊ธˆ(๋น„ํ™œ์„ฑํ™”) ๋˜๋Š” ์‚ญ์ œ ์กฐ์น˜ํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, 6๊ฐœ์›” ์ด์ƒ ๋ฏธ์ ‘์†ํ•œ ์‚ฌ์šฉ์ž์˜ ๊ณ„์ •์ด ํ™œ์„ฑํ™”๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ(์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ† ๊ฐ€ ์ถฉ์‹คํžˆ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์•„ ํ•ด๋‹น ๊ณ„์ •์ด ์‹๋ณ„๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ)","์‚ฌ๋ก€ 3 : ์ ‘๊ทผ๊ถŒํ•œ ๊ฒ€ํ†  ์‹œ ์ ‘๊ทผ๊ถŒํ•œ์˜ ๊ณผ๋‹ค ๋ถ€์—ฌ ๋ฐ ์˜คยท๋‚จ์šฉ ์˜์‹ฌ์‚ฌ๋ก€๊ฐ€ ๋ฐœ๊ฒฌ๋˜์—ˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ์ƒ์„ธ์กฐ์‚ฌ, ๋‚ด๋ถ€๋ณด๊ณ  ๋“ฑ์˜ ํ›„์†์กฐ์น˜๊ฐ€ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ๊ณผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์— ์ ‘๊ทผํ•˜๋Š” ์‚ฌ์šฉ์ž ๊ณ„์ •์˜ ๋“ฑ๋กยท์ด์šฉยท์‚ญ์ œ ๋ฐ ์ ‘๊ทผ๊ถŒํ•œ์˜ ๋ถ€์—ฌยท๋ณ€๊ฒฝยท์‚ญ์ œ ์ด๋ ฅ์„ ๋‚จ๊ธฐ๊ณ  ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜์—ฌ ์ ์ •์„ฑ ์—ฌ๋ถ€๋ฅผ ์ ๊ฒ€ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 2,"pass": 1,"total": 14,"manual": 0}},"2.6.1": {"name": "๋„คํŠธ์›Œํฌ ์ ‘๊ทผ","checks": {"ec2_ami_public": null,"elb_internet_facing": "FAIL","ec2_elastic_ip_shodan": null,"elbv2_internet_facing": "PASS","ec2_instance_public_ip": "FAIL","ec2_ebs_public_snapshot": "PASS","kafka_cluster_is_public": null,"s3_bucket_acl_prohibited": "FAIL","apigateway_restapi_public": "FAIL","lightsail_database_public": null,"lightsail_instance_public": null,"ec2_securitygroup_not_used": "FAIL","elbv2_listeners_underneath": "PASS","networkfirewall_in_all_vpc": "FAIL","s3_bucket_public_write_acl": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","ssm_documents_set_as_public": "PASS","awslambda_function_url_public": null,"dms_instance_no_public_access": null,"rds_instance_no_public_access": "PASS","emr_cluster_publicly_accesible": null,"redshift_cluster_public_access": null,"neptune_cluster_public_snapshot": null,"eks_cluster_private_nodes_enabled": null,"awslambda_function_url_cors_policy": null,"documentdb_cluster_public_snapshot": null,"eks_cluster_network_policy_enabled": null,"neptune_cluster_uses_public_subnet": null,"sns_topics_not_publicly_accessible": "PASS","sqs_queues_not_publicly_accessible": "PASS","vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","eks_cluster_not_publicly_accessible": null,"glacier_vaults_policy_public_access": null,"iam_user_mfa_enabled_console_access": null,"s3_access_point_public_access_block": 
"PASS","s3_bucket_level_public_access_block": "PASS","iam_user_administrator_access_policy": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"iam_group_administrator_access_policy": null,"s3_account_level_public_access_blocks": null,"apigateway_restapi_authorizers_enabled": "PASS","elasticache_cluster_uses_public_subnet": "PASS","rds_instance_iam_authentication_enabled": "FAIL","appstream_fleet_maximum_session_duration": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ecr_repositories_not_publicly_accessible": "PASS","emr_cluster_account_public_block_enabled": "PASS","sagemaker_models_vpc_settings_configured": null,"apigateway_restapi_public_with_authorizer": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","vpc_endpoint_connections_trust_boundaries": "FAIL","appstream_fleet_session_disconnect_timeout": null,"awslambda_function_not_publicly_accessible": "PASS","ec2_instance_port_cifs_exposed_to_internet": "PASS","ec2_instance_port_ldap_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","kafka_cluster_unrestricted_access_disabled": null,"sagemaker_models_network_isolation_enabled": null,"cognito_identity_pool_guest_access_disabled": "FAIL","ec2_instance_port_kafka_exposed_to_internet": "PASS","ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","workspaces_vpc_2private_1public_subnets_nat": null,"ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_telnet_exposed_to_internet": "PASS","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_kerberos_exposed_to_internet": "PASS","ec2_transitgateway_auto_accept_vpc_attachments": null,"appstream_fleet_session_idle_disconnect_timeout": null,"ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_memcached_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","rds_instance_event_subscription_security_groups": "FAIL","sagemaker_training_jobs_vpc_settings_configured": null,"vpc_peering_routing_tables_with_least_privilege": "PASS","appstream_fleet_default_internet_access_disabled": null,"ec2_instance_port_postgresql_exposed_to_internet": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","cloudfront_distributions_geo_restrictions_enabled": null,"sagemaker_training_jobs_network_isolation_enabled": null,"opensearch_service_domains_not_publicly_accessible": null,"sagemaker_notebook_instance_vpc_settings_configured": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","vpc_endpoint_services_allowed_principals_trust_boundaries": null,"ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","cognito_user_pool_blocks_potential_malicious_sign_in_attempts": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": 
"PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.1 ๋„คํŠธ์›Œํฌ ์ ‘๊ทผ","Subdomain": "2.6. ์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„","IP ๊ด€๋ฆฌ๋Œ€์žฅ","์ •๋ณด์ž์‚ฐ ๋ชฉ๋ก","๋ฐฉํ™”๋ฒฝ๋ฃฐ"],"AuditChecklist": ["์กฐ์ง์˜ ๋„คํŠธ์›Œํฌ์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ๋ชจ๋“  ๊ฒฝ๋กœ๋ฅผ ์‹๋ณ„ํ•˜๊ณ  ์ ‘๊ทผํ†ต์ œ ์ •์ฑ…์— ๋”ฐ๋ผ ๋‚ด๋ถ€ ๋„คํŠธ์›Œํฌ๋Š” ์ธ๊ฐ€๋œ ์‚ฌ์šฉ์ž๋งŒ์ด ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋„๋ก ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์„œ๋น„์Šค, ์‚ฌ์šฉ์ž ๊ทธ๋ฃน, ์ •๋ณด์ž์‚ฐ์˜ ์ค‘์š”๋„, ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์— ๋”ฐ๋ผ ๋„คํŠธ์›Œํฌ ์˜์—ญ์„ ๋ฌผ๋ฆฌ์  ๋˜๋Š” ๋…ผ๋ฆฌ์ ์œผ๋กœ ๋ถ„๋ฆฌํ•˜๊ณ  ๊ฐ ์˜์—ญ ๊ฐ„ ์ ‘๊ทผํ†ต์ œ๋ฅผ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋„คํŠธ์›Œํฌ ๋Œ€์—ญ๋ณ„ IP์ฃผ์†Œ ๋ถ€์—ฌ ๊ธฐ์ค€์„ ๋งˆ๋ จํ•˜๊ณ  ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์„œ๋ฒ„ ๋“ฑ ์™ธ๋ถ€ ์—ฐ๊ฒฐ์ด ํ•„์š”ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ ์‚ฌ์„ค IP๋กœ ํ• ๋‹นํ•˜๋Š” ๋“ฑ์˜ ๋Œ€์ฑ…์„ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฌผ๋ฆฌ์ ์œผ๋กœ ๋–จ์–ด์ง„ IDC, ์ง€์‚ฌ, ๋Œ€๋ฆฌ์  ๋“ฑ๊ณผ์˜ ๋„คํŠธ์›Œํฌ ์—ฐ๊ฒฐ ์‹œ ์ „์†ก๊ตฌ๊ฐ„ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„์™€ ์ธํ„ฐ๋ทฐ๋ฅผ ํ†ตํ•˜์—ฌ ํ™•์ธํ•œ ๊ฒฐ๊ณผ, ์™ธ๋ถ€ ์ง€์ ์—์„œ ์‚ฌ์šฉํ•˜๋Š” ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์‹œ์Šคํ…œ๊ณผ IDC์— ์œ„์น˜ํ•œ ์„œ๋ฒ„ ๊ฐ„ ์—ฐ๊ฒฐ ์‹œ ์ผ๋ฐ˜ ์ธํ„ฐ๋„ท ํšŒ์„ ์„ ํ†ตํ•˜์—ฌ ๋ฐ์ดํ„ฐ ์†ก์ˆ˜์‹ ์„ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ์–ด ๋‚ด๋ถ€ ๊ทœ์ •์— ๋ช…์‹œ๋œ VPN์ด๋‚˜ ์ „์šฉ๋ง ๋“ฑ์„ ์ด์šฉํ•œ ํ†ต์‹ ์ด ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€๋ง์— ์œ„์น˜ํ•œ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์„œ๋ฒ„ ๋“ฑ ์ผ๋ถ€ ์ค‘์š” ์„œ๋ฒ„์˜ IP์ฃผ์†Œ๊ฐ€ ๋‚ด๋ถ€ ๊ทœ์ •๊ณผ ๋‹ฌ๋ฆฌ ๊ณต์ธ IP๋กœ ์„ค์ •๋˜์–ด ์žˆ๊ณ , ๋„คํŠธ์›Œํฌ ์ ‘๊ทผ ์ฐจ๋‹จ์ด ์ ์šฉ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์„œ๋ฒ„ํŒœ์ด ๊ตฌ์„ฑ๋˜์–ด ์žˆ์œผ๋‚˜, ๋„คํŠธ์›Œํฌ ์ ‘๊ทผ์ œ์–ด ์„ค์ • ๋ฏธํก์œผ๋กœ ๋‚ด๋ถ€๋ง์—์„œ ์„œ๋ฒ„ํŒœ์œผ๋กœ์˜ ์ ‘๊ทผ์ด ๊ณผ๋„ํ•˜๊ฒŒ ํ—ˆ์šฉ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์™ธ๋ถ€์ž(์™ธ๋ถ€ ๊ฐœ๋ฐœ์ž, ๋ฐฉ๋ฌธ์ž ๋“ฑ)์—๊ฒŒ ์ œ๊ณต๋˜๋Š” ๋„คํŠธ์›Œํฌ๋ฅผ ๋ณ„๋„์˜ ํ†ต์ œ ์—†์ด ๋‚ด๋ถ€ ์—…๋ฌด ๋„คํŠธ์›Œํฌ์™€ ๋ถ„๋ฆฌํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๋‚ด๋ถ€ ๊ทœ์ •๊ณผ๋Š” ๋‹ฌ๋ฆฌ MAC์ฃผ์†Œ ์ธ์ฆ, ํ•„์ˆ˜ ๋ณด์•ˆ ์†Œํ”„ํŠธ์›จ์–ด ์„ค์น˜ ๋“ฑ์˜ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ ์šฉํ•˜์ง€ ์•Š์€ ์ƒํƒœ๋กœ ๋„คํŠธ์›Œํฌ ์ผ€์ด๋ธ” ์—ฐ๊ฒฐ๋งŒ์œผ๋กœ ์‚ฌ๋‚ด ๋„คํŠธ์›Œํฌ์— ์ ‘๊ทผ ๋ฐ ์ด์šฉํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "๋„คํŠธ์›Œํฌ์— ๋Œ€ํ•œ ๋น„์ธ๊ฐ€ ์ ‘๊ทผ์„ ํ†ต์ œํ•˜๊ธฐ ์œ„ํ•˜์—ฌ IP๊ด€๋ฆฌ, ๋‹จ๋ง์ธ์ฆ ๋“ฑ ๊ด€๋ฆฌ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝ ๋ฐ์ดํ–‰ํ•˜๊ณ , ์—…๋ฌด๋ชฉ์  ๋ฐ ์ค‘์š”๋„์— ๋”ฐ๋ผ 
๋„คํŠธ์›Œํฌ ๋ถ„๋ฆฌ(DMZ, ์„œ๋ฒ„ํŒœ, DB์กด, ๊ฐœ๋ฐœ์กด ๋“ฑ)์™€ ์ ‘๊ทผํ†ต์ œ๋ฅผ ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 17,"pass": 54,"total": 112,"manual": 0}},"2.6.2": {"name": "์ •๋ณด์‹œ์Šคํ…œ ์ ‘๊ทผ","checks": {"ec2_elastic_ip_shodan": null,"ec2_instance_public_ip": "FAIL","ec2_elastic_ip_unassigned": "FAIL","lightsail_instance_public": null,"lightsail_static_ip_unused": null,"ec2_instance_managed_by_ssm": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_instance_port_telnet_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","ec2_instance_internet_facing_with_instance_profile": "FAIL","ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.2 ์ •๋ณด์‹œ์Šคํ…œ ์ ‘๊ทผ","Subdomain": "2.6. ์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["์ •๋ณด์‹œ์Šคํ…œ ์šด์˜์ฒด์ œ ๊ณ„์ • ๋ชฉ๋ก","์„œ๋ฒ„ ๋ณด์•ˆ ์„ค์ •","์„œ๋ฒ„์ ‘๊ทผ์ œ์–ด ์ •์ฑ…(SecureOS ๊ด€๋ฆฌํ™”๋ฉด ๋“ฑ)","์„œ๋ฒ„ ๋ฐ ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„","์ •๋ณด์ž์‚ฐ ๋ชฉ๋ก"],"AuditChecklist": ["์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ, ๋ณด์•ˆ์‹œ์Šคํ…œ ๋“ฑ ์ •๋ณด์‹œ์Šคํ…œ๋ณ„ ์šด์˜์ฒด์ œ(OS)์— ์ ‘๊ทผ์ด ํ—ˆ์šฉ๋˜๋Š” ์‚ฌ์šฉ์ž, ์ ‘๊ทผ ๊ฐ€๋Šฅ ์œ„์น˜, ์ ‘๊ทผ ์ˆ˜๋‹จ ๋“ฑ์„ ์ •์˜ํ•˜์—ฌ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์— ์ ‘์† ํ›„ ์ผ์ •์‹œ๊ฐ„ ์—…๋ฌด์ฒ˜๋ฆฌ๋ฅผ ํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ ์ž๋™์œผ๋กœ ์‹œ์Šคํ…œ ์ ‘์†์ด ์ฐจ๋‹จ๋˜๋„๋ก ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์˜ ์‚ฌ์šฉ๋ชฉ์ ๊ณผ ๊ด€๊ณ„ ์—†๋Š” ์„œ๋น„์Šค๋ฅผ ์ œ๊ฑฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ์š” ์„œ๋น„์Šค๋ฅผ ์ œ๊ณตํ•˜๋Š” ์ •๋ณด์‹œ์Šคํ…œ์€ ๋…๋ฆฝ๋œ ์„œ๋ฒ„๋กœ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์‚ฌ๋ฌด์‹ค์—์„œ ์„œ๋ฒ„๊ด€๋ฆฌ์ž๊ฐ€ IDC์— ์œ„์น˜ํ•œ ์œˆ๋„์šฐ ์„œ๋ฒ„์— ์ ‘๊ทผ ์‹œ ํ„ฐ๋ฏธ๋„ ์„œ๋น„์Šค๋ฅผ ์ด์šฉํ•˜์—ฌ ์ ‘๊ทผํ•˜๊ณ  ์žˆ์œผ๋‚˜, ํ„ฐ๋ฏธ๋„ ์„œ๋น„์Šค์— ๋Œ€ํ•œ ์„ธ์…˜ ํƒ€์ž„์•„์›ƒ ์„ค์ •์ด ๋˜์–ด ์žˆ์ง€ ์•Š์•„ ์žฅ์‹œ๊ฐ„ ์•„๋ฌด๋Ÿฐ ์ž‘์—…์„ ํ•˜์ง€ ์•Š์•„๋„ ํ•ด๋‹น ์„ธ์…˜์ด ์ฐจ๋‹จ๋˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์„œ๋ฒ„ ๊ฐ„ ์ ‘์†์ด ์ ์ ˆํžˆ ์ œํ•œ๋˜์ง€ ์•Š์•„ ํŠน์ • ์‚ฌ์šฉ์ž๊ฐ€ ๋ณธ์ธ์—๊ฒŒ ์ธ๊ฐ€๋œ ์„œ๋ฒ„์— ์ ‘์†ํ•œ ํ›„ ํ•ด๋‹น ์„œ๋ฒ„๋ฅผ ๊ฒฝ์œ ํ•˜์—ฌ ๋‹ค๋ฅธ ์ธ๊ฐ€๋ฐ›์ง€ ์•Š์€ ์„œ๋ฒ„์—๋„ ์ ‘์†ํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋˜๋Š” ๋ณด์™„ ๋Œ€์ฑ… ์—†์ด ์•ˆ์ „ํ•˜์ง€ ์•Š์€ ์ ‘์† ํ”„๋กœํ† ์ฝœ(telnet, ftp ๋“ฑ)์„ ์‚ฌ์šฉํ•˜์—ฌ ์ ‘๊ทผํ•˜๊ณ  ์žˆ์œผ๋ฉฐ, ๋ถˆํ•„์š”ํ•œ ์„œ๋น„์Šค ๋ฐ ํฌํŠธ๋ฅผ ์˜คํ”ˆํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋ชจ๋“  ์„œ๋ฒ„๋กœ์˜ ์ ‘๊ทผ์€ ์„œ๋ฒ„์ ‘๊ทผ์ œ์–ด ์‹œ์Šคํ…œ์„ ํ†ตํ•˜๋„๋ก ์ ‘๊ทผํ†ต์ œ ์ •์ฑ…์„ ๊ฐ€์ ธ๊ฐ€๊ณ  ์žˆ์œผ๋‚˜, ์„œ๋ฒ„์ ‘๊ทผ์ œ์–ด ์‹œ์Šคํ…œ์„ ํ†ตํ•˜์ง€ ์•Š๊ณ  ์„œ๋ฒ„์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ์šฐํšŒ ๊ฒฝ๋กœ๊ฐ€ ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ 
œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ ๋“ฑ ์ •๋ณด์‹œ์Šคํ…œ์— ์ ‘๊ทผ์„ ํ—ˆ์šฉํ•˜๋Š” ์‚ฌ์šฉ์ž, ์ ‘๊ทผ์ œํ•œ ๋ฐฉ์‹, ์•ˆ์ „ํ•œ ์ ‘๊ทผ์ˆ˜๋‹จ ๋“ฑ์„ ์ •์˜ํ•˜์—ฌ ํ†ต์ œํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 8,"pass": 13,"total": 24,"manual": 0}},"2.6.3": {"name": "์‘์šฉํ”„๋กœ๊ทธ๋žจ ์ ‘๊ทผ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.3 ์‘์šฉํ”„๋กœ๊ทธ๋žจ ์ ‘๊ทผ","Subdomain": "2.6. ์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["์‘์šฉํ”„๋กœ๊ทธ๋žจ ์ ‘๊ทผ๊ถŒํ•œ ๋ถ„๋ฅ˜ ์ฒด๊ณ„","์‘์šฉํ”„๋กœ๊ทธ๋žจ ๊ณ„์ •ยท๊ถŒํ•œ ๊ด€๋ฆฌ ํ™”๋ฉด","์‘์šฉํ”„๋กœ๊ทธ๋žจ ์‚ฌ์šฉ์žยท๊ด€๋ฆฌ์ž ํ™”๋ฉด(๊ฐœ์ธ์ •๋ณด ์กฐํšŒ ๋“ฑ)","์‘์šฉํ”„๋กœ๊ทธ๋žจ ์„ธ์…˜ ํƒ€์ž„ ๋ฐ ๋™์‹œ์ ‘์† ํ—ˆ์šฉ ์—ฌ๋ถ€ ๋‚ด์—ญ","์‘์šฉํ”„๋กœ๊ทธ๋žจ ๊ด€๋ฆฌ์ž ์ ‘์†๋กœ๊ทธ ๋ชจ๋‹ˆํ„ฐ๋ง ๋‚ด์—ญ","์ •๋ณด์ž์‚ฐ ๋ชฉ๋ก","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ๊ฐœ์ธ์ •๋ณด ์กฐํšŒ, ๊ฒ€์ƒ‰ ํ™”๋ฉด","๊ฐœ์ธ์ •๋ณด ๋งˆ์Šคํ‚น ํ‘œ์ค€","๊ฐœ์ธ์ •๋ณด ๋งˆ์Šคํ‚น ์ ์šฉ ํ™”๋ฉด"],"AuditChecklist": ["์ค‘์š”์ •๋ณด ์ ‘๊ทผ์„ ํ†ต์ œํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์‚ฌ์šฉ์ž์˜ ์—…๋ฌด์— ๋”ฐ๋ผ ์‘์šฉํ”„๋กœ๊ทธ๋žจ ์ ‘๊ทผ๊ถŒํ•œ์„ ์ฐจ๋“ฑ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ผ์ •์‹œ๊ฐ„ ๋™์•ˆ ์ž…๋ ฅ์ด ์—†๋Š” ์„ธ์…˜์€ ์ž๋™ ์ฐจ๋‹จํ•˜๊ณ , ๋™์ผ ์‚ฌ์šฉ์ž์˜ ๋™์‹œ ์„ธ์…˜ ์ˆ˜๋ฅผ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ด€๋ฆฌ์ž ์ „์šฉ ์‘์šฉํ”„๋กœ๊ทธ๋žจ(๊ด€๋ฆฌ์ž ์›นํŽ˜์ด์ง€, ๊ด€๋ฆฌ์ฝ˜์†” ๋“ฑ)์€ ๋น„์ธ๊ฐ€์ž๊ฐ€ ์ ‘๊ทผํ•  ์ˆ˜ ์—†๋„๋ก ์ ‘๊ทผ์„ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์˜ ํ‘œ์‹œ์ œํ•œ ๋ณดํ˜ธ์กฐ์น˜์˜ ์ผ๊ด€์„ฑ์„ ํ™•๋ณดํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ด€๋ จ ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์˜ ๋ถˆํ•„์š”ํ•œ ๋…ธ์ถœ(์กฐํšŒ, ํ™”๋ฉดํ‘œ์‹œ, ์ธ์‡„, ๋‹ค์šด๋กœ๋“œ ๋“ฑ)์„ ์ตœ์†Œํ™”ํ•  ์ˆ˜ ์žˆ๋„๋ก ์‘์šฉํ”„๋กœ๊ทธ๋žจ์„ ๊ตฌํ˜„ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์‘์šฉํ”„๋กœ๊ทธ๋žจ์˜ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌํ™”๋ฉด ์ค‘ ์ผ๋ถ€ ํ™”๋ฉด์˜ ๊ถŒํ•œ ์ œ์–ด ๊ธฐ๋Šฅ์— ์˜ค๋ฅ˜๊ฐ€ ์กด์žฌํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ์—ด๋žŒ ๊ถŒํ•œ์ด ์—†๋Š” ์‚ฌ์šฉ์ž์—๊ฒŒ๋„ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ๋…ธ์ถœ๋˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์‘์šฉํ”„๋กœ๊ทธ๋žจ์˜ ๊ด€๋ฆฌ์ž ํŽ˜์ด์ง€๊ฐ€ ์™ธ๋ถ€์ธํ„ฐ๋„ท์— ์˜คํ”ˆ๋˜์–ด ์žˆ์œผ๋ฉด์„œ ์•ˆ์ „ํ•œ ์ธ์ฆ์ˆ˜๋‹จ์ด ์ ์šฉ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์‘์šฉํ”„๋กœ๊ทธ๋žจ์— ๋Œ€ํ•˜์—ฌ ํƒ€๋‹นํ•œ ์‚ฌ์œ  ์—†์ด ์„ธ์…˜ ํƒ€์ž„์•„์›ƒ ๋˜๋Š” ๋™์ผ ์‚ฌ์šฉ์ž ๊ณ„์ •์˜ ๋™์‹œ ์ ‘์†์„ ์ œํ•œํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์‘์šฉํ”„๋กœ๊ทธ๋žจ์„ ํ†ตํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋‹ค์šด๋กœ๋“œ๋ฐ›๋Š” ๊ฒฝ์šฐ ํ•ด๋‹น ํŒŒ์ผ ๋‚ด์— ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ๋“ฑ ์—…๋ฌด์ƒ ๋ถˆํ•„์š”ํ•œ ์ •๋ณด๊ฐ€ ๊ณผ๋„ํ•˜๊ฒŒ ํฌํ•จ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์‘์šฉํ”„๋กœ๊ทธ๋žจ์˜ ๊ฐœ์ธ์ •๋ณด ์กฐํšŒํ™”๋ฉด์—์„œ like ๊ฒ€์ƒ‰์„ ๊ณผ๋„ํ•˜๊ฒŒ ํ—ˆ์šฉํ•˜๊ณ  ์žˆ์–ด, ๋ชจ๋“  ์‚ฌ์šฉ์ž๊ฐ€ ๋ณธ์ธ์˜ ์—…๋ฌด ๋ฒ”์œ„๋ฅผ ์ดˆ๊ณผํ•˜์—ฌ ์„ฑ์”จ๋งŒ์œผ๋กœ๋„ ์ „์ฒด ๊ณ ๊ฐ ์ •๋ณด๋ฅผ ์กฐํšŒํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ๊ฐœ์ธ์ •๋ณด ํ‘œ์‹œ์ œํ•œ ์กฐ์น˜ ๊ธฐ์ค€์ด ๋งˆ๋ จ๋˜์–ด ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜ ์ด๋ฅผ ์ค€์ˆ˜ํ•˜์ง€ ์•Š๋Š” ๋“ฑ์˜ ์‚ฌ์œ ๋กœ ๋™์ผํ•œ ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ์— ๋Œ€ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ํ™”๋ฉด๋ณ„๋กœ ์„œ๋กœ ๋‹ค๋ฅธ ๋งˆ์Šคํ‚น ๊ธฐ์ค€์ด ์ ์šฉ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 7 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ํ™”๋ฉด์ƒ์—๋Š” ๊ฐœ์ธ์ •๋ณด๊ฐ€ ๋งˆ์Šคํ‚น๋˜์–ด ํ‘œ์‹œ๋˜์–ด ์žˆ์œผ๋‚˜, ์›น๋ธŒ๋ผ์šฐ์ € ์†Œ์Šค๋ณด๊ธฐ๋ฅผ ํ†ตํ•˜์—ฌ ๋งˆ์Šคํ‚น๋˜์ง€ ์•Š์€ ์ „์ฒด ๊ฐœ์ธ์ •๋ณด๊ฐ€ ๋…ธ์ถœ๋˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ), ์ 
œ6์กฐ(์ ‘๊ทผํ†ต์ œ), ์ œ12์กฐ(์ถœ๋ ฅยท๋ณต์‚ฌ์‹œ ์•ˆ์ „์กฐ์น˜)"]}],"description": "์‚ฌ์šฉ์ž๋ณ„ ์—…๋ฌด ๋ฐ ์ ‘๊ทผ ์ •๋ณด์˜ ์ค‘์š”๋„ ๋“ฑ์— ๋”ฐ๋ผ ์‘์šฉํ”„๋กœ๊ทธ๋žจ ์ ‘๊ทผ๊ถŒํ•œ์„ ์ œํ•œํ•˜๊ณ , ๋ถˆํ•„์š”ํ•œ ์ •๋ณด ๋˜๋Š” ์ค‘์š”์ •๋ณด ๋…ธ์ถœ์„ ์ตœ์†Œํ™”ํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.6.4": {"name": "๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ","checks": {"accessanalyzer_enabled": "PASS","lightsail_database_public": null,"rds_snapshots_public_access": "PASS","dms_instance_no_public_access": null,"rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"neptune_cluster_public_snapshot": null,"rds_instance_transport_encrypted": "FAIL","documentdb_cluster_public_snapshot": null,"neptune_cluster_uses_public_subnet": null,"vpc_subnet_separate_private_public": "FAIL","dynamodb_table_cross_account_access": null,"rds_cluster_iam_authentication_enabled": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","rds_instance_iam_authentication_enabled": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","neptune_cluster_iam_authentication_enabled": null,"ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","opensearch_service_domains_not_publicly_accessible": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_internal_user_database_enabled": null,"ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","opensearch_service_domains_use_cognito_authentication_for_kibana": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.4 ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ","Subdomain": "2.6. 
์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ํ˜„ํ™ฉ(ํ…Œ์ด๋ธ”, ์ปฌ๋Ÿผ ๋“ฑ)","๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘์†์ž ๊ณ„์ •ยท๊ถŒํ•œ ๋ชฉ๋ก","๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ์ œ์–ด ์ •์ฑ…(๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ์ œ์–ด์‹œ์Šคํ…œ ๊ด€๋ฆฌํ™”๋ฉด ๋“ฑ)","๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„(๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์กด ๋“ฑ)","์ •๋ณด์ž์‚ฐ ๋ชฉ๋ก"],"AuditChecklist": ["๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์˜ ํ…Œ์ด๋ธ” ๋ชฉ๋ก ๋“ฑ ์ €์žฅยท๊ด€๋ฆฌ๋˜๊ณ  ์žˆ๋Š” ์ •๋ณด๋ฅผ ์‹๋ณ„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ๋‚ด ์ •๋ณด์— ์ ‘๊ทผ์ด ํ•„์š”ํ•œ ์‘์šฉํ”„๋กœ๊ทธ๋žจ, ์ •๋ณด์‹œ์Šคํ…œ(์„œ๋ฒ„) ๋ฐ ์‚ฌ์šฉ์ž๋ฅผ ๋ช…ํ™•ํžˆ ์‹๋ณ„ํ•˜๊ณ  ์ ‘๊ทผํ†ต์ œ ์ •์ฑ…์— ๋”ฐ๋ผ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋Œ€๋Ÿ‰์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ณด๊ด€ยท์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š” ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค๋ฅผ ์ธํ„ฐ๋„ท์„ ํ†ตํ•˜์—ฌ ์ ‘๊ทผ ๊ฐ€๋Šฅํ•œ ์›น ์‘์šฉํ”„๋กœ๊ทธ๋žจ๊ณผ ๋ถ„๋ฆฌํ•˜์ง€ ์•Š๊ณ  ๋ฌผ๋ฆฌ์ ์œผ๋กœ ๋™์ผํ•œ ์„œ๋ฒ„์—์„œ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ๋ฐœ์ž ๋ฐ ์šด์˜์ž๋“ค์ด ์‘์‘ ํ”„๋กœ๊ทธ๋žจ์—์„œ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ณ„์ •์„ ๊ณต์œ ํ•˜์—ฌ ์šด์˜ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์— ์ ‘์†ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์˜ ์ ‘์†๊ถŒํ•œ์„ ์˜ค๋ธŒ์ ํŠธ๋ณ„๋กœ ์ œํ•œํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ๊ถŒํ•œ์„ ์šด์˜์ž์—๊ฒŒ ์ผ๊ด„ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ์–ด ๊ฐœ์ธ์ •๋ณด ํ…Œ์ด๋ธ”์— ์ ‘๊ทผํ•  ํ•„์š”๊ฐ€ ์—†๋Š” ์šด์˜์ž์—๊ฒŒ๋„ ๊ณผ๋„ํ•˜๊ฒŒ ์ ‘๊ทผ ๊ถŒํ•œ์ด ๋ถ€์—ฌ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ์ œ์–ด ์†”๋ฃจ์…˜์„ ๋„์ž…ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘์†์ž์— ๋Œ€ํ•œ IP์ฃผ์†Œ ๋“ฑ์ด ์ ์ ˆํžˆ ์ œํ•œ๋˜์–ด ์žˆ์ง€ ์•Š์•„ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ์ œ์–ด ์†”๋ฃจ์…˜์„ ์šฐํšŒํ•˜์—ฌ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์— ์ ‘์†ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ €์žฅํ•˜๊ณ  ์žˆ๋Š” ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์˜ ํ…Œ์ด๋ธ” ํ˜„ํ™ฉ์ด ํŒŒ์•…๋˜์ง€ ์•Š์•„, ์ž„์‹œ๋กœ ์ƒ์„ฑ๋œ ํ…Œ์ด๋ธ”์— ๋ถˆํ•„์š”ํ•œ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ํŒŒ๊ธฐ๋˜์ง€ ์•Š๊ณ  ๋Œ€๋Ÿ‰์œผ๋กœ ์ €์žฅ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ5์กฐ(์ ‘๊ทผ๊ถŒํ•œ์˜ ๊ด€๋ฆฌ), ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "ํ…Œ์ด๋ธ” ๋ชฉ๋ก ๋“ฑ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ๋‚ด์—์„œ ์ €์žฅยท๊ด€๋ฆฌ๋˜๊ณ  ์žˆ๋Š” ์ •๋ณด๋ฅผ ์‹๋ณ„ํ•˜๊ณ , ์ •๋ณด์˜ ์ค‘์š”๋„์™€ ์‘์šฉํ”„๋กœ๊ทธ๋žจ ๋ฐ ์‚ฌ์šฉ์ž ์œ ํ˜• ๋“ฑ์— ๋”ฐ๋ฅธ ์ ‘๊ทผํ†ต์ œ ์ •์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 6,"pass": 19,"total": 37,"manual": 0}},"2.6.5": {"name": "๋ฌด์„  ๋„คํŠธ์›Œํฌ ์ ‘๊ทผ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.5 ๋ฌด์„  ๋„คํŠธ์›Œํฌ ์ ‘๊ทผ","Subdomain": "2.6. 
์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„","AP ๋ณด์•ˆ ์„ค์ • ๋‚ด์—ญ","๋น„์ธ๊ฐ€ ๋ฌด์„  ๋„คํŠธ์›Œํฌ ์ ๊ฒ€ ์ด๋ ฅ","๋ฌด์„ ๋„คํŠธ์›Œํฌ ์‚ฌ์šฉ ์‹ ์ฒญยท์Šน์ธ ์ด๋ ฅ"],"AuditChecklist": ["๋ฌด์„ ๋„คํŠธ์›Œํฌ๋ฅผ ์—…๋ฌด์ ์œผ๋กœ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๋ฌด์„  AP ๋ฐ ๋„คํŠธ์›Œํฌ ๊ตฌ๊ฐ„ ๋ณด์•ˆ์„ ์œ„ํ•˜์—ฌ ์ธ์ฆ, ์†ก์ˆ˜์‹  ๋ฐ์ดํ„ฐ ์•”ํ˜ธํ™” ๋“ฑ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ธ๊ฐ€๋œ ์ž„์ง์›๋งŒ์ด ๋ฌด์„ ๋„คํŠธ์›Œํฌ๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋„๋ก ์‚ฌ์šฉ ์‹ ์ฒญ ๋ฐ ํ•ด์ง€ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","AD Hoc ์ ‘์† ๋ฐ ์กฐ์ง ๋‚ด ํ—ˆ๊ฐ€๋ฐ›์ง€ ์•Š์€ ๋ฌด์„  AP ํƒ์ง€ยท์ฐจ๋‹จ ๋“ฑ ๋น„์ธ๊ฐ€๋œ ๋ฌด์„ ๋„คํŠธ์›Œํฌ์— ๋Œ€ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์™ธ๋ถ€์ธ์šฉ ๋ฌด์„  ๋„คํŠธ์›Œํฌ์™€ ๋‚ด๋ถ€ ๋ฌด์„  ๋„คํŠธ์›Œํฌ ์˜์—ญ๋Œ€๊ฐ€ ๋™์ผํ•˜์—ฌ ์™ธ๋ถ€์ธ๋„ ๋ฌด์„ ๋„คํŠธ์›Œํฌ๋ฅผ ํ†ตํ•˜์—ฌ ๋ณ„๋„์˜ ํ†ต์ œ ์—†์ด ๋‚ด๋ถ€ ๋„คํŠธ์›Œํฌ์— ์ ‘๊ทผ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ฌด์„  AP ์„ค์ • ์‹œ ์ •๋ณด ์†ก์ˆ˜์‹  ์•”ํ˜ธํ™” ๊ธฐ๋Šฅ์„ ์„ค์ •ํ•˜์˜€์œผ๋‚˜, ์•ˆ์ „ํ•˜์ง€ ์•Š์€ ๋ฐฉ์‹์œผ๋กœ ์„ค์ •ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์—…๋ฌด ๋ชฉ์ ์œผ๋กœ ๋‚ด๋ถ€๋ง์— ์—ฐ๊ฒฐ๋œ ๋ฌด์„ AP์— ๋Œ€ํ•˜์—ฌ ๋ฌด์„ AP ๊ด€๋ฆฌ์ž ๋น„๋ฐ€๋ฒˆํ˜ธ ๋…ธ์ถœ(๋””ํดํŠธ ๋น„๋ฐ€๋ฒˆํ˜ธ ์‚ฌ์šฉ), ์ ‘๊ทผ์ œ์–ด ๋ฏธ์ ์šฉ ๋“ฑ ๋ณด์•ˆ ์„ค์ •์ด ๋ฏธํกํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "๋ฌด์„  ๋„คํŠธ์›Œํฌ๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ์‚ฌ์šฉ์ž ์ธ์ฆ, ์†ก์ˆ˜์‹  ๋ฐ์ดํ„ฐ ์•”ํ˜ธํ™”, AP ํ†ต์ œ ๋“ฑ ๋ฌด์„  ๋„คํŠธ์›Œํฌ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ๋˜ํ•œ AD Hoc ์ ‘์†, ๋น„์ธ๊ฐ€ AP ์‚ฌ์šฉ ๋“ฑ ๋น„์ธ๊ฐ€ ๋ฌด์„  ๋„คํŠธ์›Œํฌ ์ ‘์†์œผ๋กœ๋ถ€ํ„ฐ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.6.6": {"name": "์›๊ฒฉ์ ‘๊ทผ ํ†ต์ œ","checks": {"vpc_flow_logs_enabled": "FAIL","networkfirewall_in_all_vpc": "FAIL","cognito_user_pool_mfa_enabled": null,"iam_user_console_access_unused": null,"vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","iam_user_mfa_enabled_console_access": null,"workspaces_volume_encryption_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","appstream_fleet_maximum_session_duration": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","appstream_fleet_session_disconnect_timeout": null,"ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","cognito_identity_pool_guest_access_disabled": "FAIL","workspaces_vpc_2private_1public_subnets_nat": null,"cognito_user_pool_self_registration_disabled": null,"appstream_fleet_session_idle_disconnect_timeout": null,"appstream_fleet_default_internet_access_disabled": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"ec2_client_vpn_endpoint_connection_logging_enabled": null,"cloudwatch_log_metric_filter_authentication_failures": null,"ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.6 ์›๊ฒฉ์ ‘๊ทผ ํ†ต์ œ","Subdomain": "2.6. 
์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["VPN ๋“ฑ ์‚ฌ์™ธ์ ‘์† ์‹ ์ฒญ์„œ","VPN ๊ณ„์ • ๋ชฉ๋ก","VPN ์ ‘๊ทผ์ œ์–ด ์ •์ฑ… ์„ค์ • ํ˜„ํ™ฉ","IP ๊ด€๋ฆฌ๋Œ€์žฅ","์›๊ฒฉ ์ ‘๊ทผ์ œ์–ด ์„ค์ •(์„œ๋ฒ„ ์„ค์ •, ๋ณด์•ˆ์‹œ์Šคํ…œ ์„ค์ • ๋“ฑ)","๊ด€๋ฆฌ์šฉ ๋‹จ๋ง๊ธฐ ์ง€์ • ๋ฐ ๊ด€๋ฆฌ ํ˜„ํ™ฉ","๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„"],"AuditChecklist": ["์ธํ„ฐ๋„ท๊ณผ ๊ฐ™์€ ์™ธ๋ถ€ ๋„คํŠธ์›Œํฌ๋ฅผ ํ†ตํ•œ ์ •๋ณด์‹œ์Šคํ…œ ์›๊ฒฉ์šด์˜์€ ์›์น™์ ์œผ๋กœ ๊ธˆ์ง€ํ•˜๊ณ  ์žฅ์• ๋Œ€์‘ ๋“ฑ ๋ถ€๋“์ดํ•˜๊ฒŒ ํ—ˆ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๋ณด์™„๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋‚ด๋ถ€ ๋„คํŠธ์›Œํฌ๋ฅผ ํ†ตํ•˜์—ฌ ์›๊ฒฉ์œผ๋กœ ์ •๋ณด์‹œ์Šคํ…œ์„ ์šด์˜ํ•˜๋Š” ๊ฒฝ์šฐ ํŠน์ • ๋‹จ๋ง์— ํ•œํ•ด์„œ๋งŒ ์ ‘๊ทผ์„ ํ—ˆ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์žฌํƒ๊ทผ๋ฌด, ์›๊ฒฉํ˜‘์—…, ์Šค๋งˆํŠธ์›Œํฌ ๋“ฑ๊ณผ ๊ฐ™์€ ์›๊ฒฉ์—…๋ฌด ์ˆ˜ํ–‰ ์‹œ ์ค‘์š”์ •๋ณด ์œ ์ถœ, ํ•ดํ‚น ๋“ฑ ์นจํ•ด์‚ฌ๊ณ  ์˜ˆ๋ฐฉ์„ ์œ„ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ๊ด€๋ฆฌ, ์šด์˜, ๊ฐœ๋ฐœ, ๋ณด์•ˆ ๋“ฑ์„ ๋ชฉ์ ์œผ๋กœ ์›๊ฒฉ์œผ๋กœ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ ์‹œ์Šคํ…œ์— ์ ‘์†ํ•˜๋Š” ๋‹จ๋ง๊ธฐ๋Š” ๊ด€๋ฆฌ์šฉ ๋‹จ๋ง๊ธฐ๋กœ ์ง€์ •ํ•˜๊ณ  ์ž„์˜์กฐ์ž‘ ๋ฐ ๋ชฉ์  ์™ธ ์‚ฌ์šฉ ๊ธˆ์ง€ ๋“ฑ ์•ˆ์ „์กฐ์น˜๋ฅผ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์›๊ฒฉ ์ ‘๊ทผ์€ ์›์น™์ ์œผ๋กœ ๊ธˆ์ง€ํ•˜๊ณ  ๋ถˆ๊ฐ€ํ”ผํ•œ ๊ฒฝ์šฐ IP ๊ธฐ๋ฐ˜์˜ ์ ‘๊ทผํ†ต์ œ๋ฅผ ํ†ตํ•˜์—ฌ ์Šน์ธ๋œ ์‚ฌ์šฉ์ž๋งŒ ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ช…์‹œํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์›๊ฒฉ ๋ฐ์Šคํฌํ†ฑ ์—ฐ๊ฒฐ, SSH ์ ‘์†์ด IP์ฃผ์†Œ ๋“ฑ์œผ๋กœ ์ œํ•œ๋˜์–ด ์žˆ์ง€ ์•Š์•„ ๋ชจ๋“  PC์—์„œ ์›๊ฒฉ ์ ‘์†์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์›๊ฒฉ์šด์˜๊ด€๋ฆฌ๋ฅผ ์œ„ํ•˜์—ฌ VPN์„ ๊ตฌ์ถ•ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, VPN์— ๋Œ€ํ•œ ์‚ฌ์šฉ ์Šน์ธ ๋˜๋Š” ์ ‘์† ๊ธฐ๊ฐ„ ์ œํ•œ ์—†์ด ์ƒ์‹œ ํ—ˆ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์™ธ๋ถ€ ๊ทผ๋ฌด์ž๋ฅผ ์œ„ํ•˜์—ฌ ๊ฐœ์ธ ์Šค๋งˆํŠธ ๊ธฐ๊ธฐ์— ์—…๋ฌด์šฉ ๋ชจ๋ฐ”์ผ ์•ฑ์„ ์„ค์น˜ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์•…์„ฑ์ฝ”๋“œ, ๋ถ„์‹คยท๋„๋‚œ ๋“ฑ์— ์˜ํ•œ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•œ ์ ์ ˆํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…(๋ฐฑ์‹ , ์ดˆ๊ธฐํ™”, ์•”ํ˜ธํ™” ๋“ฑ)์„ ์ ์šฉํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์™ธ๋ถ€ ์ ‘์†์šฉ VPN์—์„œ ์‚ฌ์šฉ์ž๋ณ„๋กœ ์›๊ฒฉ์ ‘๊ทผ์ด ๊ฐ€๋Šฅํ•œ ๋„คํŠธ์›Œํฌ ๊ตฌ๊ฐ„ ๋ฐ ์ •๋ณด์‹œ์Šคํ…œ์„ ์ œํ•œํ•˜์ง€ ์•Š์•„ ์›๊ฒฉ์ ‘๊ทผ ์ธ์ฆ์„ ๋ฐ›์€ ์‚ฌ์šฉ์ž๊ฐ€ ์ „์ฒด ๋‚ด๋ถ€๋ง ๋ฐ ์ •๋ณด์‹œ์Šคํ…œ์— ๊ณผ๋„ํ•˜๊ฒŒ ์ ‘๊ทผ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "๋ณดํ˜ธ๊ตฌ์—ญ ์ด์™ธ ์žฅ์†Œ์—์„œ์˜ ์ •๋ณด์‹œ์Šคํ…œ ๊ด€๋ฆฌ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋Š” ์›์น™์ ์œผ๋กœ ๊ธˆ์ง€ํ•˜๊ณ , ์žฌํƒ๊ทผ๋ฌดยท์žฅ์• ๋Œ€์‘ยท์›๊ฒฉํ˜‘์—… ๋“ฑ ๋ถˆ๊ฐ€ํ”ผํ•œ ์‚ฌ์œ ๋กœ ์›๊ฒฉ์ ‘๊ทผ์„ ํ—ˆ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ์ฑ…์ž„์ž ์Šน์ธ, ์ ‘๊ทผ ๋‹จ๋ง ์ง€์ •, ์ ‘๊ทผ ํ—ˆ์šฉ๋ฒ”์œ„ ๋ฐ ๊ธฐ๊ฐ„ ์„ค์ •, ๊ฐ•ํ™”๋œ ์ธ์ฆ, ๊ตฌ๊ฐ„ ์•”ํ˜ธํ™”, ์ ‘์†๋‹จ๋ง ๋ณด์•ˆ(๋ฐฑ์‹ , ํŒจ์น˜ ๋“ฑ) ๋“ฑ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 8,"pass": 5,"total": 26,"manual": 0}},"2.6.7": {"name": "์ธํ„ฐ๋„ท ์ ‘์† ํ†ต์ œ","checks": {"ec2_elastic_ip_shodan": null,"vpc_flow_logs_enabled": "FAIL","ec2_instance_public_ip": "FAIL","ec2_elastic_ip_unassigned": "FAIL","networkfirewall_in_all_vpc": "FAIL","vpc_subnet_no_public_ip_by_default": "FAIL","vpc_subnet_separate_private_public": "FAIL","workspaces_volume_encryption_enabled": null,"route53_dangling_ip_subdomain_takeover": null,"appstream_fleet_maximum_session_duration": null,"appstream_fleet_session_disconnect_timeout": null,"cloudwatch_changes_to_vpcs_alarm_configured": 
null,"workspaces_vpc_2private_1public_subnets_nat": null,"ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","appstream_fleet_session_idle_disconnect_timeout": null,"appstream_fleet_default_internet_access_disabled": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.6.7 ์ธํ„ฐ๋„ท ์ ‘์† ํ†ต์ œ","Subdomain": "2.6. ์ ‘๊ทผํ†ต์ œ","AuditEvidence": ["๋น„์—…๋ฌด์‚ฌ์ดํŠธ(P2P ๋“ฑ) ์ฐจ๋‹จ์ •์ฑ…(๋น„์—…๋ฌด์‚ฌ์ดํŠธ ์ฐจ๋‹จ์‹œ์Šคํ…œ ๊ด€๋ฆฌํ™”๋ฉด ๋“ฑ)","์ธํ„ฐ๋„ท ์ ‘์†๋‚ด์—ญ ๋ชจ๋‹ˆํ„ฐ๋ง ์ด๋ ฅ","์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ์กฐ์น˜ ๋Œ€์ƒ์ž ๋ชฉ๋ก","๋ง๊ฐ„ ์ž๋ฃŒ ์ „์†ก ์ ˆ์ฐจ ๋ฐ ์ฒ˜๋ฆฌ๋‚ด์—ญ(์‹ ์ฒญยท์Šน์ธ๋‚ด์—ญ ๋“ฑ)","๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„"],"AuditChecklist": ["์ฃผ์š” ์ง๋ฌด ์ˆ˜ํ–‰ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ทจ๊ธ‰ ๋‹จ๋ง๊ธฐ ๋“ฑ ์—…๋ฌด์šฉ PC์˜ ์ธํ„ฐ๋„ท ์ ‘์†์— ๋Œ€ํ•œ ํ†ต์ œ์ •์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ์š” ์ •๋ณด์‹œ์Šคํ…œ(DB์„œ๋ฒ„ ๋“ฑ)์—์„œ ๋ถˆํ•„์š”ํ•œ ์™ธ๋ถ€ ์ธํ„ฐ๋„ท ์ ‘์†์„ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ด€๋ จ ๋ฒ•๋ น์— ๋”ฐ๋ผ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์˜๋ฌด๊ฐ€ ๋ถ€๊ณผ๋œ ๊ฒฝ์šฐ ๋Œ€์ƒ์ž๋ฅผ ์‹๋ณ„ํ•˜์—ฌ ์•ˆ์ „ํ•œ ๋ฐฉ์‹์œผ๋กœ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜๋ฅผ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ•์— ๋”ฐ๋ผ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜๋ฅผ ์ ์šฉํ•˜์˜€์œผ๋‚˜, ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ์ ‘๊ทผ๊ถŒํ•œ ์„ค์ • ๊ฐ€๋Šฅ์ž ๋“ฑ ์ผ๋ถ€ ์˜๋ฌด๋Œ€์ƒ์ž์— ๋Œ€ํ•˜์—ฌ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜ ์ ์šฉ์ด ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ•์— ๋”ฐ๋ฅธ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜ ์˜๋ฌด๋Œ€์ƒ์œผ๋กœ์„œ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜๋ฅผ ์ ์šฉํ•˜์˜€์œผ๋‚˜, ๋‹ค๋ฅธ ์„œ๋ฒ„๋ฅผ ๊ฒฝ์œ ํ•œ ์šฐํšŒ์ ‘์†์ด ๊ฐ€๋Šฅํ•˜์—ฌ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜๊ฐ€ ์ ์šฉ๋˜์ง€ ์•Š์€ ํ™˜๊ฒฝ์—์„œ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ์ ‘์†ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด์˜ ๋‹ค์šด๋กœ๋“œ, ํŒŒ๊ธฐ ๋“ฑ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : DMZ ๋ฐ ๋‚ด๋ถ€๋ง์— ์œ„์น˜ํ•œ ์ผ๋ถ€ ์„œ๋ฒ„์—์„œ ๋ถˆํ•„์š”ํ•˜๊ฒŒ ์ธํ„ฐ๋„ท์œผ๋กœ์˜ ์ง์ ‘ ์ ‘์†์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ธํ„ฐ๋„ท PC์™€ ๋‚ด๋ถ€ ์—…๋ฌด์šฉ PC๋ฅผ ๋ฌผ๋ฆฌ์  ๋ง๋ถ„๋ฆฌ ๋ฐฉ์‹์œผ๋กœ ์ธํ„ฐ๋„ท๋ง ์ฐจ๋‹จ ์กฐ์น˜๋ฅผ ์ ์šฉํ•˜๊ณ  ๋ง๊ฐ„ ์ž๋ฃŒ์ „์†ก์‹œ์Šคํ…œ์„ ๊ตฌ์ถ•ยท์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ž๋ฃŒ ์ „์†ก์— ๋Œ€ํ•œ ์Šน์ธ ์ ˆ์ฐจ๊ฐ€ ๋ถ€์žฌํ•˜๊ณ  ์ž๋ฃŒ ์ „์†ก ๋‚ด์—ญ์— ๋Œ€ํ•œ ์ฃผ๊ธฐ์  ๊ฒ€ํ† ๊ฐ€ ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž๊ฐ€ P2P ๋ฐ ์›นํ•˜๋“œ ์‚ฌ์ดํŠธ ์ ‘์† ์‹œ ์ฑ…์ž„์ž ์Šน์ธ์„ ๊ฑฐ์ณ ํŠน์ • ๊ธฐ๊ฐ„ ๋™์•ˆ๋งŒ ํ—ˆ์šฉํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์Šน์ธ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜์ง€ ์•Š๊ณ  ์˜ˆ์™ธ ์ ‘์†์ด ํ—ˆ์šฉ๋œ ์‚ฌ๋ก€๊ฐ€ ๋‹ค์ˆ˜ ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "์ธํ„ฐ๋„ท์„ ํ†ตํ•œ ์ •๋ณด ์œ ์ถœ, ์•…์„ฑ์ฝ”๋“œ ๊ฐ์—ผ, ๋‚ด๋ถ€๋ง ์นจํˆฌ ๋“ฑ์„ ์˜ˆ๋ฐฉํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ฃผ์š” ์ •๋ณด์‹œ์Šคํ…œ, ์ฃผ์š” ์ง๋ฌด ์ˆ˜ํ–‰ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ทจ๊ธ‰ ๋‹จ๋ง๊ธฐ ๋“ฑ์— ๋Œ€ํ•œ ์ธํ„ฐ๋„ท ์ ‘์† ๋˜๋Š” ์„œ๋น„์Šค(P2P, ์›นํ•˜๋“œ, ๋ฉ”์‹ ์ € ๋“ฑ)๋ฅผ ์ œํ•œํ•˜๋Š” ๋“ฑ ์ธํ„ฐ๋„ท ์ ‘์† ํ†ต์ œ ์ •์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 6,"pass": 1,"total": 19,"manual": 0}},"2.7.1": {"name": "์•”ํ˜ธ์ •์ฑ… ์ ์šฉ","checks": {"elb_ssl_listeners": "FAIL","backup_vaults_exist": null,"elbv2_ssl_listeners": "FAIL","ssm_document_secrets": "PASS","backup_vaults_encrypted": "PASS","rds_snapshots_encrypted": 
"FAIL","elb_insecure_ssl_ciphers": "PASS","s3_bucket_kms_encryption": "FAIL","ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","elbv2_insecure_ssl_ciphers": "PASS","athena_workgroup_encryption": null,"ec2_ebs_snapshots_encrypted": "FAIL","s3_bucket_default_encryption": "PASS","ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","rds_instance_transport_encrypted": "FAIL","cloudtrail_kms_encryption_enabled": "FAIL","neptune_cluster_storage_encrypted": null,"s3_bucket_secure_transport_policy": "FAIL","documentdb_cluster_storage_encrypted": null,"workspaces_volume_encryption_enabled": null,"awslambda_function_no_secrets_in_code": "PASS","glue_database_connections_ssl_enabled": null,"athena_workgroup_enforce_configuration": null,"cloudfront_distributions_https_enabled": null,"cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudformation_stack_outputs_find_secrets": "PASS","codebuild_project_no_secrets_in_variables": "PASS","kafka_cluster_encryption_at_rest_uses_cmk": null,"sns_subscription_not_using_http_endpoints": "PASS","sns_topics_kms_encryption_at_rest_enabled": "FAIL","sqs_queues_server_side_encryption_enabled": "PASS","awslambda_function_no_secrets_in_variables": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"glue_etl_jobs_amazon_s3_encryption_enabled": "PASS","acm_certificates_with_secure_key_algorithms": "PASS","cloudwatch_log_group_kms_encryption_enabled": "FAIL","ecs_task_definitions_no_environment_secrets": "PASS","kafka_cluster_in_transit_encryption_enabled": null,"storagegateway_fileshare_encryption_enabled": null,"apigateway_restapi_client_certificate_enabled": "FAIL","glue_etl_jobs_job_bookmark_encryption_enabled": "FAIL","glue_data_catalogs_metadata_encryption_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"dynamodb_accelerator_cluster_encryption_enabled": null,"kafka_cluster_mutual_tls_authentication_enabled": null,"directoryservice_radius_server_security_protocol": null,"glue_development_endpoints_s3_encryption_enabled": null,"glue_etl_jobs_cloudwatch_logs_encryption_enabled": "FAIL","autoscaling_find_secrets_ec2_launch_configuration": "PASS","eks_cluster_kms_cmk_encryption_in_secrets_enabled": null,"elasticache_redis_cluster_rest_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"cloudfront_distributions_field_level_encryption_enabled": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"elasticache_redis_cluster_in_transit_encryption_enabled": null,"opensearch_service_domains_https_communications_enforced": null,"sagemaker_training_jobs_intercontainer_encryption_enabled": null,"glue_data_catalogs_connection_passwords_encryption_enabled": "FAIL","glue_development_endpoints_job_bookmark_encryption_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null,"sagemaker_training_jobs_volume_and_output_encryption_enabled": null,"glue_development_endpoints_cloudwatch_logs_encryption_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.7.1 ์•”ํ˜ธ์ •์ฑ… ์ ์šฉ","Subdomain": "2.7. 
์•”ํ˜ธํ™” ์ ์šฉ","AuditEvidence": ["์•”ํ˜ธํ†ต์ œ ์ •์ฑ…(๋Œ€์ƒ, ๋ฐฉ์‹, ์•Œ๊ณ ๋ฆฌ์ฆ˜ ๋“ฑ)","์•”ํ˜ธํ™” ์ ์šฉํ˜„ํ™ฉ(์ €์žฅ ๋ฐ ์ „์†ก ์‹œ)","์œ„ํ—˜๋„ ๋ถ„์„ ๊ฒฐ๊ณผ(๋‚ด๋ถ€๋ง์—์„œ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ด์™ธ์˜ ๊ณ ์œ ์‹๋ณ„์ •๋ณด ์•”ํ˜ธํ™” ๋ฏธ์ ์šฉ ์‹œ)","์•”ํ˜ธํ™” ์†”๋ฃจ์…˜ ๊ด€๋ฆฌ ํ™”๋ฉด"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด ๋ฐ ์ฃผ์š”์ •๋ณด์˜ ๋ณดํ˜ธ๋ฅผ ์œ„ํ•˜์—ฌ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ๋ฐ˜์˜ํ•œ ์•”ํ˜ธํ™” ๋Œ€์ƒ, ์•”ํ˜ธ๊ฐ•๋„, ์•”ํ˜ธ์‚ฌ์šฉ ๋“ฑ์ด ํฌํ•จ๋œ ์•”ํ˜ธ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์•”ํ˜ธ์ •์ฑ…์— ๋”ฐ๋ผ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ฃผ์š”์ •๋ณด์˜ ์ €์žฅ, ์ „์†ก, ์ „๋‹ฌ ์‹œ ์•”ํ˜ธํ™”๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ์ •์ฑ…ยท์ง€์นจ์— ์•”ํ˜ธํ†ต์ œ ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ๊ณ ๋ คํ•œ ์•”ํ˜ธํ™” ๋Œ€์ƒ, ์•”ํ˜ธ ๊ฐ•๋„, ์ €์žฅ ๋ฐ ์ „์†ก ์‹œ ์•”ํ˜ธํ™” ๋ฐฉ๋ฒ•, ์•”ํ˜ธํ™” ๊ด€๋ จ ๋‹ด๋‹น์ž์˜ ์—ญํ•  ๋ฐ ์ฑ…์ž„ ๋“ฑ์— ๊ด€ํ•œ ์‚ฌํ•ญ์ด ์ ์ ˆํžˆ ๋ช…์‹œ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์•”ํ˜ธ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๋ฉด์„œ ํ•ด๋‹น ๊ธฐ์—…์ด ์ ์šฉ๋ฐ›๋Š” ๋ฒ•๊ทœ๋ฅผ ์ž˜๋ชป ์ ์šฉํ•˜์—ฌ ์•”ํ˜ธํ™” ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ์ค€์ˆ˜ํ•˜์ง€ ๋ชปํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ(์˜ˆ๋ฅผ ๋“ค์–ด, ์ด์šฉ์ž์˜ ๊ณ„์ขŒ๋ฒˆํ˜ธ๋ฅผ ์ €์žฅํ•˜๋ฉด์„œ ์•”ํ˜ธํ™” ๋ฏธ์ ์šฉ)","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ๋ฐ ์ •๋ณด์ฃผ์ฒด์˜ ๋น„๋ฐ€๋ฒˆํ˜ธ์— ๋Œ€ํ•˜์—ฌ ์ผ๋ฐฉํ–ฅ ์•”ํ˜ธํ™”๋ฅผ ์ ์šฉํ•˜์˜€์œผ๋‚˜, ์•ˆ์ „ํ•˜์ง€ ์•Š์€ MD5 ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๊ฐ€ ๊ด€๋ จ ๋ฒ•๊ทœ ๋ฐ ๋‚ด๋ถ€ ๊ทœ์ •์— ๋”ฐ๋ผ ์ธํ„ฐ๋„ท ์‡ผํ•‘๋ชฐ์— ๋Œ€ํ•˜์—ฌ ๋ณด์•ˆ์„œ๋ฒ„๋ฅผ ์ ์šฉํ•˜์˜€์œผ๋‚˜, ํšŒ์›์ •๋ณด ์กฐํšŒ ๋ฐ ๋ณ€๊ฒฝ, ๋น„๋ฐ€๋ฒˆํ˜ธ ์ฐพ๊ธฐ, ๋น„๋ฐ€๋ฒˆํ˜ธ ๋ณ€๊ฒฝ ๋“ฑ ์ด์šฉ์ž์˜ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ์ „์†ก๋˜๋Š” ์ผ๋ถ€ ๊ตฌ๊ฐ„์— ์•”ํ˜ธํ™” ์กฐ์น˜๊ฐ€ ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ •๋ณด์‹œ์Šคํ…œ ์ ‘์†์šฉ ๋น„๋ฐ€๋ฒˆํ˜ธ, ์ธ์ฆํ‚ค ๊ฐ’ ๋“ฑ์ด ์‹œ์Šคํ…œ ์„ค์ •ํŒŒ์ผ ๋ฐ ์†Œ์Šค์ฝ”๋“œ ๋‚ด์— ํ‰๋ฌธ์œผ๋กœ ์ €์žฅ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ24์กฐ์˜2(์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ์˜ ์ œํ•œ), ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ7์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์•”ํ˜ธํ™”)"]}],"description": "๊ฐœ์ธ์ •๋ณด ๋ฐ ์ฃผ์š”์ •๋ณด ๋ณดํ˜ธ๋ฅผ ์œ„ํ•˜์—ฌ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ๋ฐ˜์˜ํ•œ ์•”ํ˜ธํ™” ๋Œ€์ƒ, ์•”ํ˜ธ ๊ฐ•๋„, ์•”ํ˜ธ ์‚ฌ์šฉ ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ฃผ์š”์ •๋ณด์˜ ์ €์žฅยท์ „์†กยท์ „๋‹ฌ ์‹œ ์•”ํ˜ธํ™”๋ฅผ ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 18,"pass": 19,"total": 66,"manual": 0}},"2.7.2": {"name": "์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ","checks": {"kms_cmk_are_used": null,"kms_cmk_rotation_enabled": null,"kms_key_not_publicly_accessible": null,"kms_cmk_not_deleted_unintentionally": null,"rds_instance_certificate_expiration": "PASS","secretsmanager_automatic_rotation_enabled": "FAIL","acm_certificates_transparency_logs_enabled": "PASS","directoryservice_ldap_certificate_expiration": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.7.2 ์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ","Subdomain": "2.7. 
์•”ํ˜ธํ™” ์ ์šฉ","AuditEvidence": ["์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ์ •์ฑ…","์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ๋Œ€์žฅ ๋ฐ ๊ด€๋ฆฌ์‹œ์Šคํ…œ ํ™”๋ฉด"],"AuditChecklist": ["์•”ํ˜ธํ‚ค ์ƒ์„ฑ, ์ด์šฉ, ๋ณด๊ด€, ๋ฐฐํฌ, ๋ณ€๊ฒฝ, ๋ณต๊ตฌ, ํŒŒ๊ธฐ ๋“ฑ์— ๊ด€ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์•”ํ˜ธํ‚ค๋Š” ํ•„์š”์‹œ ๋ณต๊ตฌ๊ฐ€ ๊ฐ€๋Šฅํ•˜๋„๋ก ๋ณ„๋„์˜ ์•ˆ์ „ํ•œ ์žฅ์†Œ์— ๋ณด๊ด€ํ•˜๊ณ  ์•”ํ˜ธํ‚ค ์‚ฌ์šฉ์— ๊ด€ํ•œ ์ ‘๊ทผ๊ถŒํ•œ์„ ์ตœ์†Œํ™”ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์•”ํ˜ธ ์ •์ฑ… ๋‚ด์— ์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ์™€ ๊ด€๋ จ๋œ ์ ˆ์ฐจ, ๋ฐฉ๋ฒ• ๋“ฑ์ด ๋ช…์‹œ๋˜์–ด ์žˆ์ง€ ์•Š์•„ ๋‹ด๋‹น์ž๋ณ„๋กœ ์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ ์ˆ˜์ค€ ๋ฐ ๋ฐฉ๋ฒ• ์ƒ์ด ๋“ฑ ์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ์— ์ทจ์•ฝ์‚ฌํ•ญ์ด ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ๊ทœ์ •์— ์ค‘์š” ์ •๋ณด๋ฅผ ์•”ํ˜ธํ™” ํ•  ๊ฒฝ์šฐ ๊ด€๋ จ ์ฑ…์ž„์ž ์Šน์ธ ํ•˜์— ์•”ํ˜ธํ™” ํ‚ค๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ๋Œ€์žฅ์„ ์ž‘์„ฑํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์•”ํ˜ธํ‚ค ๊ด€๋ฆฌ๋Œ€์žฅ์— ์ผ๋ถ€ ์•”ํ˜ธํ‚ค๊ฐ€ ๋ˆ„๋ฝ๋˜์–ด ์žˆ๊ฑฐ๋‚˜ ํ˜„ํ–‰ํ™”๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ๋ฐœ์‹œ์Šคํ…œ์— ์ ์šฉ๋˜์–ด ์žˆ๋Š” ์•”ํ˜ธํ‚ค์™€ ์šด์˜์‹œ์Šคํ…œ์— ์ ์šฉ๋œ ์•”ํ˜ธํ‚ค๊ฐ€ ๋™์ผํ•˜์—ฌ, ์•”ํ˜ธํ™”๋œ ์‹ค๋ฐ์ดํ„ฐ๊ฐ€ ๊ฐœ๋ฐœ์‹œ์Šคํ…œ์„ ํ†ตํ•ด ์‰ฝ๊ฒŒ ๋ณตํ˜ธํ™”๊ฐ€ ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ7์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์•”ํ˜ธํ™”)"]}],"description": "์•”ํ˜ธํ‚ค์˜ ์•ˆ์ „ํ•œ ์ƒ์„ฑยท์ด์šฉยท๋ณด๊ด€ยท๋ฐฐํฌยทํŒŒ๊ธฐ๋ฅผ ์œ„ํ•œ ๊ด€๋ฆฌ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ํ•„์š” ์‹œ ๋ณต๊ตฌ๋ฐฉ์•ˆ์„ ๋งˆ๋ จํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 1,"pass": 2,"total": 9,"manual": 0}},"2.8.1": {"name": "๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ ์ •์˜","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","fms_policy_compliant": null,"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","accessanalyzer_enabled": "PASS","networkfirewall_in_all_vpc": "FAIL","guardduty_centrally_managed": "FAIL","wafv2_webacl_logging_enabled": "FAIL","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","codebuild_project_no_secrets_in_variables": "PASS","codebuild_project_user_controlled_buildspec": "PASS","wellarchitected_workload_no_high_or_medium_risks": "FAIL","codebuild_project_source_repo_url_no_sensitive_credentials": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.8.1 ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ ์ •์˜","Subdomain": "2.8. 
์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… ๋ฐ ๊ฐœ๋ฐœ ๋ณด์•ˆ","AuditEvidence": ["์ •๋ณด์‹œ์Šคํ…œ ์ธ์ˆ˜ ๊ธฐ์ค€ ๋ฐ ์ ˆ์ฐจ","์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… RFP(์ œ์•ˆ์š”์ฒญ์„œ) ๋ฐ ๊ตฌ๋งค๊ณ„์•ฝ์„œ","๊ฐœ๋ฐœ ์‚ฐ์ถœ๋ฌผ(์‚ฌ์—…์ˆ˜ํ–‰๊ณ„ํš์„œ, ์š”๊ตฌ์‚ฌํ•ญ์ •์˜์„œ, ํ™”๋ฉด์„ค๊ณ„์„œ, ๋ณด์•ˆ์•„ํ‚คํ…์ฒ˜ ์„ค๊ณ„์„œ, ์‹œํ—˜๊ณ„ํš์„œ ๋“ฑ)","์‹œํ์–ด ์ฝ”๋”ฉ ํ‘œ์ค€"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์„ ์‹ ๊ทœ๋กœ ๋„์ž…ยท๊ฐœ๋ฐœ ๋˜๋Š” ๋ณ€๊ฒฝํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ์ธก๋ฉด์˜ ํƒ€๋‹น์„ฑ ๊ฒ€ํ†  ๋ฐ ์ธ์ˆ˜ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์„ ์‹ ๊ทœ๋กœ ๋„์ž…ยท๊ฐœ๋ฐœ ๋˜๋Š” ๋ณ€๊ฒฝํ•˜๋Š” ๊ฒฝ์šฐ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ, ์ตœ์‹  ์ทจ์•ฝ์  ๋“ฑ์„ ํฌํ•จํ•œ ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ์„ ๋ช…ํ™•ํžˆ ์ •์˜ํ•˜๊ณ  ์„ค๊ณ„ ๋‹จ๊ณ„์—์„œ๋ถ€ํ„ฐ ๋ฐ˜์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์˜ ์•ˆ์ „ํ•œ ๊ตฌํ˜„์„ ์œ„ํ•œ ์ฝ”๋”ฉ ํ‘œ์ค€์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด์‹œ์Šคํ…œ ์ธ์ˆ˜ ์ „ ๋ณด์•ˆ์„ฑ ๊ฒ€์ฆ ๊ธฐ์ค€ ๋ฐ ์ ˆ์ฐจ๊ฐ€ ๋งˆ๋ จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์‹ ๊ทœ ์‹œ์Šคํ…œ ๋„์ž… ์‹œ ๊ธฐ์กด ์šด์˜ํ™˜๊ฒฝ์— ๋Œ€ํ•œ ์˜ํ–ฅ ๋ฐ ๋ณด์•ˆ์„ฑ์„ ๊ฒ€ํ† ํ•˜๋„๋ก ๋‚ด๋ถ€ ๊ทœ์ •์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ตœ๊ทผ ๋„์ž…ํ•œ ์ผ๋ถ€ ์ •๋ณด์‹œ์Šคํ…œ์— ๋Œ€ํ•˜์—ฌ ์ธ์ˆ˜ ์‹œ ๋ณด์•ˆ์š”๊ฑด์— ๋Œ€ํ•ด ์„ธ๋ถ€ ๊ธฐ์ค€ ๋ฐ ๊ณ„ํš์ด ์ˆ˜๋ฆฝ๋˜์ง€ ์•Š์•˜์œผ๋ฉฐ, ์ด์— ๋”ฐ๋ผ ์ธ์ˆ˜ ์‹œ ๋ณด์•ˆ์„ฑ๊ฒ€ํ† ๊ฐ€ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ๋ฐœ ๊ด€๋ จ ๋‚ด๋ถ€ ์ง€์นจ์— ๊ฐœ๋ฐœ๊ณผ ๊ด€๋ จ๋œ ์ฃผ์š” ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ(์ธ์ฆ ๋ฐ ์•”ํ˜ธํ™”, ๋ณด์•ˆ๋กœ๊ทธ ๋“ฑ)์ด ์ •์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : สป๊ฐœ๋ฐœํ‘œ์ค€์ •์˜์„œสผ์— ์‚ฌ์šฉ์ž ํŒจ์Šค์›Œ๋“œ๋ฅผ ์•ˆ์ „ํ•˜์ง€ ์•Š์€ ์•”ํ˜ธํ™” ์•Œ๊ณ ๋ฆฌ์ฆ˜(MD5, SHA1)์œผ๋กœ ์‚ฌ์šฉํ•˜๋„๋ก ๋˜์–ด ์žˆ์–ด ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ์ ์ ˆํžˆ ๋ฐ˜์˜ํ•˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ •๋ณด์‹œ์Šคํ…œ์˜ ๋„์ž…ยท๊ฐœ๋ฐœยท๋ณ€๊ฒฝ ์‹œ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ ๊ด€๋ จ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ, ์ตœ์‹  ๋ณด์•ˆ์ทจ์•ฝ์ , ์•ˆ์ „ํ•œ ์ฝ”๋”ฉ๋ฐฉ๋ฒ• ๋“ฑ ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ์„ ์ •์˜ํ•˜๊ณ  ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 7,"pass": 7,"total": 16,"manual": 0}},"2.8.2": {"name": "๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ ๊ฒ€ํ†  ๋ฐ ์‹œํ—˜","checks": {"macie_is_enabled": "PASS","securityhub_enabled": "PASS","fms_policy_compliant": null,"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","accessanalyzer_enabled": "PASS","networkfirewall_in_all_vpc": "FAIL","guardduty_centrally_managed": "FAIL","wafv2_webacl_logging_enabled": "FAIL","inspector2_active_findings_exist": "FAIL","config_recorder_all_regions_enabled": null,"guardduty_no_high_severity_findings": "FAIL","cloudtrail_cloudwatch_logging_enabled": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","codebuild_project_no_secrets_in_variables": "PASS","codebuild_project_user_controlled_buildspec": "PASS","wellarchitected_workload_no_high_or_medium_risks": "FAIL","codebuild_project_source_repo_url_no_sensitive_credentials": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.8.2 ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ ๊ฒ€ํ†  ๋ฐ ์‹œํ—˜","Subdomain": "2.8. 
์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… ๋ฐ ๊ฐœ๋ฐœ ๋ณด์•ˆ","AuditEvidence": ["์ •๋ณด์‹œ์Šคํ…œ ์ธ์ˆ˜ ์‹œํ—˜ ๊ฒฐ๊ณผ","์š”๊ตฌ์‚ฌํ•ญ ์ถ”์  ๋งคํŠธ๋ฆญ์Šค","์‹œํ—˜ ๊ณ„ํš์„œ, ์‹œํ—˜ ๊ฒฐ๊ณผ์„œ","์ทจ์•ฝ์  ์ ๊ฒ€ ๊ฒฐ๊ณผ์„œ","๊ฐœ์ธ์ •๋ณด ์˜ํ–ฅํ‰๊ฐ€์„œ","๊ฐœ์ธ์ •๋ณด ์˜ํ–ฅํ‰๊ฐ€ ๊ฐœ์„ ๊ณ„ํš ์ดํ–‰์ ๊ฒ€ ํ™•์ธ์„œ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์˜ ๋„์ž…, ๊ฐœ๋ฐœ, ๋ณ€๊ฒฝ ์‹œ ๋ถ„์„ ๋ฐ ์„ค๊ณ„ ๋‹จ๊ณ„์—์„œ ์ •์˜ํ•œ ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ์ด ํšจ๊ณผ์ ์œผ๋กœ ์ ์šฉ๋˜์—ˆ๋Š”์ง€๋ฅผ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•œ ์‹œํ—˜์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์ด ์•ˆ์ „ํ•œ ์ฝ”๋”ฉ ๊ธฐ์ค€ ๋“ฑ์— ๋”ฐ๋ผ ์•ˆ์ „ํ•˜๊ฒŒ ๊ฐœ๋ฐœ๋˜์—ˆ๋Š”์ง€๋ฅผ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•œ ์ทจ์•ฝ์  ์ ๊ฒ€์ด ์ˆ˜ํ–‰๋˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‹œํ—˜ ๋ฐ ์ทจ์•ฝ์  ์ ๊ฒ€ ๊ณผ์ •์—์„œ ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์ด ์‹ ์†ํ•˜๊ฒŒ ๊ฐœ์„ ๋  ์ˆ˜ ์žˆ๋„๋ก ๊ฐœ์„ ๊ณ„ํš ์ˆ˜๋ฆฝ, ์ดํ–‰์ ๊ฒ€ ๋“ฑ์˜ ์ ˆ์ฐจ๋ฅผ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ณต๊ธฐ๊ด€์€ ๊ด€๋ จ ๋ฒ•๋ น์— ๋”ฐ๋ผ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ์‹ ๊ทœ ๊ฐœ๋ฐœ ๋ฐ ๋ณ€๊ฒฝ ์‹œ ๋ถ„์„ยท์„ค๊ณ„ ๋‹จ๊ณ„์—์„œ ์˜ํ–ฅํ‰๊ฐ€๊ธฐ๊ด€์„ ํ†ตํ•˜์—ฌ ์˜ํ–ฅํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ๊ทธ ๊ฒฐ๊ณผ๋ฅผ ๊ฐœ๋ฐœ ๋ฐ ๋ณ€๊ฒฝ ์‹œ ๋ฐ˜์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ •๋ณด์‹œ์Šคํ…œ ๊ตฌํ˜„ ์ดํ›„ ๊ฐœ๋ฐœ ๊ด€๋ จ ๋‚ด๋ถ€ ์ง€์นจ ๋ฐ ๋ฌธ์„œ์— ์ •์˜๋œ ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ์„ ์‹œํ—˜ํ•˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์‘์šฉํ”„๋กœ๊ทธ๋žจ ํ…Œ์ŠคํŠธ ์‹œ๋‚˜๋ฆฌ์˜ค ๋ฐ ๊ธฐ์ˆ ์  ์ทจ์•ฝ์  ์ ๊ฒ€ํ•ญ๋ชฉ์— ์ž…๋ ฅ๊ฐ’ ์œ ํšจ์„ฑ ์ฒดํฌ ๋“ฑ์˜ ์ค‘์š” ์ ๊ฒ€ํ•ญ๋ชฉ ์ผ๋ถ€๊ฐ€ ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ตฌํ˜„ ๋˜๋Š” ์‹œํ—˜ ๊ณผ์ •์—์„œ ์•Œ๋ ค์ง„ ๊ธฐ์ˆ ์  ์ทจ์•ฝ์ ์ด ์กด์žฌํ•˜๋Š”์ง€ ์—ฌ๋ถ€๋ฅผ ์ ๊ฒ€ํ•˜์ง€ ์•Š๊ฑฐ๋‚˜, ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋˜๋Š” ์Šน์ธ ์—†์ด ํ™•์ธ๋œ ์ทจ์•ฝ์ ์— ๋Œ€ํ•œ ๊ฐœ์„ ์กฐ์น˜๋ฅผ ์ดํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ณต๊ณต๊ธฐ๊ด€์ด 5๋งŒ ๋ช… ์ด์ƒ ์ •๋ณด์ฃผ์ฒด์˜ ๊ณ ์œ ์‹๋ณ„์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๋“ฑ ์˜ํ–ฅํ‰๊ฐ€ ์˜๋ฌด ๋Œ€์ƒ ๊ฐœ์ธ์ •๋ณด ํŒŒ์ผ ๋ฐ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์„ ์‹ ๊ทœ๋กœ ๊ตฌ์ถ•ํ•˜๋ฉด์„œ ์˜ํ–ฅํ‰๊ฐ€๋ฅผ ์‹ค์‹œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๊ณต๊ณต๊ธฐ๊ด€์ด ์˜ํ–ฅํ‰๊ฐ€๋ฅผ ์ˆ˜ํ–‰ํ•œ ํ›„ ์˜ํ–ฅํ‰๊ฐ€๊ธฐ๊ด€์œผ๋กœ๋ถ€ํ„ฐ ์˜ํ–ฅํ‰๊ฐ€์„œ๋ฅผ ๋ฐ›์€ ์ง€ 2๊ฐœ์›”์ด ์ง€๋‚ฌ์Œ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ์˜ํ–ฅํ‰๊ฐ€์„œ๋ฅผ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์œ„์›ํšŒ์— ์ œ์ถœํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ์‹ ๊ทœ ์‹œ์Šคํ…œ ๋„์ž… ์‹œ ๊ธฐ์กด ์šด์˜ํ™˜๊ฒฝ์— ๋Œ€ํ•œ ์˜ํ–ฅ ๋ฐ ๋ณด์•ˆ์„ฑ์„ ๊ฒ€ํ† (์ทจ์•ฝ์  ์ ๊ฒ€ ๋“ฑ)ํ•˜๋„๋ก ๋‚ด๋ถ€ ์ง€์นจ์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ตœ๊ทผ ๋„์ž…ํ•œ ์ผ๋ถ€ ์ •๋ณด์‹œ์Šคํ…œ์— ๋Œ€ํ•˜์—ฌ ์ธ์ˆ˜ ์‹œ ์ทจ์•ฝ์  ์ ๊ฒ€ ๋“ฑ ๋ณด์•ˆ์„ฑ๊ฒ€ํ† ๊ฐ€ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ33์กฐ(๊ฐœ์ธ์ •๋ณด ์˜ํ–ฅํ‰๊ฐ€)","๊ฐœ์ธ์ •๋ณด ์˜ํ–ฅํ‰๊ฐ€์— ๊ด€ํ•œ ๊ณ ์‹œ"]}],"description": "์‚ฌ์ „ ์ •์˜๋œ ๋ณด์•ˆ ์š”๊ตฌ์‚ฌํ•ญ์— ๋”ฐ๋ผ ์ •๋ณด์‹œ์Šคํ…œ์ด ๋„์ž… ๋˜๋Š” ๊ตฌํ˜„๋˜์—ˆ๋Š”์ง€๋ฅผ ๊ฒ€ํ† ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ ์ค€์ˆ˜, ์ตœ์‹  ๋ณด์•ˆ์ทจ์•ฝ์  ์ ๊ฒ€, ์•ˆ์ „ํ•œ ์ฝ”๋”ฉ ๊ตฌํ˜„, ๊ฐœ์ธ์ •๋ณด ์˜ํ–ฅํ‰๊ฐ€ ๋“ฑ์˜ ๊ฒ€ํ†  ๊ธฐ์ค€๊ณผ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ๋ฐœ๊ฒฌ๋œ ๋ฌธ์ œ์ ์— ๋Œ€ํ•œ ๊ฐœ์„ ์กฐ์น˜๋ฅผ ์ˆ˜ํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 10,"pass": 7,"total": 19,"manual": 0}},"2.8.3": {"name": "์‹œํ—˜๊ณผ ์šด์˜ ํ™˜๊ฒฝ ๋ถ„๋ฆฌ","checks": {"codebuild_project_user_controlled_buildspec": "PASS"},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.8.3 ์‹œํ—˜๊ณผ ์šด์˜ ํ™˜๊ฒฝ ๋ถ„๋ฆฌ","Subdomain": "2.8. 
์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… ๋ฐ ๊ฐœ๋ฐœ ๋ณด์•ˆ","AuditEvidence": ["๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„(์‹œํ—˜ํ™˜๊ฒฝ ๊ตฌ์„ฑ ํฌํ•จ)","์šด์˜ ํ™˜๊ฒฝ๊ณผ ๊ฐœ๋ฐœยท์‹œํ—˜ ํ™˜๊ฒฝ ๊ฐ„ ์ ‘๊ทผํ†ต์ œ ์ ์šฉ ํ˜„ํ™ฉ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์˜ ๊ฐœ๋ฐœ ๋ฐ ์‹œํ—˜ ์‹œ์Šคํ…œ์„ ์šด์˜์‹œ์Šคํ…œ๊ณผ ๋ถ„๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ถˆ๊ฐ€ํ”ผํ•œ ์‚ฌ์œ ๋กœ ๊ฐœ๋ฐœ๊ณผ ์šด์˜ํ™˜๊ฒฝ์˜ ๋ถ„๋ฆฌ๊ฐ€ ์–ด๋ ค์šด ๊ฒฝ์šฐ ์ƒํ˜ธ๊ฒ€ํ† , ์ƒ๊ธ‰์ž ๋ชจ๋‹ˆํ„ฐ๋ง, ๋ณ€๊ฒฝ ์Šน์ธ, ์ฑ…์ž„์ถ”์ ์„ฑ ํ™•๋ณด ๋“ฑ์˜ ๋ณด์•ˆ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋˜๋Š” ์Šน์ธ ์—†์ด ๋ณ„๋„์˜ ๊ฐœ๋ฐœํ™˜๊ฒฝ์„ ๊ตฌ์„ฑํ•˜์ง€ ์•Š๊ณ  ์šด์˜ํ™˜๊ฒฝ์—์„œ ์ง์ ‘ ์†Œ์Šค์ฝ”๋“œ ๋ณ€๊ฒฝ์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ถˆ๊ฐ€ํ”ผํ•˜๊ฒŒ ๊ฐœ๋ฐœ์‹œ์Šคํ…œ๊ณผ ์šด์˜์‹œ์Šคํ…œ์„ ๋ถ„๋ฆฌํ•˜์ง€ ์•Š๊ณ  ์šด์˜ ์ค‘์— ์žˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ์ƒํ˜ธ ๊ฒ€ํ†  ๋‚ด์—ญ, ๋ชจ๋‹ˆํ„ฐ๋ง ๋‚ด์—ญ ๋“ฑ์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ๋ฐœ์‹œ์Šคํ…œ์ด ๋ณ„๋„๋กœ ๊ตฌ์„ฑ๋˜์–ด ์žˆ์œผ๋‚˜, ๊ฐœ๋ฐœํ™˜๊ฒฝ์œผ๋กœ๋ถ€ํ„ฐ ์šด์˜ํ™˜๊ฒฝ์œผ๋กœ์˜ ์ ‘๊ทผ์ด ํ†ต์ œ๋˜์ง€ ์•Š์•„ ๊ฐœ๋ฐœ์ž๋“ค์ด ๊ฐœ๋ฐœ์‹œ์Šคํ…œ์„ ๊ฒฝ์œ ํ•˜์—ฌ ๋ถˆํ•„์š”ํ•˜๊ฒŒ ์šด์˜์‹œ์Šคํ…œ ์ ‘๊ทผ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๊ฐœ๋ฐœ ๋ฐ ์‹œํ—˜ ์‹œ์Šคํ…œ์€ ์šด์˜์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ๋น„์ธ๊ฐ€ ์ ‘๊ทผ ๋ฐ ๋ณ€๊ฒฝ์˜ ์œ„ํ—˜์„ ๊ฐ์†Œ์‹œํ‚ค๊ธฐ ์œ„ํ•˜์—ฌ ์›์น™์ ์œผ๋กœ ๋ถ„๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.8.4": {"name": "์‹œํ—˜ ๋ฐ์ดํ„ฐ ๋ณด์•ˆ","checks": {"codebuild_project_no_secrets_in_variables": "PASS"},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.8.4 ์‹œํ—˜ ๋ฐ์ดํ„ฐ ๋ณด์•ˆ","Subdomain": "2.8. ์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… ๋ฐ ๊ฐœ๋ฐœ ๋ณด์•ˆ","AuditEvidence": ["์‹œํ—˜๋ฐ์ดํ„ฐ ํ˜„ํ™ฉ","์‹œํ—˜๋ฐ์ดํ„ฐ ์ƒ์„ฑ ๊ทœ์น™","์šด์˜๋ฐ์ดํ„ฐ๋ฅผ ์‹œํ—˜ํ™˜๊ฒฝ์— ์‚ฌ์šฉํ•œ ๊ฒฝ์šฐ, ๊ด€๋ จ ์Šน์ธ ์ด๋ ฅ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์˜ ๊ฐœ๋ฐœ ๋ฐ ์‹œํ—˜ ๊ณผ์ •์—์„œ ์‹ค์ œ ์šด์˜ ๋ฐ์ดํ„ฐ์˜ ์‚ฌ์šฉ์„ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ถˆ๊ฐ€ํ”ผํ•˜๊ฒŒ ์šด์˜๋ฐ์ดํ„ฐ๋ฅผ ์‹œํ—˜ ํ™˜๊ฒฝ์—์„œ ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ ์ฑ…์ž„์ž ์Šน์ธ, ์ ‘๊ทผ ๋ฐ ์œ ์ถœ๋ชจ๋‹ˆํ„ฐ๋ง, ์‹œํ—˜ ํ›„ ๋ฐ์ดํ„ฐ ์‚ญ์ œ ๋“ฑ์˜ ํ†ต์ œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ๋ฐœ ์„œ๋ฒ„์—์„œ ์‚ฌ์šฉํ•  ์‹œํ—˜ ๋ฐ์ดํ„ฐ ์ƒ์„ฑ์— ๋Œ€ํ•œ ๊ตฌ์ฒด์  ๊ธฐ์ค€ ๋ฐ ์ ˆ์ฐจ๊ฐ€ ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋ฐ ์ฑ…์ž„์ž ์Šน์ธ ์—†์ด ์‹ค ์šด์˜๋ฐ์ดํ„ฐ๋ฅผ ๊ฐ€๊ณตํ•˜์ง€ ์•Š๊ณ  ์‹œํ—˜ ๋ฐ์ดํ„ฐ๋กœ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ถˆ๊ฐ€ํ”ผํ•œ ์‚ฌ์œ ๋กœ ์‚ฌ์ „ ์Šน์ธ์„ ๋ฐ›์•„ ์‹ค ์šด์˜๋ฐ์ดํ„ฐ๋ฅผ ์‹œํ—˜ ์šฉ๋„๋กœ ์‚ฌ์šฉํ•˜๋ฉด์„œ, ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์— ๋Œ€ํ•˜์—ฌ ์šด์˜ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์™€ ๋™์ผํ•œ ์ˆ˜์ค€์˜ ์ ‘๊ทผํ†ต์ œ๋ฅผ ์ ์šฉํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์‹ค ์šด์˜๋ฐ์ดํ„ฐ๋ฅผ ํ…Œ์ŠคํŠธ ์šฉ๋„๋กœ ์‚ฌ์šฉํ•œ ํ›„ ํ…Œ์ŠคํŠธ๊ฐ€ ์™„๋ฃŒ๋˜์—ˆ์Œ์—๋„ ์‹ค ์šด์˜๋ฐ์ดํ„ฐ๋ฅผ ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์—์„œ ์‚ญ์ œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์‹œ์Šคํ…œ ์‹œํ—˜ ๊ณผ์ •์—์„œ ์šด์˜๋ฐ์ดํ„ฐ์˜ ์œ ์ถœ์„ ์˜ˆ๋ฐฉํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์‹œํ—˜ ๋ฐ์ดํ„ฐ์˜ ์ƒ์„ฑ๊ณผ ์ด์šฉ ๋ฐ ๊ด€๋ฆฌ, ํŒŒ๊ธฐ, ๊ธฐ์ˆ ์  ๋ณดํ˜ธ์กฐ์น˜์— ๊ด€ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.8.5": {"name": "์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ ๊ด€๋ฆฌ","checks": {"ecr_repositories_not_publicly_accessible": "PASS","codeartifact_packages_external_public_publishing_disabled": null},"status": "PASS","attributes": [{"Domain": "2. 
๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.8.5 ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ ๊ด€๋ฆฌ","Subdomain": "2.8. ์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… ๋ฐ ๊ฐœ๋ฐœ ๋ณด์•ˆ","AuditEvidence": ["SVN ๋“ฑ ํ˜•์ƒ๊ด€๋ฆฌ์‹œ์Šคํ…œ ์šด์˜ ํ˜„ํ™ฉ(์ ‘๊ทผ๊ถŒํ•œ์ž ๋ชฉ๋ก ๋“ฑ)","์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ ๋ณ€๊ฒฝ ์ด๋ ฅ"],"AuditChecklist": ["๋น„์ธ๊ฐ€์ž์— ์˜ํ•œ ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ ์ ‘๊ทผ์„ ํ†ต์ œํ•˜๊ธฐ ์œ„ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ์€ ์žฅ์•  ๋“ฑ ๋น„์ƒ์‹œ๋ฅผ ๋Œ€๋น„ํ•˜์—ฌ ์šด์˜ํ™˜๊ฒฝ์ด ์•„๋‹Œ ๊ณณ์— ์•ˆ์ „ํ•˜๊ฒŒ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ์— ๋Œ€ํ•œ ๋ณ€๊ฒฝ์ด๋ ฅ์„ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋ณ„๋„์˜ ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ ๋ฐฑ์—… ๋ฐ ํ˜•์ƒ๊ด€๋ฆฌ์‹œ์Šคํ…œ์ด ๊ตฌ์ถ•๋˜์–ด ์žˆ์ง€ ์•Š์œผ๋ฉฐ, ์ด์ „ ๋ฒ„์ „์˜ ์†Œ์Šค ์ฝ”๋“œ๋ฅผ ์šด์˜ ์„œ๋ฒ„ ๋˜๋Š” ๊ฐœ๋ฐœ์ž PC์— ์Šน์ธ ๋ฐ ์ด๋ ฅ๊ด€๋ฆฌ ์—†์ด ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ํ˜•์ƒ๊ด€๋ฆฌ์‹œ์Šคํ…œ์„ ๊ตฌ์ถ•ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜ ํ˜•์ƒ๊ด€๋ฆฌ์‹œ์Šคํ…œ ๋˜๋Š” ํ˜•์ƒ๊ด€๋ฆฌ์‹œ์Šคํ…œ์— ์ €์žฅ๋œ ์†Œ์Šค์ฝ”๋“œ์— ๋Œ€ํ•œ ์ ‘๊ทผ์ œํ•œ, ์ ‘๊ทผ ๋ฐ ๋ณ€๊ฒฝ์ด๋ ฅ์ด ์ ์ ˆํžˆ ๊ด€๋ฆฌ๋˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ๊ทœ์ •์—๋Š” ํ˜•์ƒ๊ด€๋ฆฌ์‹œ์Šคํ…œ์„ ํ†ตํ•˜์—ฌ ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ ๋ฒ„์ „๊ด€๋ฆฌ๋ฅผ ํ•˜๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์ตœ์‹  ๋ฒ„์ „์˜ ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ์€ ๊ฐœ๋ฐœ์ž PC์—๋งŒ ๋ณด๊ด€๋˜์–ด ์žˆ๊ณ  ์ด์— ๋Œ€ํ•œ ๋ณ„๋„์˜ ๋ฐฑ์—…์ด ์ˆ˜ํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ์€ ์ธ๊ฐ€๋œ ์‚ฌ์šฉ์ž๋งŒ์ด ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ด€๋ฆฌํ•˜๊ณ , ์šด์˜ํ™˜๊ฒฝ์— ๋ณด๊ด€ํ•˜์ง€ ์•Š๋Š” ๊ฒƒ์„ ์›์น™์œผ๋กœ ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"2.8.6": {"name": "์šด์˜ํ™˜๊ฒฝ ์ด๊ด€","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.8.6 ์šด์˜ํ™˜๊ฒฝ ์ด๊ด€","Subdomain": "2.8. 
์ •๋ณด์‹œ์Šคํ…œ ๋„์ž… ๋ฐ ๊ฐœ๋ฐœ ๋ณด์•ˆ","AuditEvidence": ["์ด๊ด€ ์ ˆ์ฐจ","์ด๊ด€ ๋‚ด์—ญ(์‹ ์ฒญยท์Šน์ธ, ์‹œํ—˜, ์ด๊ด€ ๋“ฑ)"],"AuditChecklist": ["์‹ ๊ทœ ๋„์ž…ยท๊ฐœ๋ฐœ ๋ฐ ๋ณ€๊ฒฝ๋œ ์‹œ์Šคํ…œ์„ ์šด์˜ํ™˜๊ฒฝ์œผ๋กœ ์•ˆ์ „ํ•˜๊ฒŒ ์ด๊ด€ํ•˜๊ธฐ ์œ„ํ•œ ํ†ต์ œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์šด์˜ํ™˜๊ฒฝ์œผ๋กœ ์ด๊ด€ ์‹œ ๋ฐœ์ƒํ•  ์ˆ˜ ์žˆ๋Š” ๋ฌธ์ œ์— ๋Œ€ํ•œ ๋Œ€์‘ ๋ฐฉ์•ˆ์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์šด์˜ํ™˜๊ฒฝ์—๋Š” ์„œ๋น„์Šค ์‹คํ–‰์— ํ•„์š”ํ•œ ํŒŒ์ผ๋งŒ์„ ์„ค์น˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ๋ฐœยท๋ณ€๊ฒฝ์ด ์™„๋ฃŒ๋œ ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ์„ ์šด์˜ํ™˜๊ฒฝ์œผ๋กœ ์ด๊ด€ ์‹œ ๊ฒ€ํ† ยท์Šน์ธํ•˜๋Š” ์ ˆ์ฐจ๊ฐ€ ๋งˆ๋ จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์šด์˜์„œ๋ฒ„์— ์„œ๋น„์Šค ์‹คํ–‰์— ๋ถˆํ•„์š”ํ•œ ํŒŒ์ผ(์†Œ์Šค์ฝ”๋“œ ๋˜๋Š” ๋ฐฐํฌ๋ชจ๋“ˆ, ๋ฐฑ์—…๋ณธ, ๊ฐœ๋ฐœ ๊ด€๋ จ ๋ฌธ์„œ, ๋งค๋‰ด์–ผ ๋“ฑ)์ด ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ์ง€์นจ์— ์šด์˜ํ™˜๊ฒฝ ์ด๊ด€ ์‹œ ์•ˆ์ „ํ•œ ์ด๊ด€ยท๋ณต๊ตฌ๋ฅผ ์œ„ํ•˜์—ฌ ๋ณ€๊ฒฝ์ž‘์—… ์š”์ฒญ์„œ ๋ฐ ๊ฒฐ๊ณผ์„œ๋ฅผ ์ž‘์„ฑํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ด€๋ จ ๋ฌธ์„œ๊ฐ€ ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ๋ชจ๋ฐ”์ผ ์•ฑ์„ ์•ฑ๋งˆ์ผ“์— ๋ฐฐํฌํ•˜๋Š” ๊ฒฝ์šฐ ๋‚ด๋ถ€ ๊ฒ€ํ†  ๋ฐ ์Šน์ธ์„ ๋ฐ›๋„๋ก ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ฐœ๋ฐœ์ž๊ฐ€ ํ•ด๋‹น ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜์ง€ ์•Š๊ณ  ์ž„์˜๋กœ ์•ฑ๋งˆ์ผ“์— ๋ฐฐํฌํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์‹ ๊ทœ ๋„์ž…ยท๊ฐœ๋ฐœ ๋˜๋Š” ๋ณ€๊ฒฝ๋œ ์‹œ์Šคํ…œ์„ ์šด์˜ํ™˜๊ฒฝ์œผ๋กœ ์ด๊ด€ํ•  ๋•Œ๋Š” ํ†ต์ œ๋œ ์ ˆ์ฐจ๋ฅผ ๋”ฐ๋ผ์•ผ ํ•˜๊ณ , ์‹คํ–‰์ฝ”๋“œ๋Š” ์‹œํ—˜ ๋ฐ ์‚ฌ์šฉ์ž ์ธ์ˆ˜ ์ ˆ์ฐจ์— ๋”ฐ๋ผ ์‹คํ–‰๋˜์–ด์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.9.1": {"name": "๋ณ€๊ฒฝ๊ด€๋ฆฌ","checks": {"codebuild_project_older_90_days": "FAIL","config_recorder_all_regions_enabled": null,"cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.1 ๋ณ€๊ฒฝ๊ด€๋ฆฌ","Subdomain": "2.9. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["๋ณ€๊ฒฝ๊ด€๋ฆฌ ์ ˆ์ฐจ","๋ณ€๊ฒฝ๊ด€๋ฆฌ ์ˆ˜ํ–‰ ๋‚ด์—ญ(์‹ ์ฒญยท์Šน์ธ, ๋ณ€๊ฒฝ ๋‚ด์—ญ ๋“ฑ)","๋ณ€๊ฒฝ์— ๋”ฐ๋ฅธ ์˜ํ–ฅ๋ถ„์„ ๊ฒฐ๊ณผ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ ๊ด€๋ จ ์ž์‚ฐ(ํ•˜๋“œ์›จ์–ด, ์šด์˜์ฒด์ œ, ์ƒ์šฉ ์†Œํ”„ํŠธ์›จ์–ด ํŒจํ‚ค์ง€ ๋“ฑ) ๋ณ€๊ฒฝ์— ๊ด€ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ ๊ด€๋ จ ์ž์‚ฐ ๋ณ€๊ฒฝ์„ ์ˆ˜ํ–‰ํ•˜๊ธฐ ์ „ ์„ฑ๋Šฅ ๋ฐ ๋ณด์•ˆ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๋ถ„์„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ตœ๊ทผ DMZ ๊ตฌ๊ฐ„ ์ด์ค‘ํ™”์— ๋”ฐ๋ฅธ ๋ณ€๊ฒฝ ์ž‘์—…์„ ์ˆ˜ํ–‰ํ•˜์˜€์œผ๋‚˜, ๋ณ€๊ฒฝ ํ›„ ๋ฐœ์ƒํ•  ์ˆ˜ ์žˆ๋Š” ๋ณด์•ˆ์œ„ํ—˜์„ฑ ๋ฐ ์„ฑ๋Šฅ ํ‰๊ฐ€์— ๋Œ€ํ•œ ์ˆ˜ํ–‰ยท์Šน์ธ ์ฆ๊ฑฐ์ž๋ฃŒ๊ฐ€ ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ตœ๊ทผ ๋„คํŠธ์›Œํฌ ๋ณ€๊ฒฝ ์ž‘์—…์„ ์ˆ˜ํ–‰ํ•˜์˜€์œผ๋‚˜ ๊ด€๋ จ ๊ฒ€ํ†  ๋ฐ ๊ณต์ง€๊ฐ€ ์ถฉ๋ถ„ํžˆ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์•„ ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„ ๋ฐ ์ผ๋ถ€ ์ ‘๊ทผํ†ต์ œ์‹œ์Šคํ…œ(์นจ์ž…์ฐจ๋‹จ์‹œ์Šคํ…œ, ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ์ œ์–ด์‹œ์Šคํ…œ ๋“ฑ)์˜ ์ ‘๊ทผํ†ต์ œ ๋ฆฌ์ŠคํŠธ(ACL)์— ์ ์ ˆํžˆ ๋ฐ˜์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ณ€๊ฒฝ๊ด€๋ฆฌ์‹œ์Šคํ…œ์„ ๊ตฌ์ถ•ํ•˜์—ฌ ์ •๋ณด์‹œ์Šคํ…œ ์ž…๊ณ  ๋˜๋Š” ๋ณ€๊ฒฝ ์‹œ ์„ฑ๋Šฅ ๋ฐ ๋ณด์•ˆ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๋ถ„์„ ๋ฐํ˜‘์˜ํ•˜๊ณ  ๊ด€๋ จ ์ด๋ ฅ์„ ๊ด€๋ฆฌํ•˜๋„๋ก ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ํ•ด๋‹น ์‹œ์Šคํ…œ์„ ํ†ตํ•˜์ง€ ์•Š๊ณ ๋„ ์‹œ์Šคํ…œ ๋ณ€๊ฒฝ์ด ๊ฐ€๋Šฅํ•˜๋ฉฐ, ๊ด€๋ จ ๋ณ€๊ฒฝ์‚ฌํ•ญ์ด ์ ์ ˆํžˆ ๊ฒ€ํ† ๋˜์ง€ ์•Š๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ •๋ณด์‹œ์Šคํ…œ ๊ด€๋ จ ์ž์‚ฐ์˜ ๋ชจ๋“  ๋ณ€๊ฒฝ๋‚ด์—ญ์„ ๊ด€๋ฆฌํ•  ์ˆ˜ ์žˆ๋„๋ก ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ๋ณ€๊ฒฝ ์ „ ์‹œ์Šคํ…œ์˜ ์„ฑ๋Šฅ ๋ฐ ๋ณด์•ˆ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๋ถ„์„ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 2,"pass": 0,"total": 14,"manual": 0}},"2.9.2": {"name": "์„ฑ๋Šฅ ๋ฐ ์žฅ์• ๊ด€๋ฆฌ","checks": {"rds_cluster_multi_az": "FAIL","elb_is_in_multiple_az": "FAIL","rds_instance_multi_az": "FAIL","elbv2_is_in_multiple_az": "PASS","s3_bucket_no_mfa_delete": "FAIL","vpc_subnet_different_az": "PASS","neptune_cluster_multi_az": null,"elbv2_deletion_protection": "FAIL","autoscaling_group_multiple_az": null,"dms_instance_multi_az_enabled": null,"rds_cluster_backtrack_enabled": null,"cloudtrail_multi_region_enabled": "PASS","rds_cluster_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL","acm_certificates_expiration_check": "PASS","s3_bucket_cross_region_replication": "FAIL","trustedadvisor_errors_and_warnings": null,"config_recorder_all_regions_enabled": null,"kms_cmk_not_deleted_unintentionally": null,"neptune_cluster_deletion_protection": null,"networkfirewall_deletion_protection": null,"rds_instance_certificate_expiration": "PASS","route53_domains_transferlock_enabled": null,"cloudtrail_bucket_requires_mfa_delete": null,"elb_cross_zone_load_balancing_enabled": "PASS","documentdb_cluster_deletion_protection": null,"documentdb_cluster_cloudwatch_log_export": null,"ec2_instance_detailed_monitoring_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","iam_no_expired_server_certificates_stored": null,"kafka_cluster_enhanced_monitoring_enabled": null,"elasticache_redis_cluster_multi_az_enabled": null,"directoryservice_ldap_certificate_expiration": null,"cognito_user_pool_deletion_protection_enabled": null,"trustedadvisor_premium_support_plan_subscribed": null,"directoryservice_directory_monitor_notifications": null,"cloudformation_stacks_termination_protection_enabled": "FAIL","cloudtrail_multi_region_enabled_logging_management_events": null,"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": 
"FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.2 ์„ฑ๋Šฅ ๋ฐ ์žฅ์• ๊ด€๋ฆฌ","Subdomain": "2.9. ์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["์„ฑ๋Šฅ ๋ฐ ์šฉ๋Ÿ‰ ๋ชจ๋‹ˆํ„ฐ๋ง ์ ˆ์ฐจ","์„ฑ๋Šฅ ๋ฐ ์šฉ๋Ÿ‰ ๋ชจ๋‹ˆํ„ฐ๋ง ์ฆ๊ฑฐ์ž๋ฃŒ(๋‚ด๋ถ€๋ณด๊ณ  ๊ฒฐ๊ณผ ๋“ฑ)","์žฅ์• ๋Œ€์‘ ์ ˆ์ฐจ","์žฅ์• ์กฐ์น˜๋ณด๊ณ ์„œ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์˜ ๊ฐ€์šฉ์„ฑ ๋ณด์žฅ์„ ์œ„ํ•˜์—ฌ ์„ฑ๋Šฅ ๋ฐ ์šฉ๋Ÿ‰์„ ์ง€์†์ ์œผ๋กœ ๋ชจ๋‹ˆํ„ฐ๋งํ•  ์ˆ˜ ์žˆ๋Š” ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ ์„ฑ๋Šฅ ๋ฐ ์šฉ๋Ÿ‰ ์š”๊ตฌ์‚ฌํ•ญ(์ž„๊ณ„์น˜)์„ ์ดˆ๊ณผํ•˜๋Š” ๊ฒฝ์šฐ์— ๋Œ€ํ•œ ๋Œ€์‘์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ ์žฅ์• ๋ฅผ ์ฆ‰์‹œ ์ธ์ง€ํ•˜๊ณ  ๋Œ€์‘ํ•˜๊ธฐ ์œ„ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์žฅ์•  ๋ฐœ์ƒ ์‹œ ์ ˆ์ฐจ์— ๋”ฐ๋ผ ์กฐ์น˜ํ•˜๊ณ  ์žฅ์• ์กฐ์น˜๋ณด๊ณ ์„œ ๋“ฑ์„ ํ†ตํ•˜์—ฌ ์žฅ์• ์กฐ์น˜๋‚ด์—ญ์„ ๊ธฐ๋กํ•˜์—ฌ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‹ฌ๊ฐ๋„๊ฐ€ ๋†’์€ ์žฅ์• ์˜ ๊ฒฝ์šฐ ์›์ธ๋ถ„์„์„ ํ†ตํ•œ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์„ฑ๋Šฅ ๋ฐ ์šฉ๋Ÿ‰ ๊ด€๋ฆฌ๋ฅผ ์œ„ํ•œ ๋Œ€์ƒ๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ(์ž„๊ณ„์น˜ ๋“ฑ)์„ ์ •์˜ํ•˜๊ณ  ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜ ์ •๊ธฐ ์ ๊ฒ€๋ณด๊ณ ์„œ ๋“ฑ์— ๊ธฐ๋กํ•˜๊ณ  ์žˆ์ง€ ์•Š์•„ ํ˜„ํ™ฉ์„ ํŒŒ์•…ํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์„ฑ๋Šฅ ๋˜๋Š” ์šฉ๋Ÿ‰ ๊ธฐ์ค€์„ ์ดˆ๊ณผํ•˜์˜€์œผ๋‚˜ ๊ด€๋ จ ๊ฒ€ํ†  ๋ฐ ํ›„์†์กฐ์น˜๋ฐฉ์•ˆ ์ˆ˜๋ฆฝยท์ดํ–‰์ด ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ „์‚ฐ์žฅ๋น„ ์žฅ์• ๋Œ€์‘์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ์œผ๋‚˜ ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ ๋ฐ ์™ธ์ฃผ์—…์ฒด ๋ณ€๊ฒฝ ๋“ฑ์˜ ๋‚ดยท์™ธ๋ถ€ ํ™˜๊ฒฝ๋ณ€ํ™”๊ฐ€ ์ ์ ˆํžˆ ๋ฐ˜์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์žฅ์• ์ฒ˜๋ฆฌ์ ˆ์ฐจ์™€ ์žฅ์• ์œ ํ˜•๋ณ„ ์กฐ์น˜๋ฐฉ๋ฒ• ๊ฐ„ ์ผ๊ด€์„ฑ์ด ์—†๊ฑฐ๋‚˜ ์˜ˆ์ƒ์†Œ์š”์‹œ๊ฐ„ ์‚ฐ์ •์— ๋Œ€ํ•œ ๊ทผ๊ฑฐ๊ฐ€ ๋ถ€์กฑํ•˜์—ฌ ์‹ ์†ยท์ •ํ™•ํ•˜๊ณ  ์ฒด๊ณ„์ ์ธ ๋Œ€์‘์ด ์–ด๋ ค์šด ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ •๋ณด์‹œ์Šคํ…œ์˜ ๊ฐ€์šฉ์„ฑ ๋ณด์žฅ์„ ์œ„ํ•˜์—ฌ ์„ฑ๋Šฅ ๋ฐ ์šฉ๋Ÿ‰ ์š”๊ตฌ์‚ฌํ•ญ์„ ์ •์˜ํ•˜๊ณ  ํ˜„ํ™ฉ์„ ์ง€์†์ ์œผ๋กœ ๋ชจ๋‹ˆํ„ฐ๋งํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ์žฅ์•  ๋ฐœ์ƒ ์‹œ ํšจ๊ณผ์ ์œผ๋กœ ๋Œ€์‘ํ•˜๊ธฐ ์œ„ํ•œ ํƒ์ง€ยท๊ธฐ๋กยท๋ถ„์„ยท๋ณต๊ตฌยท๋ณด๊ณ  ๋“ฑ์˜ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 11,"pass": 6,"total": 39,"manual": 0}},"2.9.3": {"name": "๋ฐฑ์—… ๋ฐ ๋ณต๊ตฌ๊ด€๋ฆฌ","checks": {"ec2_ami_public": null,"backup_plans_exist": "PASS","backup_vaults_exist": null,"backup_vaults_encrypted": "PASS","ec2_ebs_public_snapshot": "PASS","efs_have_backup_enabled": "FAIL","s3_bucket_public_access": null,"backup_reportplans_exist": null,"s3_bucket_kms_encryption": "FAIL","s3_bucket_public_list_acl": null,"s3_bucket_public_write_acl": null,"ec2_ebs_snapshots_encrypted": "FAIL","rds_instance_backup_enabled": "PASS","rds_snapshots_public_access": "PASS","s3_bucket_lifecycle_enabled": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"s3_bucket_default_encryption": "PASS","rds_cluster_backtrack_enabled": null,"neptune_cluster_backup_enabled": null,"ec2_ebs_volume_snapshots_exists": "FAIL","neptune_cluster_public_snapshot": null,"documentdb_cluster_backup_enabled": null,"documentdb_cluster_public_snapshot": null,"rds_cluster_copy_tags_to_snapshots": "FAIL","s3_bucket_cross_region_replication": "FAIL","rds_instance_copy_tags_to_snapshots": null,"redshift_cluster_automated_snapshot": null,"s3_access_point_public_access_block": "PASS","s3_bucket_policy_public_write_access": "PASS","lightsail_instance_automated_snapshots": null,"dlm_ebs_snapshot_lifecycle_policy_exists": 
"FAIL","elasticache_redis_cluster_backup_enabled": null,"ecr_repositories_lifecycle_policy_enabled": "FAIL","directoryservice_directory_snapshots_limit": null,"ec2_ebs_snapshot_account_block_public_access": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.3 ๋ฐฑ์—… ๋ฐ ๋ณต๊ตฌ๊ด€๋ฆฌ","Subdomain": "2.9. ์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["๋ฐฑ์—… ๋ฐ ๋ณต๊ตฌ ์ ˆ์ฐจ","๋ณต๊ตฌํ…Œ์ŠคํŠธ ๊ฒฐ๊ณผ","์†Œ์‚ฐ๋ฐฑ์—… ํ˜„ํ™ฉ"],"AuditChecklist": ["๋ฐฑ์—… ๋Œ€์ƒ, ์ฃผ๊ธฐ, ๋ฐฉ๋ฒ•, ์ ˆ์ฐจ ๋“ฑ์ด ํฌํ•จ๋œ ๋ฐฑ์—… ๋ฐ ๋ณต๊ตฌ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฐฑ์—…๋œ ์ •๋ณด์˜ ์™„์ „์„ฑ๊ณผ ์ •ํ™•์„ฑ, ๋ณต๊ตฌ์ ˆ์ฐจ์˜ ์ ์ ˆ์„ฑ์„ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ •๊ธฐ์ ์œผ๋กœ๋ณต๊ตฌ ํ…Œ์ŠคํŠธ๋ฅผ ์‹ค์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ค‘์š”์ •๋ณด๊ฐ€ ์ €์žฅ๋œ ๋ฐฑ์—…๋งค์ฒด์˜ ๊ฒฝ์šฐ ์žฌํ•ดยท์žฌ๋‚œ์— ๋Œ€์ฒ˜ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ฐฑ์—…๋งค์ฒด๋ฅผ๋ฌผ๋ฆฌ์ ์œผ๋กœ ๋–จ์–ด์ง„ ์žฅ์†Œ์— ์†Œ์‚ฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋ฐฑ์—… ๋Œ€์ƒ, ์ฃผ๊ธฐ, ๋ฐฉ๋ฒ•, ์ ˆ์ฐจ ๋“ฑ์ด ํฌํ•จ๋œ ๋ฐฑ์—… ๋ฐ ๋ณต๊ตฌ ์ ˆ์ฐจ๊ฐ€ ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ฐฑ์—…์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ์œผ๋‚˜ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์— ๋”ฐ๋ผ ์žฅ๊ธฐ๊ฐ„(6๊ฐœ์›”, 3๋…„, 5๋…„ ๋“ฑ) ๋ณด๊ด€์ด ํ•„์š”ํ•œ ๋ฐฑ์—… ๋Œ€์ƒ ์ •๋ณด๊ฐ€ ๋ฐฑ์—… ์ •์ฑ…์— ๋”ฐ๋ผ ๋ณด๊ด€๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ƒ์œ„ ์ง€์นจ ๋˜๋Š” ๋‚ด๋ถ€ ์ง€์นจ์— ๋”ฐ๋ผ ๋ณ„๋„๋กœ ๋ฐฑ์—…ํ•˜์—ฌ ๊ด€๋ฆฌํ•˜๋„๋ก ๋ช…์‹œ๋œ ์ผ๋ถ€ ์‹œ์Šคํ…œ(๋ณด์•ˆ์‹œ์Šคํ…œ ์ •์ฑ… ๋ฐ ๋กœ๊ทธ ๋“ฑ)์— ๋Œ€ํ•œ ๋ฐฑ์—…์ด ์ดํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ƒ์œ„ ์ง€์นจ ๋˜๋Š” ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ์ฃผ๊ธฐ์ ์œผ๋กœ ๋ฐฑ์—…๋งค์ฒด์— ๋Œ€ํ•œ ๋ณต๊ตฌ ํ…Œ์ŠคํŠธ๋ฅผ ์ˆ˜ํ–‰ํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜ ๋ณต๊ตฌํ…Œ์ŠคํŠธ๋ฅผ ์žฅ๊ธฐ๊ฐ„ ์‹ค์‹œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜ ์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ11์กฐ(์žฌํ•ดยท์žฌ๋‚œ ๋Œ€๋น„ ์•ˆ์ „์กฐ์น˜)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ์˜ ๊ฐ€์šฉ์„ฑ๊ณผ ๋ฐ์ดํ„ฐ ๋ฌด๊ฒฐ์„ฑ์„ ์œ ์ง€ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๋ฐฑ์—… ๋Œ€์ƒ, ์ฃผ๊ธฐ, ๋ฐฉ๋ฒ•, ๋ณด๊ด€์žฅ์†Œ, ๋ณด๊ด€๊ธฐ๊ฐ„, ์†Œ์‚ฐ ๋“ฑ์˜ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค. 
์•„์šธ๋Ÿฌ ์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ์ ์‹œ์— ๋ณต๊ตฌํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 11,"pass": 8,"total": 37,"manual": 0}},"2.9.4": {"name": "๋กœ๊ทธ ๋ฐ ์ ‘์†๊ธฐ๋ก ๊ด€๋ฆฌ","checks": {"macie_is_enabled": "PASS","elb_logging_enabled": "FAIL","securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","elbv2_logging_enabled": "FAIL","inspector2_is_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","ec2_ebs_public_snapshot": "PASS","eventbridge_bus_exposed": "PASS","rds_snapshots_encrypted": "FAIL","s3_bucket_public_access": null,"s3_bucket_kms_encryption": "FAIL","cloudtrail_insights_exist": null,"s3_bucket_public_list_acl": null,"s3_bucket_public_write_acl": null,"ec2_ebs_snapshots_encrypted": "FAIL","ec2_instance_managed_by_ssm": "FAIL","efs_not_publicly_accessible": "FAIL","guardduty_centrally_managed": "FAIL","rds_snapshots_public_access": "PASS","s3_bucket_default_encryption": "PASS","wafv2_webacl_logging_enabled": "FAIL","iam_securityaudit_role_created": null,"redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","config_recorder_all_regions_enabled": null,"s3_access_point_public_access_block": "PASS","s3_bucket_level_public_access_block": "PASS","eventbridge_bus_cross_account_access": "FAIL","s3_bucket_policy_public_write_access": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"s3_account_level_public_access_blocks": null,"cloudtrail_log_file_validation_enabled": "FAIL","cloudtrail_s3_dataevents_write_enabled": null,"apigatewayv2_api_access_logging_enabled": "FAIL","cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"s3_bucket_server_access_logging_enabled": "FAIL","cloudfront_distributions_logging_enabled": null,"documentdb_cluster_cloudwatch_log_export": null,"ec2_instance_detailed_monitoring_enabled": "FAIL","rds_instance_enhanced_monitoring_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","cloudwatch_cross_account_sharing_disabled": null,"kafka_cluster_enhanced_monitoring_enabled": null,"acm_certificates_transparency_logs_enabled": "PASS","cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"eks_control_plane_logging_all_types_enabled": null,"ec2_ebs_snapshot_account_block_public_access": null,"iam_inline_policy_no_full_access_to_cloudtrail": null,"trustedadvisor_premium_support_plan_subscribed": null,"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL","cloudwatch_log_metric_filter_sign_in_without_mfa": null,"directoryservice_directory_monitor_notifications": null,"eventbridge_schema_registry_cross_account_access": "FAIL","glue_etl_jobs_cloudwatch_logs_encryption_enabled": "FAIL","opensearch_service_domains_audit_logging_enabled": null,"directoryservice_directory_log_forwarding_enabled": null,"ec2_client_vpn_endpoint_connection_logging_enabled": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS","cloudwatch_log_metric_filter_authentication_failures": null,"opensearch_service_domains_cloudwatch_logging_enabled": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"route53_public_hosted_zones_cloudwatch_logging_enabled": 
null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudtrail_multi_region_enabled_logging_management_events": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","glue_development_endpoints_cloudwatch_logs_encryption_enabled": null,"awslambda_function_invoke_api_operations_cloudtrail_logging_enabled": "PASS","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.4 ๋กœ๊ทธ ๋ฐ ์ ‘์†๊ธฐ๋ก ๊ด€๋ฆฌ","Subdomain": "2.9. ์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["๋กœ๊ทธ๊ด€๋ฆฌ ์ ˆ์ฐจ","๋กœ๊ทธ๊ธฐ๋ก ๋‚ด์—ญ","๋กœ๊ทธ ์ €์žฅ์žฅ์น˜์— ๋Œ€ํ•œ ์ ‘๊ทผํ†ต์ œ ๋‚ด์—ญ","๊ฐœ์ธ์ •๋ณด ์ ‘์†๊ธฐ๋ก ๋‚ด์—ญ"],"AuditChecklist": ["์„œ๋ฒ„, ์‘์šฉํ”„๋กœ๊ทธ๋žจ, ๋ณด์•ˆ์‹œ์Šคํ…œ, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ ๋“ฑ ์ •๋ณด์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ๋กœ๊ทธ๊ด€๋ฆฌ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์ด์— ๋”ฐ๋ผ ํ•„์š”ํ•œ ๋กœ๊ทธ๋ฅผ ์ƒ์„ฑํ•˜์—ฌ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ์˜ ๋กœ๊ทธ๊ธฐ๋ก์€ ์œ„ยท๋ณ€์กฐ ๋ฐ ๋„๋‚œ, ๋ถ„์‹ค๋˜์ง€ ์•Š๋„๋ก ์•ˆ์ „ํ•˜๊ฒŒ ๋ณด๊ด€ํ•˜๊ณ  ๋กœ๊ทธ๊ธฐ๋ก์— ๋Œ€ํ•œ ์ ‘๊ทผ๊ถŒํ•œ์€ ์ตœ์†Œํ™”ํ•˜์—ฌ ๋ถ€์—ฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์ ‘์†๊ธฐ๋ก์€ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ์ค€์ˆ˜ํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•„์š”ํ•œ ํ•ญ๋ชฉ์„ ๋ชจ๋‘ ํฌํ•จํ•˜์—ฌ ์ผ์ •๊ธฐ๊ฐ„ ์•ˆ์ „ํ•˜๊ฒŒ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋กœ๊ทธ ๊ธฐ๋ก ๋Œ€์ƒ, ๋ฐฉ๋ฒ•, ๋ณด์กด๊ธฐ๊ฐ„, ๊ฒ€ํ†  ์ฃผ๊ธฐ, ๋‹ด๋‹น์ž ๋“ฑ์— ๋Œ€ํ•œ ์„ธ๋ถ€ ๊ธฐ์ค€ ๋ฐ ์ ˆ์ฐจ๊ฐ€ ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ณด์•ˆ ์ด๋ฒคํŠธ ๋กœ๊ทธ, ์‘์šฉํ”„๋กœ๊ทธ๋žจ ๋ฐ ์„œ๋น„์Šค ๋กœ๊ทธ(์œˆ๋„์šฐ 2008 ์„œ๋ฒ„ ์ด์ƒ) ๋“ฑ ์ค‘์š” ๋กœ๊ทธ์— ๋Œ€ํ•œ ์ตœ๋Œ€ ํฌ๊ธฐ๋ฅผ ์ถฉ๋ถ„ํ•˜๊ฒŒ ์„ค์ •ํ•˜์ง€ ์•Š์•„ ๋‚ด๋ถ€ ๊ธฐ์ค€์— ์ •ํ•œ ๊ธฐ๊ฐ„ ๋™์•ˆ ๊ธฐ๋กยท๋ณด๊ด€๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ค‘์š” Linux/UNIX ๊ณ„์—ด ์„œ๋ฒ„์— ๋Œ€ํ•œ ๋กœ๊ทธ ๊ธฐ๋ก์„ ๋ณ„๋„๋กœ ๋ฐฑ์—…ํ•˜๊ฑฐ๋‚˜ ์ ์ ˆํžˆ ๋ณดํ˜ธํ•˜์ง€ ์•Š์•„ ์‚ฌ์šฉ์ž์˜ ๋ช…๋ น ์‹คํ–‰ ๊ธฐ๋ก ๋ฐ ์ ‘์† ์ด๋ ฅ ๋“ฑ์„ ์ž„์˜๋กœ ์‚ญ์ œํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ์ ‘์†ํ•œ ๊ธฐ๋ก์„ ํ™•์ธํ•œ ๊ฒฐ๊ณผ ์ ‘์†์ž์˜ ๊ณ„์ •, ์ ‘์† ์ผ์‹œ, ์ ‘์†์ž IP์ฃผ์†Œ ์ •๋ณด๋Š” ๋‚จ๊ธฐ๊ณ  ์žˆ์œผ๋‚˜, ์ฒ˜๋ฆฌํ•œ ์ •๋ณด์ฃผ์ฒด ์ •๋ณด ๋ฐ ์ˆ˜ํ–‰์—…๋ฌด(์กฐํšŒ, ๋ณ€๊ฒฝ, ์‚ญ์ œ, ๋‹ค์šด๋กœ๋“œ ๋“ฑ)์™€ ๊ด€๋ จ๋œ ์ •๋ณด๋ฅผ ๋‚จ๊ธฐ๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๋กœ๊ทธ ์„œ๋ฒ„์˜ ์šฉ๋Ÿ‰์˜ ์ถฉ๋ถ„ํ•˜์ง€ ์•Š์•„์„œ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ์ ‘์†๊ธฐ๋ก์ด 2๊ฐœ์›” ๋ฐ–์— ๋‚จ์•„ ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๊ฐ€ ์ •๋ณด์ฃผ์ฒด 10๋งŒ ๋ช…์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž ์ ‘์†๊ธฐ๋ก์„ 1๋…„๊ฐ„๋งŒ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ8์กฐ(์ ‘์†๊ธฐ๋ก์˜ ๋ณด๊ด€ ๋ฐ ์ ๊ฒ€)"]}],"description": "์„œ๋ฒ„, ์‘์šฉํ”„๋กœ๊ทธ๋žจ, ๋ณด์•ˆ์‹œ์Šคํ…œ, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ ๋“ฑ ์ •๋ณด์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์‚ฌ์šฉ์ž ์ ‘์†๊ธฐ๋ก, ์‹œ์Šคํ…œ๋กœ๊ทธ, ๊ถŒํ•œ๋ถ€์—ฌ ๋‚ด์—ญ ๋“ฑ์˜ ๋กœ๊ทธ์œ ํ˜•, ๋ณด์กด๊ธฐ๊ฐ„, ๋ณด์กด๋ฐฉ๋ฒ• ๋“ฑ์„ ์ •ํ•˜๊ณ  ์œ„ยท๋ณ€์กฐ, ๋„๋‚œ, ๋ถ„์‹ค๋˜์ง€ ์•Š๋„๋ก ์•ˆ์ „ํ•˜๊ฒŒ ๋ณด์กดยท๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 
25,"pass": 15,"total": 81,"manual": 0}},"2.9.5": {"name": "๋กœ๊ทธ ๋ฐ ์ ‘์†๊ธฐ๋ก ์ ๊ฒ€","checks": {"cloudtrail_insights_exist": null,"inspector2_active_findings_exist": "FAIL","trustedadvisor_errors_and_warnings": null,"guardduty_no_high_severity_findings": "FAIL","accessanalyzer_enabled_without_findings": "FAIL","cloudtrail_threat_detection_enumeration": null,"cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_cross_account_sharing_disabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cognito_user_pool_client_prevent_user_existence_errors": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.5 ๋กœ๊ทธ ๋ฐ ์ ‘์†๊ธฐ๋ก ์ ๊ฒ€","Subdomain": "2.9. ์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["๋กœ๊ทธ ๊ฒ€ํ†  ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง ์ ˆ์ฐจ","๋กœ๊ทธ ๊ฒ€ํ†  ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง ๊ฒฐ๊ณผ(๊ฒ€ํ†  ๋‚ด์—ญ, ๋ณด๊ณ ์„œ ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ์ ‘์†๊ธฐ๋ก ์ ๊ฒ€ ๋‚ด์—ญ","๊ฐœ์ธ์ •๋ณด ๋‹ค์šด๋กœ๋“œ ์‹œ ์‚ฌ์œ  ํ™•์ธ ๊ธฐ์ค€ ๋ฐ ๊ฒฐ๊ณผ","์ด์ƒ์ง•ํ›„ ๋ฐœ๊ฒฌ ์‹œ ๋Œ€์‘ ์ฆ๊ฑฐ์ž๋ฃŒ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ ๊ด€๋ จ ์˜ค๋ฅ˜, ์˜คยท๋‚จ์šฉ(๋น„์ธ๊ฐ€์ ‘์†, ๊ณผ๋‹ค์กฐํšŒ ๋“ฑ), ๋ถ€์ •ํ–‰์œ„ ๋“ฑ ์ด์ƒ์ง•ํ›„๋ฅผ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋กœ๊ทธ ๊ฒ€ํ†  ์ฃผ๊ธฐ, ๋Œ€์ƒ, ๋ฐฉ๋ฒ• ๋“ฑ์„ ํฌํ•จํ•œ ๋กœ๊ทธ ๊ฒ€ํ†  ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋กœ๊ทธ ๊ฒ€ํ†  ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง ๊ฒฐ๊ณผ๋ฅผ ์ฑ…์ž„์ž์—๊ฒŒ ๋ณด๊ณ ํ•˜๊ณ  ์ด์ƒ์ง•ํ›„ ๋ฐœ๊ฒฌ ์‹œ ์ ˆ์ฐจ์— ๋”ฐ๋ผ ๋Œ€์‘ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ์ ‘์†๊ธฐ๋ก์€ ๊ด€๋ จ ๋ฒ•๋ น์—์„œ ์ •ํ•œ ์ฃผ๊ธฐ์— ๋”ฐ๋ผ ์ •๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ค‘์š” ์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š” ์ •๋ณด์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์ด์ƒ์ ‘์†(ํœด์ผ ์ƒˆ๋ฒฝ ์ ‘์†, ์šฐํšŒ๊ฒฝ๋กœ ์ ‘์† ๋“ฑ) ๋˜๋Š” ์ด์ƒํ–‰์œ„(๋Œ€๋Ÿ‰ ๋ฐ์ดํ„ฐ ์กฐํšŒ ๋˜๋Š” ์†Œ๋Ÿ‰ ๋ฐ์ดํ„ฐ์˜ ์ง€์†์ ยท์—ฐ์†์  ์กฐํšŒ ๋“ฑ)์— ๋Œ€ํ•œ ๋ชจ๋‹ˆํ„ฐ๋ง ๋ฐ ๊ฒฝ๊ณ ยท์•Œ๋ฆผ ์ •์ฑ…(๊ธฐ์ค€)์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ์ง€์นจ ๋˜๋Š” ์‹œ์Šคํ…œ ๋“ฑ์— ์ ‘๊ทผ ๋ฐ ์‚ฌ์šฉ์— ๋Œ€ํ•œ ์ฃผ๊ธฐ์ ์ธ ์ ๊ฒ€ยท๋ชจ๋‹ˆํ„ฐ๋ง ๊ธฐ์ค€์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ์œผ๋‚˜ ์‹ค์ œ ์ด์ƒ์ ‘์† ๋ฐ ์ด์ƒํ–‰์œ„์— ๋Œ€ํ•œ ๊ฒ€ํ†  ๋‚ด์—ญ์ด ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๊ฐ€ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์˜ ์ ‘์†๊ธฐ๋ก ์ ๊ฒ€ ์ฃผ๊ธฐ๋ฅผ ๋ถ„๊ธฐ 1ํšŒ๋กœ ์ •ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž์˜ ๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์—๋Š” 1,000๋ช… ์ด์ƒ์˜ ์ •๋ณด์ฃผ์ฒด์— ๋Œ€ํ•œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋‹ค์šด๋กœ๋“œํ•œ ๊ฒฝ์šฐ์—๋Š” ์‚ฌ์œ ๋ฅผ ํ™•์ธํ•˜๋„๋ก 
๊ธฐ์ค€์ด ์ฑ…์ •๋˜์–ด ์žˆ๋Š” ์ƒํƒœ์—์„œ 1,000๊ฑด ์ด์ƒ์˜ ๊ฐœ์ธ์ •๋ณด ๋‹ค์šด๋กœ๋“œ๊ฐ€ ๋ฐœ์ƒํ•˜์˜€์œผ๋‚˜ ๊ทธ ์‚ฌ์œ ๋ฅผ ํ™•์ธํ•˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ8์กฐ(์ ‘์†๊ธฐ๋ก์˜ ๋ณด๊ด€ ๋ฐ ์ ๊ฒ€)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ์˜ ์ •์ƒ์ ์ธ ์‚ฌ์šฉ์„ ๋ณด์žฅํ•˜๊ณ  ์‚ฌ์šฉ์ž ์˜คยท๋‚จ์šฉ(๋น„์ธ๊ฐ€์ ‘์†, ๊ณผ๋‹ค์กฐํšŒ ๋“ฑ)์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ ‘๊ทผ ๋ฐ ์‚ฌ์šฉ์— ๋Œ€ํ•œ ๋กœ๊ทธ ๊ฒ€ํ† ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜๋ฉฐ, ๋ฌธ์ œ ๋ฐœ์ƒ ์‹œ ์‚ฌํ›„์กฐ์น˜๋ฅผ ์ ์‹œ์— ์ˆ˜ํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 6,"pass": 0,"total": 26,"manual": 0}},"2.9.6": {"name": "์‹œ๊ฐ„ ๋™๊ธฐํ™”","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.6 ์‹œ๊ฐ„ ๋™๊ธฐํ™”","Subdomain": "2.9. ์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["์‹œ๊ฐ„ ๋™๊ธฐํ™” ์„ค์ •","์ฃผ์š” ์‹œ์Šคํ…œ ์‹œ๊ฐ„ ๋™๊ธฐํ™” ์ฆ๊ฑฐ์ž๋ฃŒ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ์˜ ์‹œ๊ฐ„์„ ํ‘œ์ค€์‹œ๊ฐ„์œผ๋กœ ๋™๊ธฐํ™”ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‹œ๊ฐ„ ๋™๊ธฐํ™”๊ฐ€ ์ •์ƒ์ ์œผ๋กœ ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ๋Š”์ง€ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ผ๋ถ€ ์ค‘์š” ์‹œ์Šคํ…œ(๋ณด์•ˆ์‹œ์Šคํ…œ, CCTV ๋“ฑ)์˜ ์‹œ๊ฐ์ด ํ‘œ์ค€์‹œ์™€ ๋™๊ธฐํ™”๋˜์–ด ์žˆ์ง€ ์•Š์œผ๋ฉฐ, ๊ด€๋ จ ๋™๊ธฐํ™” ์—ฌ๋ถ€์— ๋Œ€ํ•œ ์ฃผ๊ธฐ์  ์ ๊ฒ€์ด ์ดํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ NTP ์„œ๋ฒ„์™€ ์‹œ๊ฐ์„ ๋™๊ธฐํ™”ํ•˜๋„๋ก ์„ค์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜ ์ผ๋ถ€ ์‹œ์Šคํ…œ์˜ ์‹œ๊ฐ์ด ๋™๊ธฐํ™”๋˜์ง€ ์•Š๊ณ  ์žˆ๊ณ , ์ด์— ๋Œ€ํ•œ ์›์ธ๋ถ„์„ ๋ฐ ๋Œ€์‘์ด ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๋กœ๊ทธ ๋ฐ ์ ‘์†๊ธฐ๋ก์˜ ์ •ํ™•์„ฑ์„ ๋ณด์žฅํ•˜๊ณ  ์‹ ๋ขฐ์„ฑ ์žˆ๋Š” ๋กœ๊ทธ๋ถ„์„์„ ์œ„ํ•˜์—ฌ ๊ด€๋ จ ์ •๋ณด์‹œ์Šคํ…œ์˜ ์‹œ๊ฐ์„ ํ‘œ์ค€์‹œ๊ฐ์œผ๋กœ ๋™๊ธฐํ™”ํ•˜๊ณ  ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.9.7": {"name": "์ •๋ณด์ž์‚ฐ์˜ ์žฌ์‚ฌ์šฉ ๋ฐ ํ๊ธฐ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.9.7 ์ •๋ณด์ž์‚ฐ์˜ ์žฌ์‚ฌ์šฉ ๋ฐ ํ๊ธฐ","Subdomain": "2.9. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ์šด์˜๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์ž์‚ฐ ํ๊ธฐ ๋ฐ ์žฌ์‚ฌ์šฉ ์ ˆ์ฐจ","์ €์žฅ๋งค์ฒด ๊ด€๋ฆฌ๋Œ€์žฅ","์ •๋ณด์ž์‚ฐ ๋ฐ ์ €์žฅ๋งค์ฒด ํ๊ธฐ ์ฆ๊ฑฐ์ž๋ฃŒ","์ •๋ณด์ž์‚ฐ ๋ฐ ์ €์žฅ๋งค์ฒด ํ๊ธฐ ๊ด€๋ จ ์œ„ํƒ๊ณ„์•ฝ์„œ"],"AuditChecklist": ["์ •๋ณด์ž์‚ฐ์˜ ์•ˆ์ „ํ•œ ์žฌ์‚ฌ์šฉ ๋ฐ ํ๊ธฐ์— ๋Œ€ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ž์‚ฐ ๋ฐ ์ €์žฅ๋งค์ฒด๋ฅผ ์žฌ์‚ฌ์šฉ ๋ฐ ํ๊ธฐํ•˜๋Š” ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๋ฅผ ๋ณต๊ตฌ๋˜์ง€ ์•Š๋Š” ๋ฐฉ๋ฒ•์œผ๋กœ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ž์ฒด์ ์œผ๋กœ ์ •๋ณด์ž์‚ฐ ๋ฐ ์ €์žฅ๋งค์ฒด๋ฅผ ํ๊ธฐํ•  ๊ฒฝ์šฐ ๊ด€๋ฆฌ๋Œ€์žฅ์„ ํ†ตํ•˜์—ฌ ํ๊ธฐ์ด๋ ฅ์„ ๋‚จ๊ธฐ๊ณ  ํ๊ธฐํ™•์ธ ์ฆ์ ์„ ํ•จ๊ป˜ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์™ธ๋ถ€์—…์ฒด๋ฅผ ํ†ตํ•˜์—ฌ ์ •๋ณด์ž์‚ฐ ๋ฐ ์ €์žฅ๋งค์ฒด๋ฅผ ํ๊ธฐํ•  ๊ฒฝ์šฐ ํ๊ธฐ ์ ˆ์ฐจ๋ฅผ ๊ณ„์•ฝ์„œ์— ๋ช…์‹œํ•˜๊ณ  ์™„์ „ํžˆ ํ๊ธฐํ•˜์˜€๋Š”์ง€ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์‹œ์Šคํ…œ, PC ๋“ฑ ์œ ์ง€๋ณด์ˆ˜, ์ˆ˜๋ฆฌ ๊ณผ์ •์—์„œ ์ €์žฅ๋งค์ฒด ๊ต์ฒด, ๋ณต๊ตฌ ๋“ฑ ๋ฐœ์ƒ ์‹œ ์ €์žฅ๋งค์ฒด ๋‚ด ์ •๋ณด๋ฅผ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•œ ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด์ทจ๊ธ‰์ž PC๋ฅผ ์žฌ์‚ฌ์šฉํ•  ๊ฒฝ์šฐ ๋ฐ์ดํ„ฐ ์‚ญ์ œํ”„๋กœ๊ทธ๋žจ์„ ์ด์šฉํ•˜์—ฌ ์™„์ „์‚ญ์ œ ํ•˜๋„๋ก ์ •์ฑ… ๋ฐ ์ ˆ์ฐจ๊ฐ€ ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์œผ๋‚˜, ์‹ค์ œ๋กœ๋Š” ์™„์ „์‚ญ์ œ ์กฐ์น˜ ์—†์ด ์žฌ์‚ฌ์šฉํ•˜๊ฑฐ๋‚˜ ๊ธฐ๋ณธ ํฌ๋งท๋งŒ ํ•˜๊ณ  ์žฌ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๋“ฑ ๊ด€๋ จ ์ ˆ์ฐจ๊ฐ€ ์ดํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์™ธ๋ถ€์—…์ฒด๋ฅผ ํ†ตํ•˜์—ฌ ์ €์žฅ๋งค์ฒด๋ฅผ ํ๊ธฐํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ณ„์•ฝ ๋‚ด์šฉ์ƒ ์•ˆ์ „ํ•œ ํ๊ธฐ ์ ˆ์ฐจ ๋ฐ ๋ณดํ˜ธ๋Œ€์ฑ…์— ๋Œ€ํ•œ ๋‚ด์šฉ์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๊ณ  ํ๊ธฐ ์ดํ–‰ ์ฆ๊ฑฐ์ž๋ฃŒ ํ™•์ธ ๋ฐ ์‹ค์‚ฌ ๋“ฑ์˜ ๊ด€๋ฆฌยท๊ฐ๋…์ด ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ํ๊ธฐ๋œ HDD์˜ ์ผ๋ จ๋ฒˆํ˜ธ๊ฐ€ ์•„๋‹Œ ์‹œ์Šคํ…œ๋ช…์„ ๊ธฐ๋กํ•˜๊ฑฐ๋‚˜ ํ๊ธฐ ๋Œ€์žฅ์„ ์ž‘์„ฑํ•˜์ง€ ์•Š์•„ ํ๊ธฐ ์ด๋ ฅ ๋ฐ ์ถ”์ ํ•  ์ˆ˜ ์žˆ๋Š” ์ฆ๊ฑฐ์ž๋ฃŒ๋ฅผ ํ™•์ธํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ํšŒ์ˆ˜ํ•œ ํ๊ธฐ ๋Œ€์ƒ ํ•˜๋“œ๋””์Šคํฌ๊ฐ€ ์™„์ „์‚ญ์ œ ๋˜์ง€ ์•Š์€ ์ƒํƒœ๋กœ ์ž ๊ธˆ์žฅ์น˜๊ฐ€ ๋˜์ง€ ์•Š์€ ์žฅ์†Œ์— ๋ฐฉ์น˜๋˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ21์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ13์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ)"]}],"description": "์ •๋ณด์ž์‚ฐ์˜ ์žฌ์‚ฌ์šฉ๊ณผ ํ๊ธฐ ๊ณผ์ •์—์„œ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๊ฐ€ ๋ณต๊ตฌยท์žฌ์ƒ๋˜์ง€ ์•Š๋„๋ก ์•ˆ์ „ํ•œ ์žฌ์‚ฌ์šฉ ๋ฐ ํ๊ธฐ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.1": {"name": "๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ยท์ด์šฉ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.1 ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ยท์ด์šฉ","Subdomain": "3.1. 
๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์˜จ๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ๋ชจ๋ฐ”์ผ์•ฑ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ์ด๋ฒคํŠธ ์ฐธ์—ฌ ๋“ฑ)","์˜คํ”„๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํšŒ์›๊ฐ€์ž…์‹ ์ฒญ์„œ ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ๋™์˜ ๊ธฐ๋ก(ํšŒ์› ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ๋“ฑ)","๋ฒ•์ •๋Œ€๋ฆฌ์ธ ๋™์˜ ๊ธฐ๋ก","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด ๋™์˜, ๋ฒ•๋ น์ƒ ์˜๋ฌด์ค€์ˆ˜, ๊ณ„์•ฝ ์ฒด๊ฒฐยท์ดํ–‰ ๋“ฑ ์ ๋ฒ• ์š”๊ฑด์— ๋”ฐ๋ผ ์ˆ˜์ง‘ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ ๋™์˜๋ฐฉ๋ฒ• ๋ฐ ์‹œ์ ์€ ์ ์ ˆํ•˜๊ฒŒ ๋˜์–ด ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ ๊ด€๋ จ ๋‚ด์šฉ์„ ๋ช…ํ™•ํ•˜๊ฒŒ ๊ณ ์ง€ํ•˜๊ณ  ๋ฒ•๋ น์—์„œ ์ •ํ•œ ์ค‘์š”ํ•œ ๋‚ด์šฉ์— ๋Œ€ํ•ด ์•Œ์•„๋ณด๊ธฐ ์‰ฝ๊ฒŒ ํ‘œ์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™์˜ ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•ด ์ˆ˜์ง‘ยท์ด์šฉยท์ œ๊ณต ๋“ฑ์˜ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์—๊ฒŒ ํ•„์š”ํ•œ ์‚ฌํ•ญ์— ๋Œ€ํ•˜์—ฌ ๊ณ ์ง€ํ•˜๊ณ  ๋™์˜๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฒ•์ •๋Œ€๋ฆฌ์ธ์˜ ๋™์˜๋ฅผ ๋ฐ›๊ธฐ ์œ„ํ•˜์—ฌ ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ๊ฐœ์ธ์ •๋ณด๋งŒ์„ ์ˆ˜์ง‘ํ•˜๊ณ  ์žˆ์œผ๋ฉฐ, ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์ด ์ž๊ฒฉ ์š”๊ฑด์„ ๊ฐ–์ถ”๊ณ  ์žˆ๋Š”์ง€ ํ™•์ธํ•˜๋Š” ์ ˆ์ฐจ์™€ ๋ฐฉ๋ฒ•์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋งŒ 14์„ธ ๋ฏธ๋งŒ์˜ ์•„๋™์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์™€ ๊ด€๋ จํ•œ ์‚ฌํ•ญ ๋“ฑ์˜ ๊ณ ์ง€ ์‹œ ์ดํ•ดํ•˜๊ธฐ ์‰ฌ์šด ์–‘์‹๊ณผ ๋ช…ํ™•ํ•˜๊ณ  ์•Œ๊ธฐ ์‰ฌ์šด ์–ธ์–ด๋กœ ํ‘œํ˜„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด ๋ฐ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์—๊ฒŒ ๋™์˜๋ฅผ ๋ฐ›์€ ๊ธฐ๋ก์„ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜ ์—†์ด ์ฒ˜๋ฆฌํ•  ์ˆ˜ ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•ด์„œ๋Š” ๊ทธ ํ•ญ๋ชฉ๊ณผ ์ฒ˜๋ฆฌ์˜ ๋ฒ•์  ๊ทผ๊ฑฐ๋ฅผ ์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜๋ฅผ ๋ฐ›์•„ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์™€ ๊ตฌ๋ถ„ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์—๊ณต๊ฐœํ•˜๊ฑฐ๋‚˜ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜ ์—†์ด ๊ฐœ์ธ์ •๋ณด์˜ ์ถ”๊ฐ€์ ์ธ ์ด์šฉ ์‹œ ๋‹น์ดˆ ์ˆ˜์ง‘ ๋ชฉ์ ๊ณผ์˜ ๊ด€๋ จ์„ฑ, ์˜ˆ์ธก ๊ฐ€๋Šฅ์„ฑ, ์ด์ต ์นจํ•ด ์—ฌ๋ถ€, ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๋“ฑ์˜ ๊ณ ๋ ค์‚ฌํ•ญ์— ๋Œ€ํ•œ ํŒ๋‹จ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ , ์ถ”๊ฐ€์ ์ธ ์ด์šฉ์ด ์ง€์†์ ์œผ๋กœ ๋ฐœ์ƒํ•˜๋Š” ๊ฒฝ์šฐ ๊ณ ๋ ค์‚ฌํ•ญ์— ๋Œ€ํ•œ ํŒ๋‹จ๊ธฐ์ค€์„๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœํ•˜๊ณ  ์ด๋ฅผ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ•์„ ์ ์šฉ๋ฐ›๋Š” ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๊ฐ€ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ๋™์˜ ์‹œ ๊ณ ์ง€ ์‚ฌํ•ญ์— สป๋™์˜ ๊ฑฐ๋ถ€ ๊ถŒ๋ฆฌ ๋ฐ ๋™์˜ ๊ฑฐ๋ถ€์— ๋”ฐ๋ฅธ ๋ถˆ์ด์ต ๋‚ด์šฉสผ์„ ๋ˆ„๋ฝํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ๋™์˜ ์‹œ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ์„ ๊ตฌ์ฒด์ ์œผ๋กœ ๋ช…์‹œํ•˜์ง€ ์•Š๊ณ  สป~ ๋“ฑสผ๊ณผ ๊ฐ™์ด ํฌ๊ด„์ ์œผ๋กœ ์•ˆ๋‚ดํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์‡ผํ•‘๋ชฐ ํ™ˆํŽ˜์ด์ง€์—์„œ ํšŒ์›๊ฐ€์ž… ์‹œ ํšŒ์›๊ฐ€์ž…์— ํ•„์š”ํ•œ ๊ฐœ์ธ์ •๋ณด ์™ธ์— ์ถ”ํ›„ ๋ฌผํ’ˆ ๊ตฌ๋งค ์‹œ ํ•„์š”ํ•œ ๊ฒฐ์ œยท๋ฐฐ์†ก ์ •๋ณด๋ฅผ ๋ฏธ๋ฆฌ ํ•„์ˆ˜ ํ•ญ๋ชฉ์œผ๋กœ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : Q&A, ๊ฒŒ์‹œํŒ์„ ํ†ตํ•˜์—ฌ ๋น„ํšŒ์›์˜ ๊ฐœ์ธ์ •๋ณด(์ด๋ฆ„, ์ด๋ฉ”์ผ, ํœด๋Œ€ํฐ๋ฒˆํ˜ธ)๋ฅผ ์ˆ˜์ง‘ํ•˜๋ฉด์„œ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ๋™์˜ ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋ฉด์„œ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์˜ ๋™์˜๋ฅผ ๋ฐ›์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™์— ๋Œ€ํ•˜์—ฌ ์„œ๋น„์Šค๋ฅผ ์ œ๊ณตํ•˜๊ณ  ์žˆ์ง€ ์•Š์ง€๋งŒ, ํšŒ์›๊ฐ€์ž… ๋‹จ๊ณ„์—์„œ ์ž…๋ ฅ๋ฐ›๋Š” ์ƒ๋…„์›”์ผ์„ ํ†ตํ•˜์—ฌ ๋‚˜์ด ์ฒดํฌ๋ฅผ ํ•˜์ง€ ์•Š์•„ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ ๋™์˜ ์—†์ด ๊ฐ€์ž…๋œ ๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™ ํšŒ์›์ด ์กด์žฌํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 7 : ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์˜ ์ง„์œ„ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜๋Š” ์ 
ˆ์ฐจ๊ฐ€ ๋ฏธํกํ•˜์—ฌ ๋ฏธ์„ฑ๋…„์ž ๋“ฑ ์•„๋™์˜ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์œผ๋กœ ๋ณด๊ธฐ ์–ด๋ ค์šด๋ฐ๋„ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ ๋™์˜๊ฐ€ ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 8 : ๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™์œผ๋กœ๋ถ€ํ„ฐ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๋ชฉ์ ์œผ๋กœ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์˜ ๊ฐœ์ธ์ •๋ณด(์ด๋ฆ„, ํœด๋Œ€ํฐ๋ฒˆํ˜ธ)๋ฅผ ์ˆ˜์ง‘ํ•œ ์ดํ›„ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์˜ ๋™์˜๊ฐ€ ์žฅ๊ธฐ๊ฐ„ ํ™•์ธ๋˜์ง€ ์•Š์•˜์Œ์—๋„ ์ด๋ฅผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ๊ณ„์† ๋ณด์œ ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 9 : ๋ฒ•์ •๋Œ€๋ฆฌ์ธ ๋™์˜์— ๊ทผ๊ฑฐํ•˜์—ฌ ๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜์˜€์œผ๋‚˜, ๊ด€๋ จ ๊ธฐ๋ก์„ ๋ณด์กดํ•˜์ง€ ์•Š์•„ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ ๋™์˜์™€ ๊ด€๋ จ๋œ ์‚ฌํ•ญ(๋ฒ•์ •๋Œ€๋ฆฌ์ธ ์ด๋ฆ„, ๋™์˜ ์ผ์‹œ ๋“ฑ)์„ ํ™•์ธํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ15์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์ˆ˜์ง‘ยท์ด์šฉ), ์ œ22์กฐ(๋™์˜๋ฅผ ๋ฐ›๋Š” ๋ฐฉ๋ฒ•), ์ œ22์กฐ์˜2(์•„๋™์˜ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ)","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋ฐฉ๋ฒ•์— ๊ด€ํ•œ ๊ณ ์‹œ"]}],"description": "๊ฐœ์ธ์ •๋ณด๋Š” ์ ๋ฒ•ํ•˜๊ณ  ์ •๋‹นํ•˜๊ฒŒ ์ˆ˜์ง‘ยท์ด์šฉํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜๋ฅผ ๊ทผ๊ฑฐ๋กœ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ์—๋Š” ์ ๋ฒ•ํ•œ ๋ฐฉ๋ฒ•์œผ๋กœ ์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜๋ฅผ ๋ฐ›์•„์•ผ ํ•œ๋‹ค. ๋˜ํ•œ ๋งŒ 14์„ธ ๋ฏธ๋งŒ ์•„๋™์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ์—๋Š” ๊ทธ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์˜ ๋™์˜๋ฅผ ๋ฐ›์•„์•ผ ํ•˜๋ฉฐ ๋ฒ•์ •๋Œ€๋ฆฌ์ธ์ด ๋™์˜ํ•˜์˜€๋Š”์ง€๋ฅผ ํ™•์ธํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.2": {"name": "๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์ œํ•œ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.2 ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์ œํ•œ","Subdomain": "3.1. ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์˜จ๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ์ด๋ฒคํŠธ ์ฐธ์—ฌ ํ™”๋ฉด ๋“ฑ)","์˜คํ”„๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(๋ฉค๋ฒ„์‹ญ ๊ฐ€์ž…์‹ ์ฒญ์„œ ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ ๊ทธ ๋ชฉ์ ์— ํ•„์š”ํ•œ ๋ฒ”์œ„์—์„œ ์ตœ์†Œํ•œ์˜ ์ •๋ณด๋งŒ์„ ์ˆ˜์ง‘ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜๋ฅผ ๋ฐ›์•„ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ์ •๋ณด ์™ธ์˜ ๊ฐœ์ธ์ •๋ณด์ˆ˜์ง‘์—๋Š” ๋™์˜ํ•˜์ง€ ์•Š์„ ์ˆ˜ ์žˆ๋‹ค๋Š” ์‚ฌ์‹ค์„ ๊ตฌ์ฒด์ ์œผ๋กœ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด๊ฐ€ ์ˆ˜์ง‘ ๋ชฉ์ ์— ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ์ •๋ณด ์ด์™ธ์˜ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘์— ๋™์˜ํ•˜์ง€์•Š๋Š”๋‹ค๋Š” ์ด์œ ๋กœ ์„œ๋น„์Šค ๋˜๋Š” ์žฌํ™”์˜ ์ œ๊ณต์„ ๊ฑฐ๋ถ€ํ•˜์ง€ ์•Š๋„๋ก ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ณ„์•ฝ์˜ ์ฒด๊ฒฐ ๋ฐ ์ดํ–‰์„ ๊ทผ๊ฑฐ๋กœ ์ •๋ณด์ฃผ์ฒด ๋™์˜ ์—†์ด ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋ฉด์„œ ๊ณ„์•ฝ์˜ ์ฒด๊ฒฐ ๋ฐ ์ดํ–‰์„ ์œ„ํ•ด ๋ฐ˜๋“œ์‹œ ํ•„์š”ํ•˜์ง€ ์•Š์€ ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ๊นŒ์ง€ ๊ณผ๋„ํ•˜๊ฒŒ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด์ฃผ์ฒด๋กœ๋ถ€ํ„ฐ ์„ ํƒ์‚ฌํ•ญ์— ๋Œ€ํ•œ ๋™์˜๋ฅผ ๋ฐ›์œผ๋ฉด์„œ ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘์—๋Š” ๋™์˜ํ•˜์ง€ ์•„๋‹ˆํ•  ์ˆ˜ ์žˆ๋‹ค๋Š” ์‚ฌ์‹ค์„ ๊ตฌ์ฒด์ ์œผ๋กœ ์•Œ๋ฆฌ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ํšŒ์›๊ฐ€์ž… ์–‘์‹์—์„œ ํ•„์ˆ˜์™€ ์„ ํƒ ์ •๋ณด๋ฅผ ๊ตฌ๋ถ„ํ•˜์—ฌ ๋ณ„๋„ ๋™์˜๋ฅผ ๋ฐ›๋„๋ก ๋˜์–ด ์žˆ์—ˆ์œผ๋‚˜, ์„ ํƒ์ •๋ณด์— ๋Œ€ํ•˜์—ฌ ๋™์˜ํ•˜์ง€ ์•Š์•„๋„ ํšŒ์›๊ฐ€์ž…์ด ๊ฐ€๋Šฅํ•จ์„ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ตฌ์ฒด์ ์œผ๋กœ ์•Œ๋ฆฌ์ง€ ์•Š์€ ๊ฒฝ์šฐ(๊ฐœ์ธ์ •๋ณด ์ž…๋ ฅ ์–‘์‹์— ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ๋ณ„๋กœ ํ•„์ˆ˜, ์„ ํƒ ์—ฌ๋ถ€๊ฐ€ ํ‘œ์‹œ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ ๋“ฑ)","์‚ฌ๋ก€ 4 : ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด์—์„œ ์„ ํƒ์‚ฌํ•ญ์— ๋Œ€ํ•˜์—ฌ ๋™์˜ํ•˜์ง€ ์•Š๊ฑฐ๋‚˜ ์„ ํƒ์ •๋ณด๋ฅผ ์ž…๋ ฅํ•˜์ง€ ์•Š์œผ๋ฉด ๋‹ค์Œ ๋‹จ๊ณ„๋กœ ๋„˜์–ด๊ฐ€์ง€ ์•Š๊ฑฐ๋‚˜ ํšŒ์›๊ฐ€์ž…์ด ์ฐจ๋‹จ๋˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ฑ„์šฉ 
๊ณ„์•ฝ ์‹œ ์ฑ„์šฉ ์˜ˆ์ • ์ง๋ฌด์™€ ์ง์ ‘ ๊ด€๋ จ์ด ์—†๋Š” ๊ฐ€์กฑ์‚ฌํ•ญ ๋“ฑ ๊ณผ๋„ํ•œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ16์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์ˆ˜์ง‘์ œํ•œ), ์ œ22์กฐ(๋™์˜๋ฅผ ๋ฐ›๋Š” ๋ฐฉ๋ฒ•)"]}],"description": "๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ ์ฒ˜๋ฆฌ ๋ชฉ์ ์— ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ๊ฐœ์ธ์ •๋ณด๋งŒ์„ ์ˆ˜์ง‘ํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์„ ํƒ์ ์œผ๋กœ ๋™์˜ํ•  ์ˆ˜ ์žˆ๋Š” ์‚ฌํ•ญ ๋“ฑ์— ๋™์˜ํ•˜์ง€ ์•„๋‹ˆํ•œ๋‹ค๋Š” ์ด์œ ๋กœ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค์˜ ์ œ๊ณต์„ ๊ฑฐ๋ถ€ํ•˜์ง€ ์•Š์•„์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.3": {"name": "์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ ์ œํ•œ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.3 ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ ์ œํ•œ","Subdomain": "3.1. ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ์ด๋ฒคํŠธ ์ฐธ์—ฌ, ๋ฉค๋ฒ„์‹ญ ๊ฐ€์ž…์‹ ์ฒญ์„œ ๋“ฑ)","์˜จ๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(๋ณธ์ธํ™•์ธ ๋“ฑ ๋Œ€์ฒด๊ฐ€์ž…์ˆ˜๋‹จ ์ œ๊ณต ํ™”๋ฉด)","์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ ๊ทผ๊ฑฐ ์ฆ๊ฑฐ์ž๋ฃŒ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋Š” ๋ช…ํ™•ํ•œ ๋ฒ•์  ๊ทผ๊ฑฐ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ์—๋งŒ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ์˜ ์ˆ˜์ง‘ ๊ทผ๊ฑฐ๊ฐ€ ๋˜๋Š” ๋ฒ•์กฐํ•ญ์„ ๊ตฌ์ฒด์ ์œผ๋กœ ์‹๋ณ„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฒ•์  ๊ทผ๊ฑฐ์— ๋”ฐ๋ผ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ์—๋„ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€๋ฅผ ํ†ตํ•˜์—ฌ ํšŒ์›์œผ๋กœ ๊ฐ€์ž…ํ•˜๋Š” ๋‹จ๊ณ„์—์„œ๋Š” ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์‚ฌ์šฉํ•˜์ง€ ์•„๋‹ˆํ•˜๊ณ ๋„ ํšŒ์›์œผ๋กœ ๊ฐ€์ž…ํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์„ ์ œ๊ณตํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํ™ˆํŽ˜์ด์ง€ ๊ฐ€์ž…๊ณผ ๊ด€๋ จํ•˜์—ฌ ์‹ค๋ช…ํ™•์ธ ๋“ฑ ๋‹จ์ˆœ ํšŒ์›๊ด€๋ฆฌ ๋ชฉ์ ์„ ์œ„ํ•˜์—ฌ ์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜์— ๊ทผ๊ฑฐํ•˜์—ฌ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ˆ˜์ง‘ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณด์ฃผ์ฒด์˜ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์‹œํ–‰๊ทœ์น™์ด๋‚˜ ์ง€๋ฐฉ์ž์น˜๋‹จ์ฒด์˜ ์กฐ๋ก€์— ๊ทผ๊ฑฐํ•˜์—ฌ ์ˆ˜์ง‘ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋น„๋ฐ€๋ฒˆํ˜ธ ๋ถ„์‹ค ์‹œ ๋ณธ์ธํ™•์ธ ๋“ฑ์˜ ๋ชฉ์ ์œผ๋กœ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ๋’ค 6์ž๋ฆฌ๋ฅผ ์ˆ˜์ง‘ํ•˜์ง€๋งŒ, ๊ด€๋ จ๋œ ๋ฒ•์  ๊ทผ๊ฑฐ๊ฐ€ ์—†๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ฑ„์šฉ์ „ํ˜• ์ง„ํ–‰๋‹จ๊ณ„์—์„œ ๋ฒ•์  ๊ทผ๊ฑฐ ์—†์ด ์ž…์‚ฌ์ง€์›์ž์˜ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ˆ˜์ง‘ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ฝœ์„ผํ„ฐ์— ์ƒํ’ˆ, ์„œ๋น„์Šค ๊ด€๋ จ ๋ฌธ์˜ ์‹œ ๋ณธ์ธํ™•์ธ์„ ์œ„ํ•˜์—ฌ ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ˆ˜์ง‘ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ˆ˜์ง‘์˜ ๋ฒ•์  ๊ทผ๊ฑฐ๊ฐ€ ์žˆ๋‹ค๋Š” ์‚ฌ์œ ๋กœ ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ๋‹จ๊ณ„์—์„œ ๋Œ€์ฒด๊ฐ€์ž…์ˆ˜๋‹จ์„ ์ œ๊ณตํ•˜์ง€ ์•Š๊ณ  ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ž…๋ ฅ๋ฐ›๋Š” ๋ณธ์ธํ™•์ธ ๋ฐ ํšŒ์›๊ฐ€์ž… ๋ฐฉ๋ฒ•๋งŒ์„ ์ œ๊ณตํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ24์กฐ์˜2(์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ฒ˜๋ฆฌ์˜ ์ œํ•œ)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ23์กฐ์˜2(์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ์˜ ์‚ฌ์šฉ ์ œํ•œ)"]}],"description": "์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ๋Š” ๋ฒ•์  ๊ทผ๊ฑฐ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ๋ฅผ ์ œ์™ธํ•˜๊ณ ๋Š” ์ˆ˜์ง‘ยท์ด์šฉ ๋“ฑ ์ฒ˜๋ฆฌํ•  ์ˆ˜ ์—†์œผ๋ฉฐ, ์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ์˜ ์ฒ˜๋ฆฌ๊ฐ€ ํ—ˆ์šฉ๋œ ๊ฒฝ์šฐ๋ผ ํ•˜๋”๋ผ๋„ ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€ ๋“ฑ์—์„œ ๋Œ€์ฒด์ˆ˜๋‹จ์„ ์ œ๊ณตํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.4": {"name": "๋ฏผ๊ฐ์ •๋ณด ๋ฐ ๊ณ ์œ ์‹๋ณ„์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.4 ๋ฏผ๊ฐ์ •๋ณด ๋ฐ ๊ณ ์œ ์‹๋ณ„์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ","Subdomain": "3.1. 
๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์˜จ๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ์ด๋ฒคํŠธ ์ฐธ์—ฌ ๋“ฑ)","์˜คํ”„๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํšŒ์›๊ฐ€์ž…์‹ ์ฒญ์„œ ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["๋ฏผ๊ฐ์ •๋ณด๋Š” ์ •๋ณด์ฃผ์ฒด๋กœ๋ถ€ํ„ฐ ๋ณ„๋„์˜ ๋™์˜๋ฅผ ๋ฐ›๊ฑฐ๋‚˜ ๊ด€๋ จ ๋ฒ•๋ น์— ๊ทผ๊ฑฐ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ์—๋งŒ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณ ์œ ์‹๋ณ„์ •๋ณด(์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ œ์™ธ)๋Š” ์ •๋ณด์ฃผ์ฒด๋กœ๋ถ€ํ„ฐ ๋ณ„๋„์˜ ๋™์˜๋ฅผ ๋ฐ›๊ฑฐ๋‚˜ ๊ด€๋ จ ๋ฒ•๋ น์— ๊ตฌ์ฒด์ ์ธ ๊ทผ๊ฑฐ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ์—๋งŒ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค๋ฅผ ์ œ๊ณตํ•˜๋Š” ๊ณผ์ •์—์„œ ๊ณต๊ฐœ๋˜๋Š” ์ •๋ณด์— ์ •๋ณด์ฃผ์ฒด์˜ ๋ฏผ๊ฐ์ •๋ณด๊ฐ€ ํฌํ•จ๋จ์œผ๋กœ์จ ์‚ฌ์ƒํ™œ ์นจํ•ด์˜ ์œ„ํ—˜์„ฑ์ด ์žˆ๋‹ค๊ณ  ํŒ๋‹จํ•˜๋Š” ๋•Œ์—๋Š” ์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค์˜ ์ œ๊ณต ์ „์— ๋ฏผ๊ฐ์ •๋ณด์˜ ๊ณต๊ฐœ ๊ฐ€๋Šฅ์„ฑ ๋ฐ ๋น„๊ณต๊ฐœ๋ฅผ ์„ ํƒํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์•Œ์•„๋ณด๊ธฐ ์‰ฝ๊ฒŒ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์žฅ์• ์ธ์— ๋Œ€ํ•œ ์š”๊ธˆ๊ฐ๋ฉด ๋“ฑ ํ˜œํƒ ๋ถ€์—ฌ๋ฅผ ์œ„ํ•˜์—ฌ ์žฅ์•  ์—ฌ๋ถ€ ๋“ฑ ๊ฑด๊ฐ•์— ๊ด€ํ•œ ๋ฏผ๊ฐ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋ฉด์„œ ๋‹ค๋ฅธ ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ์— ํฌํ•จํ•˜์—ฌ ์ผ๊ด„ ๋™์˜๋ฅผ ๋ฐ›์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ํšŒ์›๊ฐ€์ž… ์‹œ ์™ธ๊ตญ์ธ์— ํ•œํ•˜์—ฌ ์™ธ๊ตญ์ธ๋“ฑ๋ก๋ฒˆํ˜ธ๋ฅผ ์ˆ˜์ง‘ํ•˜๋ฉด์„œ ๋‹ค๋ฅธ ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ์— ํฌํ•จํ•˜์—ฌ ์ผ๊ด„ ๋™์˜๋ฅผ ๋ฐ›์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ฏผ๊ฐ์ •๋ณด ๋˜๋Š” ๊ณ ์œ ์‹๋ณ„์ •๋ณด์˜ ์ˆ˜์ง‘์— ๋Œ€ํ•ด ๋ณ„๋„์˜ ๋™์˜๋ฅผ ๋ฐ›์œผ๋ฉด์„œ ๊ณ ์ง€ํ•˜์—ฌ์•ผ ํ•  4๊ฐ€์ง€ ์‚ฌํ•ญ ์ค‘์— ์ผ๋ถ€๋ฅผ ๋ˆ„๋ฝํ•˜๊ฑฐ๋‚˜ ์ž˜๋ชป๋œ ๋‚ด์šฉ์œผ๋กœ ๊ณ ์ง€ํ•˜๋Š” ๊ฒฝ์šฐ(๋™์˜ ๊ฑฐ๋ถ€ ๊ถŒ๋ฆฌ ๋ฐ ๋™์˜ ๊ฑฐ๋ถ€์— ๋”ฐ๋ฅธ ๋ถˆ์ด์ต ์‚ฌํ•ญ์„ ๊ณ ์ง€ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ ๋“ฑ)"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ23์กฐ(๋ฏผ๊ฐ์ •๋ณด์˜ ์ฒ˜๋ฆฌ์ œํ•œ), ์ œ24์กฐ(๊ณ ์œ ์‹๋ณ„์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ)"]}],"description": "๋ฏผ๊ฐ์ •๋ณด์™€ ๊ณ ์œ ์‹๋ณ„์ •๋ณด(์ฃผ๋ฏผ๋“ฑ๋ก๋ฒˆํ˜ธ ์ œ์™ธ)๋ฅผ ์ฒ˜๋ฆฌํ•˜๊ธฐ ์œ„ํ•ด์„œ๋Š” ๋ฒ•๋ น์—์„œ ๊ตฌ์ฒด์ ์œผ๋กœ ์ฒ˜๋ฆฌ๋ฅผ ์š”๊ตฌํ•˜๊ฑฐ๋‚˜ ํ—ˆ์šฉํ•˜๋Š” ๊ฒฝ์šฐ๋ฅผ ์ œ์™ธํ•˜๊ณ ๋Š” ์ •๋ณด์ฃผ์ฒด์˜ ๋ณ„๋„ ๋™์˜๋ฅผ ๋ฐ›์•„์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.5": {"name": "๊ฐœ์ธ์ •๋ณด ๊ฐ„์ ‘์ˆ˜์ง‘","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.5 ๊ฐœ์ธ์ •๋ณด ๊ฐ„์ ‘์ˆ˜์ง‘","Subdomain": "3.1. 
๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ œ๊ณต ๊ด€๋ จ ๊ณ„์•ฝ์„œ(์ œ๊ณตํ•˜๋Š” ์ž์™€์˜ ๊ณ„์•ฝ ์‚ฌํ•ญ)","๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘์ถœ์ฒ˜์— ๋Œ€ํ•œ ์ •๋ณด์ฃผ์ฒด ํ†ต์ง€ ๋‚ด์—ญ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["์ •๋ณด์ฃผ์ฒด ์ด์™ธ์˜ ์ œ3์ž๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณต๋ฐ›๋Š” ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘์— ๋Œ€ํ•œ ๋™์˜ํš๋“ ์ฑ…์ž„์ด ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณตํ•˜๋Š” ์ž์—๊ฒŒ ์žˆ์Œ์„ ๊ณ„์•ฝ์„ ํ†ตํ•˜์—ฌ ๋ช…์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ฐœ๋œ ๋งค์ฒด ๋ฐ ์žฅ์†Œ์—์„œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด์˜ ๊ณต๊ฐœ ๋ชฉ์ ยท๋ฒ”์œ„ ๋ฐ ์‚ฌํšŒ ํ†ต๋…์ƒ ๋™์˜ ์˜์‚ฌ๊ฐ€ ์žˆ๋‹ค๊ณ  ์ธ์ •๋˜๋Š” ๋ฒ”์œ„ ๋‚ด์—์„œ๋งŒ ์ˆ˜์ง‘ยท์ด์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์„œ๋น„์Šค ๊ณ„์•ฝ ์ดํ–‰์„ ์œ„ํ•ด ํ•„์š”ํ•œ ๊ฒฝ์šฐ๋กœ์„œ, ์„œ๋น„์Šค ์ œ๊ณต ๊ณผ์ •์—์„œ ์ž๋™์ˆ˜์ง‘์žฅ์น˜ ๋“ฑ์— ์˜ํ•˜์—ฌ ์ˆ˜์ง‘ยท์ƒ์„ฑํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์˜ ๊ฒฝ์šฐ์—๋„ ์ตœ์†Œ์ˆ˜์ง‘ ์›์น™์„ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด ์ด์™ธ๋กœ๋ถ€ํ„ฐ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•ด ์ •๋ณด์ฃผ์ฒด์˜ ์š”๊ตฌ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ ์ฆ‰์‹œ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด ์ด์™ธ๋กœ๋ถ€ํ„ฐ ์ˆ˜์ง‘ํ•œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด์˜ ์ข…๋ฅ˜ยท๊ทœ๋ชจ ๋“ฑ์ด ๋ฒ•์  ์š”๊ฑด์— ํ•ด๋‹นํ•˜๋Š” ๊ฒฝ์šฐ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์ˆ˜์ง‘ ์ถœ์ฒ˜์— ๋Œ€ํ•ด ์•Œ๋ฆฐ ๊ธฐ๋ก์„ ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ ์‹œ๊นŒ์ง€ ๋ณด๊ด€ ๋ฐ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€, SNS์— ๊ณต๊ฐœ๋œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๊ณ  ์žˆ๋Š” ์ƒํƒœ์—์„œ ์ •๋ณด์ฃผ์ฒด์˜ ์ˆ˜์ง‘ ์ถœ์ฒ˜ ์š”๊ตฌ์— ๋Œ€ํ•œ ์ฒ˜๋ฆฌ์ ˆ์ฐจ๊ฐ€ ์กด์žฌํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ17์กฐ์ œ1ํ•ญ์ œ1ํ˜ธ์— ๋”ฐ๋ผ ๋‹ค๋ฅธ ์‚ฌ์—…์ž๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด ์ œ๊ณต๋™์˜๋ฅผ ๊ทผ๊ฑฐ๋กœ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณต๋ฐ›์•˜์œผ๋‚˜, ์ด์— ๋Œ€ํ•˜์—ฌ ํ•ด๋‹น ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ 3๊ฐœ์›” ๋‚ด์— ํ†ต์ง€ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ(๋‹ค๋งŒ ์ œ๊ณต๋ฐ›์€ ์ž๊ฐ€ 5๋งŒ ๋ช… ์ด์ƒ ์ •๋ณด์ฃผ์ฒด์˜ ๋ฏผ๊ฐ์ •๋ณด ๋˜๋Š” ๊ณ ์œ ์‹๋ณ„์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๊ฑฐ๋‚˜ 100๋งŒ ๋ช… ์ด์ƒ ์ •๋ณด์ฃผ์ฒด์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ)","์‚ฌ๋ก€ 3 : ๋ฒ•์  ์˜๋ฌด ๋Œ€์ƒ์ž์— ํ•ด๋‹น๋˜์–ด ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์ถœ์ฒ˜๋ฅผ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ํ†ต์ง€ํ•˜๋ฉด์„œ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ๋ชฉ์  ๋˜๋Š” ๋™์˜๋ฅผ ์ฒ ํšŒํ•  ๊ถŒ๋ฆฌ๊ฐ€ ์žˆ๋‹ค๋Š” ์‚ฌ์‹ค ๋“ฑ ํ•„์ˆ˜ ํ†ต์ง€์‚ฌํ•ญ์„ ์ผ๋ถ€ ๋ˆ„๋ฝํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋ฒ•์  ์˜๋ฌด ๋Œ€์ƒ์ž์— ํ•ด๋‹น๋˜์–ด ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์ถœ์ฒ˜๋ฅผ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ํ†ต์ง€ํ•˜์˜€์œผ๋‚˜, ์ˆ˜์ง‘ ์ถœ์ฒ˜ ํ†ต์ง€์— ๊ด€ํ•œ ๊ธฐ๋ก์„ ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ ์‹œ๊นŒ์ง€ ๋ณด๊ด€ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ16์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์ˆ˜์ง‘ ์ œํ•œ), ์ œ19์กฐ(๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณต๋ฐ›์€ ์ž์˜ ์ด์šฉยท์ œ๊ณต ์ œํ•œ), ์ œ20์กฐ(์ •๋ณด์ฃผ์ฒด ์ด์™ธ๋กœ๋ถ€ํ„ฐ ์ˆ˜์ง‘ํ•œ ๊ฐœ์ธ์ •๋ณด์˜ ์ˆ˜์ง‘ ์ถœ์ฒ˜ ๋“ฑ ํ†ต์ง€)"]}],"description": "์ •๋ณด์ฃผ์ฒด ์ด์™ธ๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๊ฑฐ๋‚˜ ์ œ3์ž๋กœ๋ถ€ํ„ฐ ์ œ๊ณต๋ฐ›๋Š” ๊ฒฝ์šฐ์—๋Š” ์—…๋ฌด์— ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๊ฑฐ๋‚˜ ์ œ๊ณต๋ฐ›์•„์•ผ ํ•˜๋ฉฐ, ๋ฒ•๋ น์— ๊ทผ๊ฑฐํ•˜๊ฑฐ๋‚˜ ์ •๋ณด์ฃผ์ฒด์˜ ์š”๊ตฌ๊ฐ€ ์žˆ์œผ๋ฉด ๊ฐœ์ธ์ •๋ณด์˜ ์ˆ˜์ง‘ ์ถœ์ฒ˜, ์ฒ˜๋ฆฌ๋ชฉ์ , ์ฒ˜๋ฆฌ์ •์ง€์˜ ์š”๊ตฌ๊ถŒ๋ฆฌ๋ฅผ ์•Œ๋ ค์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.6": {"name": "์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์„ค์น˜ยท์šด์˜","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.6 ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์„ค์น˜ยท์šด์˜","Subdomain": "3.1. 
๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์šด์˜ ํ˜„ํ™ฉ","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์•ˆ๋‚ดํŒ","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์šด์˜ยท๊ด€๋ฆฌ๋ฐฉ์นจ","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ๊ด€๋ฆฌํ™”๋ฉด(๊ณ„์ •ยท๊ถŒํ•œ ๋‚ด์—ญ, ์˜์ƒ์ •๋ณด ๋ณด์กด๊ธฐ๊ฐ„ ๋“ฑ)","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์šด์˜ ์ˆ˜ํƒ์ž์™€์˜ ๊ณ„์•ฝ์„œ ๋ฐ ์ ๊ฒ€ ์ด๋ ฅ"],"AuditChecklist": ["๊ณต๊ฐœ๋œ ์žฅ์†Œ์— ๊ณ ์ •ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋ฅผ ์„ค์น˜ยท์šด์˜ํ•  ๊ฒฝ์šฐ ๋ฒ•์  ํ—ˆ์šฉ ์š”๊ฑด์— ํ•ด๋‹นํ•˜๋Š”์ง€๋ฅผ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ณต๊ธฐ๊ด€ ๋“ฑ์ด ๊ณต๊ฐœ๋œ ์žฅ์†Œ์— ๊ณ ์ •ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋ฅผ ์„ค์น˜ยท์šด์˜ํ•˜๋ ค๋Š” ๊ฒฝ์šฐ ๊ณต์ฒญํšŒยท์„ค๋ช…ํšŒ ๊ฐœ์ตœ ๋“ฑ์˜ ๋ฒ•๋ น์— ๋”ฐ๋ฅธ ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์ณ ๊ด€๊ณ„ ์ „๋ฌธ๊ฐ€ ๋ฐ ์ดํ•ด๊ด€๊ณ„์ž์˜ ์˜๊ฒฌ์„ ์ˆ˜๋ ดํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณ ์ •ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์„ค์น˜ยท์šด์˜ ์‹œ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์‰ฝ๊ฒŒ ์ธ์‹ํ•  ์ˆ˜ ์žˆ๋„๋ก ์•ˆ๋‚ดํŒ ์„ค์น˜ ๋“ฑ ํ•„์š”ํ•œ ์กฐ์น˜๋ฅผ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด๋ฅผ ๋ชฉ์ ์œผ๋กœ ๊ณต๊ฐœ๋œ ์žฅ์†Œ์—์„œ ์ด๋™ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋ฅผ ์šด์˜ํ•˜๋Š” ๊ฒฝ์šฐ ๋ฒ•์  ํ—ˆ์šฉ ์š”๊ฑด์— ํ•ด๋‹นํ•˜๋Š”์ง€๋ฅผ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด๋ฅผ ๋ชฉ์ ์œผ๋กœ ๊ณต๊ฐœ๋œ ์žฅ์†Œ์—์„œ ์ด๋™ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋กœ ์‚ฌ๋žŒ ๋˜๋Š” ๊ทธ ์‚ฌ๋žŒ๊ณผ ๊ด€๋ จ๋œ ์‚ฌ๋ฌผ์˜ ์˜์ƒ์„ ์ดฌ์˜ํ•˜๋Š” ๊ฒฝ์šฐ ๋ถˆ๋น›, ์†Œ๋ฆฌ, ์•ˆ๋‚ดํŒ ๋“ฑ์˜ ๋ฐฉ๋ฒ•์œผ๋กœ ์ดฌ์˜ ์‚ฌ์‹ค์„ ํ‘œ์‹œํ•˜๊ณ  ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ๋ฐ ์˜์ƒ์ •๋ณด์˜ ์•ˆ์ „ํ•œ ๊ด€๋ฆฌ๋ฅผ ์œ„ํ•œ ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์šด์˜ยท๊ด€๋ฆฌ ๋ฐฉ์นจ์„ ๋งˆ๋ จํ•˜์—ฌ ์‹œํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์˜์ƒ์ •๋ณด์˜ ๋ณด๊ด€ ๊ธฐ๊ฐ„์„ ์ •ํ•˜๊ณ  ์žˆ์œผ๋ฉฐ, ๋ณด๊ด€ ๊ธฐ๊ฐ„ ๋งŒ๋ฃŒ ์‹œ ์ง€์ฒด ์—†์ด ํŒŒ๊ธฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์„ค์น˜ยท์šด์˜์— ๊ด€ํ•œ ์‚ฌ๋ฌด๋ฅผ ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ ๊ด€๋ จ ์ ˆ์ฐจ ๋ฐ ์š”๊ฑด์— ๋”ฐ๋ผ ๊ณ„์•ฝ์„œ์— ๋ฐ˜์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์•ˆ๋‚ดํŒ์˜ ๊ณ ์ง€ ๋ฌธ๊ตฌ๊ฐ€ ์ผ๋ถ€ ๋ˆ„๋ฝ๋˜์–ด ์šด์˜๋˜๊ณ  ์žˆ๊ฑฐ๋‚˜, ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์šด์˜ ๋ฐ ๊ด€๋ฆฌ ๋ฐฉ์นจ์„ ์ˆ˜๋ฆฝยท์šด์˜ํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์šด์˜ยท๊ด€๋ฆฌ ๋ฐฉ์นจ์„ ์ˆ˜๋ฆฝ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๋ฐฉ์นจ ๋‚ด์šฉ๊ณผ ๋‹ฌ๋ฆฌ ๋ณด๊ด€๊ธฐ๊ฐ„์„ ์ค€์ˆ˜ํ•˜์ง€ ์•Š๊ณ  ์šด์˜๋˜๊ฑฐ๋‚˜, ์˜์ƒ์ •๋ณด ๋ณดํ˜ธ๋ฅผ ์œ„ํ•œ ์ ‘๊ทผํ†ต์ œ ๋ฐ ๋กœ๊น… ๋“ฑ ๋ฐฉ์นจ์— ๊ธฐ์ˆ ํ•œ ์‚ฌํ•ญ์ด ์ค€์ˆ˜๋˜์ง€ ์•Š๋Š” ๋“ฑ ๊ด€๋ฆฌ๊ฐ€ ๋ฏธํกํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ์˜ ์„ค์น˜ยท์šด์˜ ์‚ฌ๋ฌด๋ฅผ ์™ธ๋ถ€์—…์ฒด์— ์œ„ํƒํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์˜์ƒ์ •๋ณด์˜ ๊ด€๋ฆฌ ํ˜„ํ™ฉ ์ ๊ฒ€์— ๊ด€ํ•œ ์‚ฌํ•ญ, ์†ํ•ด๋ฐฐ์ƒ ์ฑ…์ž„์— ๊ด€ํ•œ ์‚ฌํ•ญ ๋“ฑ ๋ฒ•๋ น์—์„œ ์š”๊ตฌํ•˜๋Š” ๋‚ด์šฉ์„ ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์—…๋ฌด ์œ„ํƒ ๊ณ„์•ฝ์„œ์— ๋ช…์‹œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ์˜ ์„ค์น˜ยท์šด์˜ ์‚ฌ๋ฌด๋ฅผ ์™ธ๋ถ€์—…์ฒด์— ์œ„ํƒํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ ์•ˆ๋‚ดํŒ์— ์ˆ˜ํƒ์ž์˜ ๋ช…์นญ๊ณผ ์—ฐ๋ฝ์ฒ˜๋ฅผ ๋ˆ„๋ฝํ•˜์—ฌ ๊ณ ์ง€ํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ25์กฐ(๊ณ ์ •ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ์˜ ์„ค์น˜ยท์šด์˜ ์ œํ•œ), ์ œ25์กฐ์˜2(์ด๋™ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ์˜ ์šด์˜ ์ œํ•œ)"]}],"description": "๊ณ ์ •ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋ฅผ ๊ณต๊ฐœ๋œ ์žฅ์†Œ์— ์„ค์น˜ยท์šด์˜ํ•˜๊ฑฐ๋‚˜ ์ด๋™ํ˜• ์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋ฅผ ๊ณต๊ฐœ๋œ ์žฅ์†Œ์—์„œ ์—…๋ฌด๋ฅผ ๋ชฉ์ ์œผ๋กœ ์šด์˜ํ•˜๋Š” ๊ฒฝ์šฐ ์„ค์น˜ ๋ชฉ์  ๋ฐ ์œ„์น˜์— ๋”ฐ๋ผ ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ์ค€์ˆ˜ํ•˜๊ณ , ์ ์ ˆํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.1.7": {"name": "๋งˆ์ผ€ํŒ… ๋ชฉ์ ์˜ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ยท์ด์šฉ","checks": {},"status": 
"PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.1.7 ๋งˆ์ผ€ํŒ… ๋ชฉ์ ์˜ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ยท์ด์šฉ","Subdomain": "3.1. ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์˜จ๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ๋ชจ๋ฐ”์ผ์•ฑ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ์ด๋ฒคํŠธ ์ฐธ์—ฌ ๋“ฑ)","์˜คํ”„๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹(ํšŒ์›๊ฐ€์ž…์‹ ์ฒญ์„œ ๋“ฑ)","๋งˆ์ผ€ํŒ… ๋™์˜ ๊ธฐ๋ก","๊ด‘๊ณ ์„ฑ ์ •๋ณด์ „์†ก ์ˆ˜์‹ ๋™์˜ ๊ธฐ๋ก ๋ฐ ์ˆ˜์‹ ๋™์˜ ์˜์‚ฌํ™•์ธ ๊ธฐ๋ก","๊ด‘๊ณ ์„ฑ ์ •๋ณด ๋ฐœ์†ก ์‹œ์Šคํ…œ ๊ด€๋ฆฌ์ž ํ™”๋ฉด(๋ฉ”์ผ, SMS, ์•ฑ ํ‘ธ์‹œ ๋“ฑ)","๊ด‘๊ณ ์„ฑ ์ •๋ณด ๋ฐœ์†ก ๋ฌธ๊ตฌ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์žฌํ™”๋‚˜ ์„œ๋น„์Šค๋ฅผ ํ™๋ณดํ•˜๊ฑฐ๋‚˜ ํŒ๋งค๋ฅผ ๊ถŒ์œ ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์— ๋Œ€ํ•œ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์ด๋ฅผ ๋ช…ํ™•ํ•˜๊ฒŒ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ์•Œ๋ฆฌ๊ณ  ๋ณ„๋„์˜ ๋™์˜๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ „์ž์  ์ „์†ก๋งค์ฒด๋ฅผ ์ด์šฉํ•˜์—ฌ ์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด๋ฅผ ์ „์†กํ•˜๋Š” ๊ฒฝ์šฐ ์ˆ˜์‹ ์ž์˜ ๋ช…์‹œ์ ์ธ ์‚ฌ์ „ ๋™์˜๋ฅผ ๋ฐ›๊ณ  ์žˆ์œผ๋ฉฐ, 2๋…„๋งˆ๋‹ค ์ •๊ธฐ์ ์œผ๋กœ ์ˆ˜์‹ ์ž์˜ ์ˆ˜์‹ ๋™์˜ ์—ฌ๋ถ€๋ฅผ ํ™•์ธํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ „์ž์  ์ „์†ก๋งค์ฒด๋ฅผ ์ด์šฉํ•œ ์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก์— ๋Œ€ํ•˜์—ฌ ์ˆ˜์‹ ์ž๊ฐ€ ์ˆ˜์‹ ๊ฑฐ๋ถ€์˜์‚ฌ๋ฅผ ํ‘œ์‹œํ•˜๊ฑฐ๋‚˜ ์‚ฌ์ „ ๋™์˜๋ฅผ ์ฒ ํšŒํ•œ ๊ฒฝ์šฐ ์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก์„ ์ค‘๋‹จํ•˜๋„๋ก ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด๋ฅผ ์ „์†กํ•˜๋Š” ๊ฒฝ์šฐ ์ „์†ก์ž์˜ ๋ช…์นญ, ์ˆ˜์‹ ๊ฑฐ๋ถ€ ๋ฐฉ๋ฒ• ๋“ฑ์„ ๊ตฌ์ฒด์ ์œผ๋กœ ๋ฐํžˆ๊ณ  ์žˆ์œผ๋ฉฐ, ์•ผ๊ฐ„์‹œ๊ฐ„์—๋Š” ์ „์†กํ•˜์ง€ ์•Š๋„๋ก ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : สปํ™๋ณด ๋ฐ ๋งˆ์ผ€ํŒ…สผ ๋ชฉ์ ์œผ๋กœ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ํ•˜๋ฉด์„œ สป๋ถ€๊ฐ€์„œ๋น„์Šค ์ œ๊ณตสผ, สป์ œํœด ์„œ๋น„์Šค ์ œ๊ณตสผ ๋“ฑ๊ณผ ๊ฐ™์ด ๋ชฉ์ ์„ ๋ชจํ˜ธํ•˜๊ฒŒ ์•ˆ๋‚ดํ•˜๋Š” ๊ฒฝ์šฐ ๋˜๋Š” ๋‹ค๋ฅธ ๋ชฉ์ ์œผ๋กœ ์ˆ˜์ง‘ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์™€ ๊ตฌ๋ถ„ํ•˜์ง€ ์•Š๊ณ  ํฌ๊ด„ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ชจ๋ฐ”์ผ ์•ฑ์—์„œ ๊ด‘๊ณ ์„ฑ ์ •๋ณด์ „์†ก(์•ฑ ํ‘ธ์‹œ)์— ๋Œ€ํ•˜์—ฌ ๊ฑฐ๋ถ€ ์˜์‚ฌ๋ฅผ ๋ฐํ˜”์œผ๋‚˜, ํ”„๋กœ๊ทธ๋žจ ์˜ค๋ฅ˜ ๋“ฑ์˜ ์ด์œ ๋กœ ๊ด‘๊ณ ์„ฑ ์•ฑ ํ‘ธ์‹œ๊ฐ€ ์ด๋ฃจ์–ด์ง€๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์˜จ๋ผ์ธ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด์—์„œ ๋ฌธ์ž, ์ด๋ฉ”์ผ์— ์˜ํ•œ ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก์— ๋Œ€ํ•˜์—ฌ ๋””ํดํŠธ๋กœ ์ฒดํฌ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ˆ˜์‹ ๋™์˜ ์—ฌ๋ถ€์— ๋Œ€ํ•˜์—ฌ 2๋…„๋งˆ๋‹ค ํ™•์ธํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์˜๋ฆฌ๋ชฉ์ ์˜ ๊ด‘๊ณ ์„ฑ ์ •๋ณด๋ฅผ ์ „์ž์šฐํŽธ์œผ๋กœ ์ „์†กํ•˜๋ฉด์„œ ์ œ๋ชฉ์ด ์‹œ์ž‘๋˜๋Š” ๋ถ€๋ถ„์— สป(๊ด‘๊ณ )สผ ํ‘œ์‹œ๋ฅผ ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ22์กฐ(๋™์˜๋ฅผ ๋ฐ›๋Š” ๋ฐฉ๋ฒ•)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ50์กฐ(๊ด‘๊ณ ์„ฑ ์ •๋ณด ์ „์†ก ์ œํ•œ)"]}],"description": "์žฌํ™”๋‚˜ ์„œ๋น„์Šค์˜ ํ™๋ณด, ํŒ๋งค ๊ถŒ์œ , ๊ด‘๊ณ ์„ฑ ์ •๋ณด์ „์†ก ๋“ฑ ๋งˆ์ผ€ํŒ… ๋ชฉ์ ์œผ๋กœ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ ๋ฐ์ด์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๊ทธ ๋ชฉ์ ์„ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ๋ช…ํ™•ํ•˜๊ฒŒ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ณ ์ง€ํ•˜๊ณ  ๋™์˜๋ฅผ ๋ฐ›์•„์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.1": {"name": "๊ฐœ์ธ์ •๋ณด ํ˜„ํ™ฉ๊ด€๋ฆฌ","checks": {"macie_is_enabled": "PASS","s3_bucket_lifecycle_enabled": "FAIL"},"status": "FAIL","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.2.1 ๊ฐœ์ธ์ •๋ณด ํ˜„ํ™ฉ๊ด€๋ฆฌ","Subdomain": "3.2. 
๊ฐœ์ธ์ •๋ณด ๋ณด์œ  ๋ฐ ์ด์šฉ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ํ˜„ํ™ฉํ‘œ","๊ฐœ์ธ์ •๋ณด ํ๋ฆ„ํ‘œยทํ๋ฆ„๋„","๊ฐœ์ธ์ •๋ณดํŒŒ์ผ ๋“ฑ๋ก ํ˜„ํ™ฉ","๊ฐœ์ธ์ •๋ณดํŒŒ์ผ ๊ด€๋ฆฌ๋Œ€์žฅ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ด€ํ•œ ์‚ฌํ•ญ์„ ๊ธฐ๋กํ•œ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","๏ฝข์กฐ์„ธ๋ฒ”์ฒ˜๋ฒŒ๋ฒ•๏ฝฃ์— ๋”ฐ๋ฅธ ๋ฒ”์น™ํ–‰์œ„ ์กฐ์‚ฌ ๋ฐ ๏ฝข๊ด€์„ธ๋ฒ•๏ฝฃ์— ๋”ฐ๋ฅธ ๋ฒ”์น™ํ–‰์œ„ ์กฐ์‚ฌ์— ๊ด€ํ•œ ์‚ฌํ•ญ์„ ๊ธฐ๋กํ•œ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","์ผํšŒ์„ฑ์œผ๋กœ ์šด์˜๋˜๋Š” ํŒŒ์ผ ๋“ฑ ์ง€์†์ ์œผ๋กœ ๊ด€๋ฆฌํ•  ํ•„์š”๊ฐ€ ๋‚ฎ๋‹ค๊ณ  ์ธ์ •๋˜์–ด ๋Œ€ํ†ต๋ น๋ น์œผ๋กœ ์ •ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","ํšŒ์˜ ์ฐธ์„ ์ˆ˜๋‹น ์ง€๊ธ‰, ์ž๋ฃŒยท๋ฌผํ’ˆ์˜ ์†ก๋ถ€, ๊ธˆ์ „์˜ ์ •์‚ฐ ๋“ฑ ๋‹จ์ˆœ ์—…๋ฌด ์ˆ˜ํ–‰์„ ์œ„ํ•ด ์šด์˜๋˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ๋กœ์„œ ์ง€์†์  ๊ด€๋ฆฌ ํ•„์š”์„ฑ์ด ๋‚ฎ์€ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","๊ณต์ค‘์œ„์ƒ ๋“ฑ ๊ณต๊ณต์˜ ์•ˆ์ „๊ณผ ์•ˆ๋…•์„ ์œ„ํ•˜์—ฌ ๊ธด๊ธ‰ํžˆ ํ•„์š”ํ•œ ๊ฒฝ์šฐ๋กœ์„œ ์ผ์‹œ์ ์œผ๋กœ ์ฒ˜๋ฆฌ๋˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","๊ทธ ๋ฐ–์— ์ผํšŒ์  ์—…๋ฌด ์ฒ˜๋ฆฌ๋งŒ์„ ์œ„ํ•ด ์ˆ˜์ง‘๋œ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ๋กœ์„œ ์ €์žฅ๋˜๊ฑฐ๋‚˜ ๊ธฐ๋ก๋˜์ง€ ์•Š๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","๋‹ค๋ฅธ ๋ฒ•๋ น์— ๋”ฐ๋ผ ๋น„๋ฐ€๋กœ ๋ถ„๋ฅ˜๋œ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","๊ตญ๊ฐ€์•ˆ์ „๋ณด์žฅ๊ณผ ๊ด€๋ จ๋œ ์ •๋ณด ๋ถ„์„์„ ๋ชฉ์ ์œผ๋กœ ์ˆ˜์ง‘ ๋˜๋Š” ์ œ๊ณต ์š”์ฒญ๋˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ","์˜์ƒ์ •๋ณด์ฒ˜๋ฆฌ๊ธฐ๊ธฐ๋ฅผ ํ†ตํ•˜์—ฌ ์ฒ˜๋ฆฌ๋˜๋Š” ๊ฐœ์ธ์˜์ƒ์ •๋ณดํŒŒ์ผ","๏ฝข๊ธˆ์œต์‹ค๋ช…๊ฑฐ๋ž˜ ๋ฐ ๋น„๋ฐ€๋ณด์žฅ์— ๊ด€ํ•œ ๋ฒ•๋ฅ ๏ฝฃ์— ๋”ฐ๋ฅธ ๊ธˆ์œต๊ธฐ๊ด€์ด ๊ธˆ์œต์—…๋ฌด ์ทจ๊ธ‰์„ ์œ„ํ•˜์—ฌ ๋ณด์œ ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ"],"AuditChecklist": ["์ˆ˜์ง‘ยท๋ณด์œ ํ•˜๊ณ  ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด์˜ ํ•ญ๋ชฉ, ๋ณด์œ ๋Ÿ‰, ์ฒ˜๋ฆฌ ๋ชฉ์  ๋ฐ ๋ฐฉ๋ฒ•, ๋ณด์œ ๊ธฐ๊ฐ„ ๋“ฑ ํ˜„ํ™ฉ์„ ์ •๊ธฐ์ ์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ณต๊ธฐ๊ด€์ด ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์„ ์šด์šฉํ•˜๊ฑฐ๋‚˜ ๋ณ€๊ฒฝํ•˜๋Š” ๊ฒฝ์šฐ ๊ด€๋ จ๋œ ์‚ฌํ•ญ์„ ๋ฒ•๋ฅ ์—์„œ ์ •ํ•œ ๊ด€๊ณ„๊ธฐ๊ด€์˜ ์žฅ์—๊ฒŒ ๋“ฑ๋กํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ณต๊ธฐ๊ด€์€ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์˜ ๋ณด์œ  ํ˜„ํ™ฉ์„ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์„ ํ™ˆํŽ˜์ด์ง€์˜ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ ๋“ฑ๋ก ๋ฉ”๋‰ด๋ฅผ ํ†ตํ•˜์—ฌ ๋ชฉ๋ก์„ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ทธ ์ค‘ ์ผ๋ถ€ ํ™ˆํŽ˜์ด์ง€ ์„œ๋น„์Šค์™€ ๊ด€๋ จ๋œ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์˜ ๋‚ด์šฉ์ด ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์‹ ๊ทœ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์„ ๊ตฌ์ถ•ํ•œ ์ง€ 2๊ฐœ์›”์ด ๊ฒฝ๊ณผํ•˜์˜€์œผ๋‚˜, ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์„ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์œ„์›ํšŒ์— ๋“ฑ๋กํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์œ„์›ํšŒ์— ๋“ฑ๋ก๋˜์–ด ๊ณต๊ฐœ๋œ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์˜ ๋‚ด์šฉ(์ˆ˜์ง‘ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์˜ ํ•ญ๋ชฉ ๋“ฑ)์ด ์‹ค์ œ ์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ ํ˜„ํ™ฉ๊ณผ ์ƒ์ดํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ณต๊ณต๊ธฐ๊ด€์ด ์ž„์ง์›์˜ ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ, ํ†ต๊ณ„๋ฒ•์— ๋”ฐ๋ผ ์ˆ˜์ง‘๋˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์— ๋Œ€ํ•ด ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ ๋“ฑ๋ก ์˜ˆ์™ธ์‚ฌํ•ญ์— ํ•ด๋‹น๋˜์ง€ ์•Š์Œ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์„ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์œ„์›ํšŒ์— ๋“ฑ๋กํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ32์กฐ(๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์˜ ๋“ฑ๋ก ๋ฐ ๊ณต๊ฐœ)"]}],"description": "์ˆ˜์ง‘ยท๋ณด์œ ํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด์˜ ํ•ญ๋ชฉ, ๋ณด์œ ๋Ÿ‰, ์ฒ˜๋ฆฌ ๋ชฉ์  ๋ฐ ๋ฐฉ๋ฒ•, ๋ณด์œ ๊ธฐ๊ฐ„ ๋“ฑ ํ˜„ํ™ฉ์„ ์ •๊ธฐ์ ์œผ๋กœ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ๊ณต๊ณต๊ธฐ๊ด€์˜ ๊ฒฝ์šฐ ์ด๋ฅผ ๋ฒ•๋ฅ ์—์„œ ์ •ํ•œ ๊ด€๊ณ„๊ธฐ๊ด€์˜ ์žฅ์—๊ฒŒ ๋“ฑ๋กํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 1,"pass": 1,"total": 2,"manual": 0}},"3.2.2": {"name": "๊ฐœ์ธ์ •๋ณด ํ’ˆ์งˆ๋ณด์žฅ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.2.2 ๊ฐœ์ธ์ •๋ณด ํ’ˆ์งˆ๋ณด์žฅ","Subdomain": "3.2. 
๊ฐœ์ธ์ •๋ณด ๋ณด์œ  ๋ฐ ์ด์šฉ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์ •๋ณด์ฃผ์ฒด ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ •ยท๋ณ€๊ฒฝ ์–‘์‹(์˜จ๋ผ์ธ, ์˜คํ”„๋ผ์ธ)","๊ฐœ์ธ์ •๋ณด ์ตœ์‹ ์„ฑ ์œ ์ง€ ์ ˆ์ฐจ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด๋ฅผ ์ตœ์‹ ์˜ ์ƒํƒœ๋กœ ์ •ํ™•ํ•˜๊ฒŒ ์œ ์ง€ํ•˜๊ธฐ ์œ„ํ•œ ์ ˆ์ฐจ ๋ฐ ๋ฐฉ์•ˆ์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด๊ฐ€ ๋ณธ์ธ์˜ ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•˜์—ฌ ์ •ํ™•์„ฑ, ์™„์ „์„ฑ ๋ฐ ์ตœ์‹ ์„ฑ์„ ์œ ์ง€ํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์„ ์ œ๊ณตํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€๋ฅผ ํ†ตํ•˜์—ฌ ํšŒ์›์ •๋ณด๋ฅผ ๋ณ€๊ฒฝํ•  ๋•Œ๋Š” ๋ณธ์ธํ™•์ธ ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜๊ณ  ์žˆ์œผ๋‚˜, ๊ณ ๊ฐ์„ผํ„ฐ ์ƒ๋‹ด์›๊ณผ์˜ ํ†ตํ™”๋ฅผ ํ†ตํ•œ ํšŒ์› ์ •๋ณด ๋ณ€๊ฒฝ ์‹œ์—๋Š” ๋ณธ์ธํ™•์ธ ์ ˆ์ฐจ๊ฐ€ ๋ฏธํกํ•˜์—ฌ ํšŒ์›์ •๋ณด์˜ ๋ถˆ๋ฒ•์ ์ธ ๋ณ€๊ฒฝ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์˜จ๋ผ์ธ ํšŒ์›์— ๋Œ€ํ•ด์„œ๋Š” ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ณ€๊ฒฝํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์„ ์ œ๊ณตํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์˜คํ”„๋ผ์ธ ํšŒ์›์— ๋Œ€ํ•ด์„œ๋Š” ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ณ€๊ฒฝํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์„ ์ œ๊ณตํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ3์กฐ(๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ์›์น™)"]}],"description": "์ˆ˜์ง‘๋œ ๊ฐœ์ธ์ •๋ณด๋Š” ์ฒ˜๋ฆฌ ๋ชฉ์ ์— ํ•„์š”ํ•œ ๋ฒ”์œ„์—์„œ ๊ฐœ์ธ์ •๋ณด์˜ ์ •ํ™•์„ฑยท์™„์ „์„ฑยท์ตœ์‹ ์„ฑ์ด ๋ณด์žฅ๋˜๋„๋ก ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ด€๋ฆฌ์ ˆ์ฐจ๋ฅผ ์ œ๊ณตํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.3": {"name": "์ด์šฉ์ž ๋‹จ๋ง๊ธฐ ์ ‘๊ทผ ๋ณดํ˜ธ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.2.3 ์ด์šฉ์ž ๋‹จ๋ง๊ธฐ ์ ‘๊ทผ ๋ณดํ˜ธ","Subdomain": "3.2. ๊ฐœ์ธ์ •๋ณด ๋ณด์œ  ๋ฐ ์ด์šฉ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์•ฑ ์ ‘๊ทผ๊ถŒํ•œ ๋™์˜ ํ™”๋ฉด","์•ฑ ์ ‘๊ทผ๊ถŒํ•œ ์„ค์ • ํ˜„ํ™ฉ"],"AuditChecklist": ["์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)์˜ ์ด๋™ํ†ต์‹ ๋‹จ๋ง์žฅ์น˜ ๋‚ด์— ์ €์žฅ๋˜์–ด ์žˆ๋Š” ์ •๋ณด ๋ฐ ์ด๋™ํ†ต์‹ ๋‹จ๋ง์žฅ์น˜์— ์„ค์น˜๋œ ๊ธฐ๋Šฅ์— ๋Œ€ํ•˜์—ฌ ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ๊ถŒํ•œ์ด ํ•„์š”ํ•œ ๊ฒฝ์šฐ ๋ช…ํ™•ํ•˜๊ฒŒ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ์•Œ๋ฆฌ๊ณ  ์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)์˜ ๋™์˜๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ด๋™ํ†ต์‹ ๋‹จ๋ง์žฅ์น˜ ๋‚ด์—์„œ ํ•ด๋‹น ์„œ๋น„์Šค๋ฅผ ์ œ๊ณตํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๋ฐ˜๋“œ์‹œ ํ•„์š”ํ•œ ์ ‘๊ทผ๊ถŒํ•œ์ด ์•„๋‹Œ ๊ฒฝ์šฐ, ์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)๊ฐ€ ๋™์˜ํ•˜์ง€ ์•Š์•„๋„ ์„œ๋น„์Šค ์ œ๊ณต์„ ๊ฑฐ๋ถ€ํ•˜์ง€ ์•Š๋„๋ก ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ด๋™ํ†ต์‹ ๋‹จ๋ง์žฅ์น˜ ๋‚ด์—์„œ ํ•ด๋‹น ์ ‘๊ทผ๊ถŒํ•œ์— ๋Œ€ํ•œ ์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)์˜ ๋™์˜ ๋ฐ ์ฒ ํšŒ๋ฐฉ๋ฒ•์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์Šค๋งˆํŠธํฐ ์•ฑ์—์„œ ์„œ๋น„์Šค์— ๋ถˆํ•„์š”ํ•จ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ์ฃผ์†Œ๋ก, ์‚ฌ์ง„, ๋ฌธ์ž ๋“ฑ ์Šค๋งˆํŠธํฐ ๋‚ด ๊ฐœ์ธ์ •๋ณด ์˜์—ญ์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ๊ถŒํ•œ์„ ๊ณผ๋„ํ•˜๊ฒŒ ์„ค์ •ํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ •๋ณดํ†ต์‹ ์„œ๋น„์Šค ์ œ๊ณต์ž์˜ ์Šค๋งˆํŠธํฐ ์•ฑ์—์„œ ์Šค๋งˆํŠธํฐ ๋‚ด์— ์ €์žฅ๋˜์–ด ์žˆ๋Š” ์ •๋ณด ๋ฐ ์„ค์น˜๋œ ๊ธฐ๋Šฅ์— ์ ‘๊ทผํ•˜๋ฉด์„œ ์ ‘๊ทผ๊ถŒํ•œ์— ๋Œ€ํ•œ ๊ณ ์ง€ ๋ฐ ๋™์˜๋ฅผ ๋ฐ›์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์Šค๋งˆํŠธํฐ ์•ฑ์˜ ์ ‘๊ทผ๊ถŒํ•œ์— ๋Œ€ํ•œ ๋™์˜๋ฅผ ๋ฐ›์œผ๋ฉด์„œ ์„ ํƒ์‚ฌํ•ญ์— ํ•ด๋‹นํ•˜๋Š” ๊ถŒํ•œ์„ ํ•„์ˆ˜๊ถŒํ•œ์œผ๋กœ ๊ณ ์ง€ํ•˜์—ฌ ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ ‘๊ทผ๊ถŒํ•œ์— ๋Œ€ํ•œ ๊ฐœ๋ณ„๋™์˜๊ฐ€ ๋ถˆ๊ฐ€๋Šฅํ•œ ์•ˆ๋“œ๋กœ์ด๋“œ 6.0 ๋ฏธ๋งŒ ๋ฒ„์ „์„ ์ง€์›ํ•˜๋Š” ์Šค๋งˆํŠธํฐ ์•ฑ์„ ๋ฐฐํฌํ•˜๋ฉด์„œ ์„ ํƒ์  ์ ‘๊ทผ๊ถŒํ•œ์„ ํ•จ๊ป˜ ์„ค์ •ํ•˜์—ฌ, ์„ ํƒ์  ์ ‘๊ทผ๊ถŒํ•œ์— ๋Œ€ํ•˜์—ฌ ๊ฑฐ๋ถ€ํ•  ์ˆ˜ ์—†๋„๋ก ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ22์กฐ์˜2(์ ‘๊ทผ๊ถŒํ•œ์— ๋Œ€ํ•œ ๋™์˜)"]}],"description": "์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)์˜ ์ด๋™ํ†ต์‹ ๋‹จ๋ง์žฅ์น˜ ๋‚ด์— ์ €์žฅ๋˜์–ด 
์žˆ๋Š” ์ •๋ณด ๋ฐ ์ด๋™ํ†ต์‹ ๋‹จ๋ง์žฅ์น˜์— ์„ค์น˜๋œ ๊ธฐ๋Šฅ์— ์ ‘๊ทผ์ด ํ•„์š”ํ•œ ๊ฒฝ์šฐ ์ด๋ฅผ ๋ช…ํ™•ํ•˜๊ฒŒ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ์•Œ๋ฆฌ๊ณ  ์ •๋ณด์ฃผ์ฒด(์ด์šฉ์ž)์˜ ๋™์˜๋ฅผ ๋ฐ›์•„์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.4": {"name": "๊ฐœ์ธ์ •๋ณด ๋ชฉ์  ์™ธ ์ด์šฉ ๋ฐ ์ œ๊ณต","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.2.4 ๊ฐœ์ธ์ •๋ณด ๋ชฉ์  ์™ธ ์ด์šฉ ๋ฐ ์ œ๊ณต","Subdomain": "3.2. ๊ฐœ์ธ์ •๋ณด ๋ณด์œ  ๋ฐ ์ด์šฉ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ๋ชฉ์  ์™ธ ์ด์šฉ ๋ฐ ์ œ3์ž ์ œ๊ณต ๋‚ด์—ญ(์š”์ฒญ์„œ ๋“ฑ ๊ด€๋ จ ์ฆ๊ฑฐ์ž๋ฃŒ ํฌํ•จ)","๊ฐœ์ธ์ •๋ณด ๋ชฉ์  ์™ธ ์ด์šฉ ๋ฐ ์ œ3์ž ์ œ๊ณต ๋Œ€์žฅ(๊ณต๊ณต๊ธฐ๊ด€์ธ ๊ฒฝ์šฐ)","ํ™ˆํŽ˜์ด์ง€ ๋˜๋Š” ๊ด€๋ณด ๊ฒŒ์žฌ ๋‚ด์—ญ(๊ณต๊ณต๊ธฐ๊ด€์ธ ๊ฒฝ์šฐ)","์ž๋ฃŒ ์ œ๊ณต ์š”์ฒญ ๋Œ€์‘ ์ง€์นจ","์ž๋ฃŒ ์ œ๊ณต ์š”์ฒญ ๊ณต๋ฌธ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์ œ๊ณต๋‚ด์—ญ, ๋Œ€์žฅ ๋“ฑ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด๋Š” ์ตœ์ดˆ ์ˆ˜์ง‘ ์‹œ ์ •๋ณด์ฃผ์ฒด๋กœ๋ถ€ํ„ฐ ๋™์˜๋ฐ›์€ ๋ชฉ์  ๋˜๋Š” ๋ฒ•๋ น์— ๊ทผ๊ฑฐํ•œ ๋ฒ”์œ„ ๋‚ด์—์„œ๋งŒ ์ด์šฉยท์ œ๊ณตํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณต๋ฐ›์€ ๊ฒฝ์šฐ ์ œ๊ณต๋ฐ›์€ ๋ชฉ์ ์˜ ๋ฒ”์œ„ ๋‚ด์—์„œ๋งŒ ์ด์šฉยท์ œ๊ณตํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ์ˆ˜์ง‘ ๋ชฉ์  ๋˜๋Š” ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๋กœ๋ถ€ํ„ฐ ์ œ๊ณต๋ฐ›์€ ๋ชฉ์ ์˜ ๋ฒ”์œ„๋ฅผ ์ดˆ๊ณผํ•˜์—ฌ ์ด์šฉํ•˜๊ฑฐ๋‚˜ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๋ณ„๋„์˜ ๋™์˜๋ฅผ ๋ฐ›๊ฑฐ๋‚˜ ๋ฒ•์  ๊ทผ๊ฑฐ๊ฐ€ ์žˆ๋Š” ๊ฒฝ์šฐ๋กœ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ชฉ์  ์™ธ์˜ ์šฉ๋„๋กœ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ์ œ๊ณต๋ฐ›๋Š” ์ž์—๊ฒŒ ์ด์šฉ๋ชฉ์  ๋ฐ ๋ฐฉ๋ฒ• ๋“ฑ์„ ์ œํ•œํ•˜๊ฑฐ๋‚˜ ์•ˆ์ „์„ฑ ํ™•๋ณด๋ฅผ ์œ„ํ•˜์—ฌ ํ•„์š”ํ•œ ์กฐ์น˜๋ฅผ ๋งˆ๋ จํ•˜๋„๋ก ์š”์ฒญํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ณต๊ธฐ๊ด€์ด ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ชฉ์  ์™ธ์˜ ์šฉ๋„๋กœ ์ด์šฉํ•˜๊ฑฐ๋‚˜ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ๊ทธ ์ด์šฉ ๋˜๋Š” ์ œ๊ณต์˜ ๋ฒ•์  ๊ทผ๊ฑฐ, ๋ชฉ์  ๋ฐ ๋ฒ”์œ„ ๋“ฑ์— ๊ด€ํ•˜์—ฌ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ๊ด€๋ณด ๋˜๋Š” ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€ ๋“ฑ์— ๊ฒŒ์žฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ณต๊ธฐ๊ด€ ๋“ฑ์ด ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ชฉ์  ์™ธ์˜ ์šฉ๋„๋กœ ์ด์šฉํ•˜๊ฑฐ๋‚˜ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ๋ชฉ์  ์™ธ ์ด์šฉ ๋ฐ ์ œ3์ž ์ œ๊ณต๋Œ€์žฅ์— ๊ธฐ๋กยท๊ด€๋ฆฌํ•˜๋Š” ๋“ฑ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ƒํ’ˆ๋ฐฐ์†ก์„ ๋ชฉ์ ์œผ๋กœ ์ˆ˜์ง‘ํ•œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์‚ฌ์ „์— ๋™์˜ ๋ฐ›์ง€ ์•Š์€ ์ž์‚ฌ ์ƒํ’ˆ์˜ ํ†ต์‹ ํŒ๋งค ๊ด‘๊ณ ์— ์ด์šฉํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ณ ๊ฐ ๋งŒ์กฑ๋„ ์กฐ์‚ฌ, ๊ฒฝํ’ˆ ํ–‰์‚ฌ์— ์‘๋ชจํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ˆ˜์ง‘ํ•œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ž์‚ฌ์˜ ํ• ์ธํŒ๋งคํ–‰์‚ฌ ์•ˆ๋‚ด์šฉ ๊ด‘๊ณ  ๋ฐœ์†ก์— ์ด์šฉํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ณต๊ณต๊ธฐ๊ด€์ด ๋‹ค๋ฅธ ๋ฒ•๋ฅ ์— ๊ทผ๊ฑฐํ•˜์—ฌ ๋ฏผ์›์ธ์˜ ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋ชฉ์  ์™ธ๋กœ ํƒ€ ๊ธฐ๊ด€์— ์ œ๊ณตํ•˜๋ฉด์„œ ๊ด€๋ จ ์‚ฌํ•ญ์„ ๊ด€๋ณด ๋˜๋Š” ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€์— ๊ฒŒ์‹œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ณต๊ณต๊ธฐ๊ด€์ด ๋ฒ”์ฃ„ ์ˆ˜์‚ฌ์˜ ๋ชฉ์ ์œผ๋กœ ๊ฒฝ์ฐฐ์„œ์— ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณตํ•˜๋ฉด์„œ สป๊ฐœ์ธ์ •๋ณด ๋ชฉ์  ์™ธ ์ด์šฉ ๋ฐ ์ œ3์ž ์ œ๊ณต ๋Œ€์žฅสผ์— ๊ด€๋ จ ์‚ฌํ•ญ์„ ๊ธฐ๋กํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ18์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ๋ชฉ์  ์™ธ ์ด์šฉยท์ œ๊ณต ์ œํ•œ), ์ œ19์กฐ(๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณต๋ฐ›์€ ์ž์˜ ์ด์šฉยท์ œ๊ณต ์ œํ•œ)"]}],"description": "๊ฐœ์ธ์ •๋ณด๋Š” ์ˆ˜์ง‘ ์‹œ์˜ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ณ ์ง€ยท๋™์˜๋ฅผ ๋ฐ›์€ ๋ชฉ์  ๋˜๋Š” ๋ฒ•๋ น์— ๊ทผ๊ฑฐํ•œ ๋ฒ”์œ„ ๋‚ด์—์„œ๋งŒ ์ด์šฉ ๋˜๋Š” ์ œ๊ณตํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ์ด๋ฅผ ์ดˆ๊ณผํ•˜์—ฌ ์ด์šฉยท์ œ๊ณตํ•˜๋ ค๋Š” ๋•Œ์—๋Š” ์ •๋ณด์ฃผ์ฒด์˜ ์ถ”๊ฐ€ ๋™์˜๋ฅผ ๋ฐ›๊ฑฐ๋‚˜ ๊ด€๊ณ„ ๋ฒ•๋ น์— ๋”ฐ๋ฅธ ์ ๋ฒ•ํ•œ ๊ฒฝ์šฐ์ธ์ง€ ํ™•์ธํ•˜๊ณ  ์ ์ 
ˆํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.2.5": {"name": "๊ฐ€๋ช…์ •๋ณด ์ฒ˜๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.2.5 ๊ฐ€๋ช…์ •๋ณด ์ฒ˜๋ฆฌ","Subdomain": "3.2. ๊ฐœ์ธ์ •๋ณด ๋ณด์œ  ๋ฐ ์ด์šฉ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐ€๋ช…์ฒ˜๋ฆฌยท์ต๋ช…์ฒ˜๋ฆฌ ์ ์ •์„ฑ ํ‰๊ฐ€ ์ ˆ์ฐจ ๋ฐ ๊ฒฐ๊ณผ","๊ฐ€๋ช…์ •๋ณด ์ฒ˜๋ฆฌ ๊ธฐ๋ก","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ(๊ฐ€๋ช…์ •๋ณด ์ด์šฉยท์ œ๊ณต์— ๊ด€ํ•œ ์‚ฌํ•ญ) ๋“ฑ"],"AuditChecklist": ["๊ฐ€๋ช…์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ ๋ชฉ์  ์ œํ•œ, ๊ฐ€๋ช…์ฒ˜๋ฆฌ ๋ฐฉ๋ฒ• ๋ฐ ๊ธฐ์ค€, ์ ์ •์„ฑ ๊ฒ€ํ† , ์žฌ์‹๋ณ„ ๊ธˆ์ง€ ๋ฐ ์žฌ์‹๋ณ„ ๋ฐœ์ƒ ์‹œ ์กฐ์น˜์‚ฌํ•ญ ๋“ฑ ๊ฐ€๋ช…์ •๋ณด๋ฅผ ์ ์ •ํ•˜๊ฒŒ ์ฒ˜๋ฆฌํ•˜๊ธฐ ์œ„ํ•œ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ๊ฐ€๋ช…์ฒ˜๋ฆฌํ•˜์—ฌ ์ด์šฉยท์ œ๊ณต ์‹œ ์ถ”๊ฐ€ ์ •๋ณด์˜ ์‚ฌ์šฉยท๊ฒฐํ•ฉ ์—†์ด๋Š” ๊ฐœ์ธ์„ ์•Œ์•„๋ณผ ์ˆ˜ ์—†๋„๋ก ์ ์ •ํ•œ ์ˆ˜์ค€์œผ๋กœ ๊ฐ€๋ช…์ฒ˜๋ฆฌ๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋‹ค๋ฅธ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž์™€ ๊ฐ€๋ช…์ •๋ณด๋ฅผ ๊ฒฐํ•ฉํ•˜๋Š” ๊ฒฝ์šฐ ๊ฒฐํ•ฉ์ „๋ฌธ๊ธฐ๊ด€ ๋˜๋Š” ๋ฐ์ดํ„ฐ์ „๋ฌธ๊ธฐ๊ด€์„ ํ†ตํ•ด ๊ฒฐํ•ฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐ€๋ช…์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ ์ถ”๊ฐ€ ์ •๋ณด๋ฅผ ์‚ญ์ œ ๋˜๋Š” ๋ณ„๋„๋กœ ๋ถ„๋ฆฌํ•˜์—ฌ ๋ณด๊ด€ยท๊ด€๋ฆฌ, ๊ด€๋ จ ๊ธฐ๋ก์˜ ์ž‘์„ฑยท๋ณด๊ด€ ๋“ฑ ์•ˆ์ „์„ฑ ํ™•๋ณด์— ํ•„์š”ํ•œ ๊ธฐ์ˆ ์ ยท๊ด€๋ฆฌ์  ๋ฐ ๋ฌผ๋ฆฌ์  ์กฐ์น˜๋ฅผ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐ€๋ช…์ •๋ณด ์ฒ˜๋ฆฌ๋ชฉ์  ๋“ฑ์„ ๊ณ ๋ คํ•˜์—ฌ ๊ฐ€๋ช…์ •๋ณด์˜ ์ฒ˜๋ฆฌ ๊ธฐ๊ฐ„์„ ์ ์ •ํ•œ ๊ธฐ๊ฐ„์œผ๋กœ ์ •ํ•˜๊ณ  ์žˆ์œผ๋ฉฐ, ํ•ด๋‹น ๊ธฐ๊ฐ„์ด ๊ฒฝ๊ณผํ•œ ๊ฒฝ์šฐ ์ง€์ฒด ์—†์ด ํŒŒ๊ธฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ์ต๋ช…์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ ์‹œ๊ฐ„ยท๋น„์šฉยท๊ธฐ์ˆ  ๋“ฑ์„ ํ•ฉ๋ฆฌ์ ์œผ๋กœ ๊ณ ๋ คํ•  ๋•Œ ๋‹ค๋ฅธ ์ •๋ณด๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ๋„ ๋” ์ด์ƒ ํŠน์ • ๊ฐœ์ธ์„ ์•Œ์•„๋ณผ ์ˆ˜ ์—†๋„๋ก ์ ์ •ํ•œ ์ˆ˜์ค€์œผ๋กœ ์ต๋ช…์ฒ˜๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํ†ต๊ณ„์ž‘์„ฑ ๋ฐ ๊ณผํ•™์  ์—ฐ๊ตฌ๋ฅผ ์œ„ํ•˜์—ฌ ์ •๋ณด์ฃผ์ฒด ๋™์˜ ์—†์ด ๊ฐ€๋ช…์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋ฉด์„œ ๊ฐ€๋ช…์ •๋ณด ์ฒ˜๋ฆฌ์— ๊ด€ํ•œ ๊ธฐ๋ก์„ ๋‚จ๊ธฐ๊ณ  ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜, ๋˜๋Š” ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ด€๋ จ ์‚ฌํ•ญ์„ ๊ณต๊ฐœํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐ€๋ช…์ •๋ณด์™€ ๋™์ผํ•œ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ๋‚ด์— ์ถ”๊ฐ€ ์ •๋ณด๋ฅผ ๋ถ„๋ฆฌํ•˜์ง€ ์•Š๊ณ  ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๊ฑฐ๋‚˜, ๋˜๋Š” ๊ฐ€๋ช… ์ •๋ณด์™€ ์ถ”๊ฐ€ ์ •๋ณด์— ๋Œ€ํ•œ ์ ‘๊ทผ๊ถŒํ•œ์ด ์ ์ ˆํžˆ ๋ถ„๋ฆฌ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด๋ฅผ ๊ฐ€๋ช…์ฒ˜๋ฆฌํ•˜์—ฌ ํ™œ์šฉํ•˜๊ณ  ์žˆ์œผ๋‚˜ ์ ์ •ํ•œ ์ˆ˜์ค€์˜ ๊ฐ€๋ช…์ฒ˜๋ฆฌ๊ฐ€ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์•„ ์ถ”๊ฐ€ ์ •๋ณด์˜ ์‚ฌ์šฉ ์—†์ด๋„ ๋‹ค๋ฅธ ์ •๋ณด์™€์˜ ๊ฒฐํ•ฉ ๋“ฑ์„ ํ†ตํ•˜์—ฌ ํŠน์ • ๊ฐœ์ธ์„ ์•Œ์•„๋ณผ ์ˆ˜ ์žˆ๋Š” ๊ฐ€๋Šฅ์„ฑ์ด ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ ์ƒ์„ฑ, ์™ธ๋ถ€ ๊ณต๊ฐœ ๋“ฑ์„ ์œ„ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ต๋ช…์ฒ˜๋ฆฌํ•˜์˜€์œผ๋‚˜, ํŠน์ด์น˜ ๋“ฑ์œผ๋กœ ์ธํ•˜์—ฌ ํŠน์ • ๊ฐœ์ธ์— ๋Œ€ํ•œ ์‹๋ณ„๊ฐ€๋Šฅ์„ฑ์ด ์กด์žฌํ•˜๋Š” ๋“ฑ ์ต๋ช…์ฒ˜๋ฆฌ๊ฐ€ ์ ์ •ํ•˜๊ฒŒ ์ˆ˜ํ–‰๋˜์—ˆ๋‹ค๊ณ  ๋ณด๊ธฐ ์–ด๋ ค์šด ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ2์กฐ(์ •์˜), ์ œ28์กฐ์˜2(๊ฐ€๋ช…์ •๋ณด์˜ ์ฒ˜๋ฆฌ ๋“ฑ), ์ œ28์กฐ์˜3(๊ฐ€๋ช…์ •๋ณด์˜ ๊ฒฐํ•ฉ ์ œํ•œ), ์ œ28์กฐ์˜4(๊ฐ€๋ช…์ •๋ณด์— ๋Œ€ํ•œ ์•ˆ์ „์กฐ์น˜์˜๋ฌด ๋“ฑ), ์ œ28์กฐ์˜5(๊ฐ€๋ช…์ •๋ณด ์ฒ˜๋ฆฌ ์‹œ ๊ธˆ์ง€์˜๋ฌด ๋“ฑ), ์ œ28์กฐ์˜7(์ ์šฉ๋ฒ”์œ„), ์ œ58์กฐ์˜2(์ ์šฉ์ œ์™ธ)"]}],"description": "๊ฐ€๋ช…์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ฒฝ์šฐ ๋ชฉ์ ์ œํ•œ, ๊ฒฐํ•ฉ์ œํ•œ, ์•ˆ์ „์กฐ์น˜, ๊ธˆ์ง€์˜๋ฌด ๋“ฑ ๋ฒ•์  ์š”๊ฑด์„ ์ค€์ˆ˜ํ•˜๊ณ  ์ ์ • ์ˆ˜์ค€์˜ ๊ฐ€๋ช…์ฒ˜๋ฆฌ๋ฅผ ๋ณด์žฅํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ฐ€๋ช…์ฒ˜๋ฆฌ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 
0,"manual": 0}},"3.3.1": {"name": "๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.3.1 ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต","Subdomain": "3.3. ๊ฐœ์ธ์ •๋ณด ์ œ๊ณต ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["์˜จ๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๊ด€๋ จ ์–‘์‹(ํ™ˆํŽ˜์ด์ง€ ํšŒ์›๊ฐ€์ž… ํ™”๋ฉด, ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๋™์˜ ํ™”๋ฉด ๋“ฑ)","์˜คํ”„๋ผ์ธ ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๊ด€๋ จ ์–‘์‹(ํšŒ์›๊ฐ€์ž…์‹ ์ฒญ์„œ, ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๋™์˜์„œ ๋“ฑ)","์ œ3์ž ์ œ๊ณต ๋‚ด์—ญ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด ๋™์˜, ๋ฒ•๋ น์ƒ ์˜๋ฌด์ค€์ˆ˜ ๋“ฑ ์ ๋ฒ• ์š”๊ฑด์„ ๋ช…ํ™•ํžˆ ์‹๋ณ„ํ•˜๊ณ  ์ด๋ฅผ ์ค€์ˆ˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ ๊ด€๋ จ ์‚ฌํ•ญ์„ ๋ช…ํ™•ํ•˜๊ฒŒ ๊ณ ์ง€ํ•˜๊ณ  ๋‹ค๋ฅธ ๋™์˜์‚ฌํ•ญ๊ณผ ๊ตฌ๋ถ„ํ•˜์—ฌ ์ ๋ฒ•ํ•˜๊ฒŒ ๋™์˜๋ฅผ ๋ฐ›๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๋™์˜๋ฅผ ๋ฐ›๋Š” ๊ฒฝ์šฐ ๊ด€๋ จ ๋‚ด์šฉ์„ ๋ช…ํ™•ํ•˜๊ฒŒ ๊ณ ์ง€ํ•˜๊ณ  ๋ฒ•๋ น์—์„œ ์ •ํ•œ ์ค‘์š”ํ•œ ๋‚ด์šฉ์— ๋Œ€ํ•ด ๋ช…ํ™•ํžˆ ํ‘œ์‹œํ•˜์—ฌ ์•Œ์•„๋ณด๊ธฐ ์‰ฝ๊ฒŒ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ์ œ๊ณต ๋ชฉ์ ์— ๋งž๋Š” ์ตœ์†Œํ•œ์˜ ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ์œผ๋กœ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ์•ˆ์ „ํ•œ ์ ˆ์ฐจ์™€ ๋ฐฉ๋ฒ•์„ ํ†ตํ•ด ์ œ๊ณตํ•˜๊ณ  ์ œ๊ณต ๋‚ด์—ญ์„ ๊ธฐ๋กํ•˜์—ฌ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ œ3์ž์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด์˜ ์ ‘๊ทผ์„ ํ—ˆ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์•ˆ์ „ํ•˜๊ฒŒ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•œ ๋ณดํ˜ธ์ ˆ์ฐจ์— ๋”ฐ๋ผ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜ ์—†์ด ๊ฐœ์ธ์ •๋ณด์˜ ์ถ”๊ฐ€์ ์ธ ์ œ๊ณต ์‹œ ๋‹น์ดˆ ์ˆ˜์ง‘ ๋ชฉ์ ๊ณผ์˜ ๊ด€๋ จ์„ฑ, ์˜ˆ์ธก๊ฐ€๋Šฅ์„ฑ, ์ด์ต ์นจํ•ด ์—ฌ๋ถ€, ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๋“ฑ์˜ ๊ณ ๋ ค์‚ฌํ•ญ์— ๋Œ€ํ•œ ํŒ๋‹จ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ , ์ถ”๊ฐ€์ ์ธ ์ œ๊ณต์ด ์ง€์†์ ์œผ๋กœ ๋ฐœ์ƒํ•˜๋Š” ๊ฒฝ์šฐ ๊ณ ๋ ค์‚ฌํ•ญ์— ๋Œ€ํ•œ ํŒ๋‹จ๊ธฐ์ค€์„๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœํ•˜๊ณ  ์ด๋ฅผ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๊ฐ€ ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต ๋™์˜๋ฅผ ๋ฐ›์„ ๋•Œ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ณ ์ง€ํ•˜๋Š” ์‚ฌํ•ญ ์ค‘์— ์ผ๋ถ€ ์‚ฌํ•ญ(๋™์˜ ๊ฑฐ๋ถ€๊ถŒ, ์ œ๊ณตํ•˜๋Š” ํ•ญ๋ชฉ ๋“ฑ)์„ ๋ˆ„๋ฝํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ณผ์ •์—์„œ ์ œ3์ž ์ œ๊ณต ๋™์˜ ์—ฌ๋ถ€๋ฅผ ์ ์ ˆํžˆ ํ™•์ธํ•˜์ง€ ๋ชปํ•˜์—ฌ ๋™์˜ํ•˜์ง€ ์•Š์€ ์ •๋ณด์ฃผ์ฒด์˜ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ํ•จ๊ป˜ ์ œ๊ณต๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณต ๋™์˜๋ฅผ ๋ฐ›์„ ๋•Œ, ์ œ๊ณต๋ฐ›๋Š” ์ž๋ฅผ ํŠน์ •ํ•˜์ง€ ์•Š๊ณ  สป~ ๋“ฑสผ๊ณผ ๊ฐ™์ด ํฌ๊ด„์ ์œผ๋กœ ์•ˆ๋‚ดํ•˜๊ณ  ๋™์˜๋ฅผ ๋ฐ›์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ํšŒ์› ๊ฐ€์ž… ๋‹จ๊ณ„์—์„œ ์„ ํƒ์‚ฌํ•ญ์œผ๋กœ ์ œ3์ž ์ œ๊ณต ๋™์˜๋ฅผ ๋ฐ›๊ณ  ์žˆ์œผ๋‚˜, ์ œ3์ž ์ œ๊ณต์— ๋™์˜ํ•˜์ง€ ์•Š์œผ๋ฉด ํšŒ์› ๊ฐ€์ž… ์ ˆ์ฐจ๊ฐ€ ๋” ์ด์ƒ ์ง„ํ–‰๋˜์ง€ ์•Š๋„๋ก ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ œ๊ณต๋ฐ›๋Š” ์ž์˜ ์ด์šฉ ๋ชฉ์ ๊ณผ ๊ด€๋ จ ์—†์ด ์ง€๋‚˜์น˜๊ฒŒ ๋งŽ์€ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ17์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์ œ๊ณต), ์ œ22์กฐ(๋™์˜๋ฅผ ๋ฐ›๋Š” ๋ฐฉ๋ฒ•)","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋ฐฉ๋ฒ•์— ๊ด€ํ•œ ๊ณ ์‹œ"]}],"description": "๊ฐœ์ธ์ •๋ณด๋ฅผ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ๋ฒ•์  ๊ทผ๊ฑฐ์— ์˜ํ•˜๊ฑฐ๋‚˜ ์ •๋ณด์ฃผ์ฒด์˜ ๋™์˜๋ฅผ ๋ฐ›์•„์•ผ ํ•˜๋ฉฐ, ์ œ3์ž์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด์˜ ์ ‘๊ทผ์„ ํ—ˆ์šฉํ•˜๋Š” ๋“ฑ ์ œ๊ณต ๊ณผ์ •์—์„œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์•ˆ์ „ํ•˜๊ฒŒ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 
0,"total": 0,"manual": 0}},"3.3.2": {"name": "๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ์—…๋ฌด ์œ„ํƒ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.3.2 ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ์—…๋ฌด ์œ„ํƒ","Subdomain": "3.3. ๊ฐœ์ธ์ •๋ณด ์ œ๊ณต ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ(๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์œ„ํƒ ๊ด€๋ จ ๊ณต๊ฐœ ๋‚ด์—ญ)","๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ ์–‘์‹","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ์œ„ํƒ ๊ณ„์•ฝ์„œ","์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค ํ™๋ณดยทํŒ๋งค ๊ถŒ์œ  ์—…๋ฌด ์œ„ํƒ ๊ด€๋ จ ์ •๋ณด์ฃผ์ฒด ํ†ต์ง€ ๋‚ด์—ญ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด๋ฅผ ์ œ3์ž์—๊ฒŒ ์œ„ํƒ(์žฌ์œ„ํƒ ํฌํ•จ)ํ•˜๋Š” ๊ฒฝ์šฐ ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€ ๋“ฑ์— ์œ„ํƒํ•˜๋Š” ์—…๋ฌด์˜ ๋‚ด์šฉ๊ณผ ์ˆ˜ํƒ์ž๋ฅผ ํ˜„ํ–‰ํ™”ํ•˜์—ฌ ๊ณต๊ฐœํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค๋ฅผ ํ™๋ณดํ•˜๊ฑฐ๋‚˜ ํŒ๋งค๋ฅผ ๊ถŒ์œ ํ•˜๋Š” ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ์—๋Š” ์„œ๋ฉด, ์ „์ž์šฐํŽธ, ๋ฌธ์ž์ „์†ก ๋“ฑ์˜ ๋ฐฉ๋ฒ•์œผ๋กœ ์œ„ํƒํ•˜๋Š” ์—…๋ฌด์˜ ๋‚ด์šฉ๊ณผ ์ˆ˜ํƒ์ž๋ฅผ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํ™ˆํŽ˜์ด์ง€ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์œ„ํƒ ์‚ฌํ•ญ์„ ๊ณต๊ฐœํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ผ๋ถ€ ์ˆ˜ํƒ์ž์™€ ์œ„ํƒํ•˜๋Š” ์—…๋ฌด์˜ ๋‚ด์šฉ์ด ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค๋ฅผ ํ™๋ณดํ•˜๊ฑฐ๋‚˜ ํŒ๋งค๋ฅผ ๊ถŒ์œ ํ•˜๋Š” ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๋ฉด์„œ, ์œ„ํƒํ•˜๋Š” ์—…๋ฌด์˜ ๋‚ด์šฉ๊ณผ ์ˆ˜ํƒ์ž๋ฅผ ์„œ๋ฉด๋“ฑ์˜ ๋ฐฉ๋ฒ•์œผ๋กœ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ์ง€ ์•Š๊ณ  ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœํ•˜๋Š” ๊ฒƒ์œผ๋กœ ๊ฐˆ์Œํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ธฐ์กด ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์ˆ˜ํƒ์ž์™€์˜ ๊ณ„์•ฝ ํ•ด์ง€์— ๋”ฐ๋ผ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด ์ˆ˜ํƒ์ž๊ฐ€ ๋ณ€๊ฒฝ๋˜์—ˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ์ง€์ฒด ์—†์ด ๋ฐ˜์˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด๋ฅผ ์œ„ํƒ๋ฐ›์€ ์ž๊ฐ€ ํ•ด๋‹น ์—…๋ฌด๋ฅผ ์ œ3์ž์—๊ฒŒ ์žฌ์œ„ํƒ์„ ํ•˜๊ณ  ์žˆ์ง€๋งŒ, ์žฌ์œ„ํƒ์— ๊ด€ํ•œ ์‚ฌํ•ญ์„ ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€ ๋“ฑ์— ๊ณต๊ฐœํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ26์กฐ(์—…๋ฌด์œ„ํƒ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ์ œํ•œ)"]}],"description": "๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด๋ฅผ ์ œ3์ž์—๊ฒŒ ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ ์œ„ํƒํ•˜๋Š” ์—…๋ฌด์˜ ๋‚ด์šฉ๊ณผ ์ˆ˜ํƒ์ž ๋“ฑ ๊ด€๋ จ์‚ฌํ•ญ์„ ๊ณต๊ฐœํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ๋˜ํ•œ ์žฌํ™” ๋˜๋Š” ์„œ๋น„์Šค๋ฅผ ํ™๋ณดํ•˜๊ฑฐ๋‚˜ ํŒ๋งค๋ฅผ ๊ถŒ์œ ํ•˜๋Š” ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๋Š” ๊ฒฝ์šฐ ์œ„ํƒํ•˜๋Š” ์—…๋ฌด์˜ ๋‚ด์šฉ๊ณผ ์ˆ˜ํƒ์ž๋ฅผ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ ค์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.3.3": {"name": "์˜์—…์˜ ์–‘๋„ ๋“ฑ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด ์ด์ „","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.3.3 ์˜์—…์˜ ์–‘๋„ ๋“ฑ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด ์ด์ „","Subdomain": "3.3. 
๊ฐœ์ธ์ •๋ณด ์ œ๊ณต ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ด์ „ ๊ด€๋ จ ์ •๋ณด์ฃผ์ฒด ๊ณ ์ง€ ๋‚ด์—ญ(์˜์—… ์–‘์ˆ˜๋„ ์‹œ)","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ"],"AuditChecklist": ["์˜์—…์˜ ์ „๋ถ€ ๋˜๋Š” ์ผ๋ถ€์˜ ์–‘๋„ยทํ•ฉ๋ณ‘ ๋“ฑ์œผ๋กœ ๊ฐœ์ธ์ •๋ณด๋ฅผ ๋‹ค๋ฅธ ์‚ฌ๋žŒ์—๊ฒŒ ์ด์ „ํ•˜๋Š” ๊ฒฝ์šฐ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ์‚ฌ์ „์— ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์ „๋ฐ›๋Š” ์ž๋Š” ๋ฒ•์  ํ†ต์ง€ ์š”๊ฑด์— ํ•ด๋‹น๋  ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์ „๋ฐ›์€ ์‚ฌ์‹ค ๋“ฑ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์ง€์ฒด ์—†์ด ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์ „๋ฐ›๋Š” ์ž๋Š” ์ด์ „ ๋‹น์‹œ์˜ ๋ณธ๋ž˜ ๋ชฉ์ ์œผ๋กœ๋งŒ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์šฉํ•˜๊ฑฐ๋‚˜ ์ œ3์ž์—๊ฒŒ ์ œ๊ณตํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์ž๊ฐ€ ์˜์—… ์–‘์ˆ˜๋ฅผ ํ†ตํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์ „๋ฐ›์œผ๋ฉด์„œ ์–‘๋„์ž๊ฐ€ ๊ฐœ์ธ์ •๋ณด ์ด์ „ ์‚ฌ์‹ค์„ ์•Œ๋ฆฌ์ง€ ์•Š์•˜์Œ์—๋„ ๊ฐœ์ธ์ •๋ณด ์ด์ „ ์‚ฌ์‹ค์„ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์˜์—… ์–‘์ˆ˜๋„ ๋“ฑ์— ์˜ํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์ „๋ฐ›์œผ๋ฉด์„œ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์ด์ „์„ ์›ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ ์กฐ์น˜ํ•  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•๊ณผ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜์ง€ ์•Š๊ฑฐ๋‚˜, ์ด๋ฅผ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ27์กฐ(์˜์—…์–‘๋„ ๋“ฑ์— ๋”ฐ๋ฅธ ๊ฐœ์ธ์ •๋ณด์˜ ์ด์ „ ์ œํ•œ)"]}],"description": "์˜์—…์˜ ์–‘๋„ยทํ•ฉ๋ณ‘ ๋“ฑ์œผ๋กœ ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ด์ „ํ•˜๊ฑฐ๋‚˜ ์ด์ „๋ฐ›๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด ํ†ต์ง€ ๋“ฑ ์ ์ ˆํ•œ ๋ณดํ˜ธ์กฐ์น˜๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.3.4": {"name": "๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ์ด์ „","checks": {"s3_bucket_cross_region_replication": "FAIL"},"status": "FAIL","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.3.4 ๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ์ด์ „","Subdomain": "3.3. 
๊ฐœ์ธ์ •๋ณด ์ œ๊ณต ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ ์ด์ „ ๊ด€๋ จ ๋™์˜ ์–‘์‹","๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ ์ด์ „ ๊ด€๋ จ ๊ณ„์•ฝ์„œ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ","๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ ์ฒ˜๋ฆฌ์œ„ํƒยท๋ณด๊ด€ ๊ด€๋ จ ํ†ต์ง€ ๋˜๋Š” ๊ณต๊ฐœ ๋‚ด์—ญ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด๋ฅผ ๊ตญ์™ธ๋กœ ์ด์ „ํ•˜๋Š” ๊ฒฝ์šฐ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ๊ตญ์™ธ ์ด์ „์— ๊ด€ํ•œ ๊ณ ์ง€ ์‚ฌํ•ญ์„ ๋ชจ๋‘ ์•Œ๋ฆฌ๊ณ  ๋ณ„๋„ ๋™์˜๋ฅผ ๋ฐ›๊ฑฐ๋‚˜, ์ธ์ฆ ๋˜๋Š” ์ธ์ • ๋“ฑ ์ ๋ฒ• ์š”๊ฑด์„ ์ค€์ˆ˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์™€์˜ ๊ณ„์•ฝ์˜ ์ฒด๊ฒฐ ๋ฐ ์ดํ–‰์„ ์œ„ํ•œ ๊ฐœ์ธ์ •๋ณด์˜ ๊ตญ์™ธ ์ฒ˜๋ฆฌ์œ„ํƒยท๋ณด๊ด€์— ๋Œ€ํ•ด ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ๋Š” ๊ฒฝ์šฐ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ๋ชจ๋‘ ํฌํ•จํ•˜์—ฌ ์ ์ ˆํ•œ ๋ฐฉ๋ฒ•์œผ๋กœ ์•Œ๋ฆฌ๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ๊ด€๋ จ ๋ฒ•๋ น ์ค€์ˆ˜ ๋ฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ ๋“ฑ์— ๊ด€ํ•œ ์‚ฌํ•ญ์„ ํฌํ•จํ•˜์—ฌ ๊ตญ์™ธ ์ด์ „์— ๊ด€ํ•œ ๊ณ„์•ฝ์„ ์ฒด๊ฒฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ๊ตญ์™ธ๋กœ ์ด์ „ํ•˜๋Š” ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฅผ ์œ„ํ•˜์—ฌ ํ•„์š”ํ•œ ์กฐ์น˜๋ฅผ ์ทจํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด๋ฅผ ์ฒ˜๋ฆฌํ•˜๋Š” ๊ณผ์ •์—์„œ ๊ตญ์™ธ ์‚ฌ์—…์ž์—๊ฒŒ ๊ฐœ์ธ์ •๋ณด ์ œ3์ž ์ œ๊ณต์ด ๋ฐœ์ƒํ•˜์˜€์œผ๋‚˜, ์ธ์ฆ, ๋Œ€์ƒ๊ตญ ์ธ์ • ๋“ฑ ๋™์˜ ์˜ˆ์™ธ ์š”๊ฑด์— ํ•ด๋‹น๋˜์ง€ ์•Š์Œ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ ์ด์ „์— ๋Œ€ํ•œ ๋ณ„๋„ ๋™์˜๋ฅผ ๋ฐ›์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ตญ์™ธ ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค(๊ตญ์™ธ ๋ฆฌ์ „)๋ฅผ ์ด์šฉํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์œ„ํƒ ๋ฐ ๋ณด๊ด€์„ ํ•˜๋ฉด์„œ ์ด์ „๋˜๋Š” ๊ตญ๊ฐ€, ์ด์ „ ๋ฐฉ๋ฒ• ๋“ฑ ๊ด€๋ จ ์‚ฌํ•ญ์„ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœํ•˜๊ฑฐ๋‚˜ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์•Œ๋ฆฌ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ ์ด์ „์— ๋Œ€ํ•œ ๋™์˜๋ฅผ ๋ฐ›์œผ๋ฉด์„œ ์ด์ „๋ฐ›๋Š” ์ž์˜ ๋ช…์นญ(์—…์ฒด๋ช…)๋งŒ ๊ณ ์ง€ํ•˜๊ณ  ์ด์ „๋˜๋Š” ๊ตญ๊ฐ€ ๋“ฑ์— ๋Œ€ํ•˜์—ฌ ์•Œ๋ฆฌ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ28์กฐ์˜8(๊ฐœ์ธ์ •๋ณด์˜ ๊ตญ์™ธ ์ด์ „), ์ œ28์กฐ์˜9(๊ฐœ์ธ์ •๋ณด์˜ ๊ตญ์™ธ ์ด์ „ ์ค‘์ง€ ๋ช…๋ น), ์ œ28์กฐ์˜10(์ƒํ˜ธ์ฃผ์˜), ์ œ28์กฐ์˜11(์ค€์šฉ๊ทœ์ •)","๊ฐœ์ธ์ •๋ณด ๊ตญ์™ธ ์ด์ „ ์šด์˜ ๋“ฑ์— ๊ด€ํ•œ ๊ทœ์ •"]}],"description": "๊ฐœ์ธ์ •๋ณด๋ฅผ ๊ตญ์™ธ๋กœ ์ด์ „ํ•˜๋Š” ๊ฒฝ์šฐ ๊ตญ์™ธ ์ด์ „์— ๋Œ€ํ•œ ๋™์˜, ๊ด€๋ จ ์‚ฌํ•ญ์— ๋Œ€ํ•œ ๊ณต๊ฐœ ๋“ฑ ์ ์ ˆํ•œ ๋ณดํ˜ธ์กฐ์น˜๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.4.1": {"name": "๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.4.1 ๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ","Subdomain": "3.4. 
๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ๋ณด์œ ๊ธฐ๊ฐ„ ๋ฐ ํŒŒ๊ธฐ ๊ด€๋ จ ๊ทœ์ •","๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ ๊ฒฐ๊ณผ(ํšŒ์› ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ๊ด€๋ฆฌ๋Œ€์žฅ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด์˜ ๋ณด์œ ๊ธฐ๊ฐ„ ๋ฐ ํŒŒ๊ธฐ์™€ ๊ด€๋ จ๋œ ๋‚ด๋ถ€ ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ๋ชฉ์ ์ด ๋‹ฌ์„ฑ๋˜๊ฑฐ๋‚˜ ๋ณด์œ ๊ธฐ๊ฐ„์ด ๊ฒฝ๊ณผํ•œ ๊ฒฝ์šฐ ์ง€์ฒด ์—†์ด ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•  ๋•Œ์—๋Š” ๋ณต๊ตฌยท์žฌ์ƒ๋˜์ง€ ์•Š๋„๋ก ์•ˆ์ „ํ•œ ๋ฐฉ๋ฒ•์œผ๋กœ ํŒŒ๊ธฐํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ์— ๋Œ€ํ•œ ๊ธฐ๋ก์„ ๋‚จ๊ธฐ๊ณ  ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํšŒ์› ํƒˆํ‡ด ๋“ฑ ๋ชฉ์ ์ด ๋‹ฌ์„ฑ๋˜๊ฑฐ๋‚˜ ๋ณด์œ ๊ธฐ๊ฐ„์ด ๊ฒฝ๊ณผ๋œ ๊ฒฝ์šฐ ํšŒ์› ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์—์„œ๋Š” ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜์˜€์œผ๋‚˜, CRMยทDW ๋“ฑ ์—ฐ๊ณ„๋œ ๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ๋ณต์ œ๋˜์–ด ์ €์žฅ๋˜์–ด ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ํŠน์ • ๊ธฐ๊ฐ„ ๋™์•ˆ ์ด๋ฒคํŠธ๋ฅผ ํ•˜๋ฉด์„œ ์ˆ˜์ง‘๋œ ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•˜์—ฌ ์ด๋ฒคํŠธ๊ฐ€ ์ข…๋ฃŒ๋œ ์ดํ›„์—๋„ ํŒŒ๊ธฐ ๊ธฐ์ค€์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜ ํŒŒ๊ธฐ๊ฐ€ ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ฝœ์„ผํ„ฐ์—์„œ ์ˆ˜์ง‘๋˜๋Š” ๋ฏผ์›์ฒ˜๋ฆฌ ๊ด€๋ จ ๊ฐœ์ธ์ •๋ณด(์ƒ๋‹ด์ด๋ ฅ, ๋…น์ทจ ๋“ฑ)๋ฅผ ์ „์ž์ƒ๊ฑฐ๋ž˜๋ฒ•์„ ๊ทผ๊ฑฐ๋กœ 3๋…„๊ฐ„ ๋ณด์กดํ•˜๊ณ  ์žˆ์œผ๋‚˜, 3๋…„์ด ๊ฒฝ๊ณผํ•œ ํ›„์—๋„ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋ธ”๋ก์ฒด์ธ ๋“ฑ ๊ธฐ์ˆ ์  ํŠน์„ฑ์œผ๋กœ ์ธํ•˜์—ฌ ๋ชฉ์ ์ด ๋‹ฌ์„ฑ๋œ ๊ฐœ์ธ์ •๋ณด์˜ ์™„์ „ ํŒŒ๊ธฐ๊ฐ€ ์–ด๋ ค์›Œ ์™„์ „ํŒŒ๊ธฐ ๋Œ€์‹  ์ต๋ช…์ฒ˜๋ฆฌ๋ฅผ ํ•˜์˜€์œผ๋‚˜, ์ต๋ช…์ฒ˜๋ฆฌ๊ฐ€ ์ ์ ˆํ•˜๊ฒŒ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์•„ ์ผ๋ถ€ ๊ฐœ์ธ์ •๋ณด์˜ ์žฌ์‹๋ณ„ ๋“ฑ ๋ณต์›์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ21์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ13์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ)"]}],"description": "๊ฐœ์ธ์ •๋ณด์˜ ๋ณด์œ ๊ธฐ๊ฐ„ ๋ฐ ํŒŒ๊ธฐ ๊ด€๋ จ ๋‚ด๋ถ€ ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ๊ฐœ์ธ์ •๋ณด์˜ ๋ณด์œ ๊ธฐ๊ฐ„ ๊ฒฝ๊ณผ, ์ฒ˜๋ฆฌ๋ชฉ์  ๋‹ฌ์„ฑ ๋“ฑ ํŒŒ๊ธฐ ์‹œ์ ์ด ๋„๋‹ฌํ•œ ๋•Œ์—๋Š” ํŒŒ๊ธฐ์˜ ์•ˆ์ „์„ฑ ๋ฐ ์™„์ „์„ฑ์ด ๋ณด์žฅ๋  ์ˆ˜ ์žˆ๋Š” ๋ฐฉ๋ฒ•์œผ๋กœ ์ง€์ฒด ์—†์ด ํŒŒ๊ธฐํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.4.2": {"name": "์ฒ˜๋ฆฌ๋ชฉ์  ๋‹ฌ์„ฑ ํ›„ ๋ณด์œ  ์‹œ ์กฐ์น˜","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.4.2 ์ฒ˜๋ฆฌ๋ชฉ์  ๋‹ฌ์„ฑ ํ›„ ๋ณด์œ  ์‹œ ์กฐ์น˜","Subdomain": "3.4. 
๊ฐœ์ธ์ •๋ณด ํŒŒ๊ธฐ ์‹œ ๋ณดํ˜ธ์กฐ์น˜","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ๋ณด์œ ๊ธฐ๊ฐ„ ๋ฐ ํŒŒ๊ธฐ ๊ด€๋ จ ๊ทœ์ •","๋ถ„๋ฆฌ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ํ˜„ํ™ฉ(ํ…Œ์ด๋ธ” ๊ตฌ์กฐ ๋“ฑ)","๋ถ„๋ฆฌ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค ์ ‘๊ทผ๊ถŒํ•œ ํ˜„ํ™ฉ"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด์˜ ๋ณด์œ ๊ธฐ๊ฐ„ ๊ฒฝ๊ณผ ๋˜๋Š” ์ฒ˜๋ฆฌ๋ชฉ์  ๋‹ฌ์„ฑ ํ›„์—๋„ ๊ด€๋ จ ๋ฒ•๋ น ๋“ฑ์— ๋”ฐ๋ผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ๋ณด์กดํ•˜๋Š” ๊ฒฝ์šฐ, ๊ด€๋ จ ๋ฒ•๋ น์— ๋”ฐ๋ฅธ ์ตœ์†Œํ•œ์˜ ๊ธฐ๊ฐ„์œผ๋กœ ํ•œ์ •ํ•˜์—ฌ ์ตœ์†Œํ•œ์˜ ์ •๋ณด๋งŒ์„ ๋ณด์กดํ•˜๋„๋ก ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์˜ ๋ณด์œ ๊ธฐ๊ฐ„ ๊ฒฝ๊ณผ ๋˜๋Š” ์ฒ˜๋ฆฌ๋ชฉ์  ๋‹ฌ์„ฑ ํ›„์—๋„ ๊ด€๋ จ ๋ฒ•๋ น ๋“ฑ์— ๋”ฐ๋ผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ๋ณด์กดํ•˜๋Š” ๊ฒฝ์šฐ ํ•ด๋‹น ๊ฐœ์ธ์ •๋ณด ๋˜๋Š” ๊ฐœ์ธ์ •๋ณดํŒŒ์ผ์„ ๋‹ค๋ฅธ ๊ฐœ์ธ์ •๋ณด์™€ ๋ถ„๋ฆฌํ•˜์—ฌ ์ €์žฅยท๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ถ„๋ฆฌ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•˜์—ฌ ๋ฒ•๋ น์—์„œ ์ •ํ•œ ๋ชฉ์  ๋ฒ”์œ„ ๋‚ด์—์„œ๋งŒ ์ฒ˜๋ฆฌ ๊ฐ€๋Šฅํ•˜๋„๋ก ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ถ„๋ฆฌ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•˜์—ฌ ์ ‘๊ทผ๊ถŒํ•œ์„ ์ตœ์†Œํ•œ์˜ ์ธ์›์œผ๋กœ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํƒˆํ‡ดํšŒ์› ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ์ „์ž์ƒ๊ฑฐ๋ž˜๋ฒ•์— ๋”ฐ๋ผ ์ผ์ •๊ธฐ๊ฐ„ ๋ณด๊ด€ํ•˜๋ฉด์„œ Flag๊ฐ’๋งŒ ๋ณ€๊ฒฝํ•˜์—ฌ ๋‹ค๋ฅธ ํšŒ์›์ •๋ณด์™€ ๋™์ผํ•œ ํ…Œ์ด๋ธ”์— ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ „์ž์ƒ๊ฑฐ๋ž˜๋ฒ•์— ๋”ฐ๋ฅธ ์†Œ๋น„์ž ๋ถˆ๋งŒ ๋ฐ ๋ถ„์Ÿ์ฒ˜๋ฆฌ์— ๊ด€ํ•œ ๊ธฐ๋ก์— ๋Œ€ํ•ด ๊ด€๋ จ ๋ฒ•์  ์š”๊ฑด์„ ์ž˜๋ชป ์ ์šฉํ•˜์—ฌ 3๋…„์ด ์•„๋‹Œ 5๋…„๊ฐ„ ๋ณด์กดํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ถ„๋ฆฌ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค๋ฅผ ๊ตฌ์„ฑํ•˜์˜€์œผ๋‚˜ ์ ‘๊ทผ๊ถŒํ•œ์„ ๋ณ„๋„๋กœ ์„ค์ •ํ•˜์ง€ ์•Š์•„ ์—…๋ฌด์ƒ ์ ‘๊ทผ์ด ๋ถˆํ•„์š”ํ•œ ์ธ์›๋„ ๋ถ„๋ฆฌ ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค์— ์ž์œ ๋กญ๊ฒŒ ์ ‘๊ทผ์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ํƒˆํ‡ดํšŒ์› ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ์ „์ž์ƒ๊ฑฐ๋ž˜๋ฒ•์— ๋”ฐ๋ผ ๊ณ„์•ฝ ๋˜๋Š” ์ฒญ์•ฝ์ฒ ํšŒ, ๋Œ€๊ธˆ๊ฒฐ์ œ ๋ฐ ์žฌํ™” ๊ณต๊ธ‰์— ๊ด€ํ•œ ๊ธฐ๋ก์„ ๋ถ„๋ฆฌํ•˜์—ฌ ๋ณด์กดํ•˜์˜€์œผ๋‚˜, ์ „์ž์ƒ๊ฑฐ๋ž˜๋ฒ•์— ๋”ฐ๋ฅธ ๋ณด์กด์˜๋ฌด๊ฐ€ ์—†๋Š” ์„ ํƒ์ •๋ณด๊นŒ์ง€ ๊ณผ๋„ํ•˜๊ฒŒ ๋ณด์กดํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ21์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ํŒŒ๊ธฐ)"]}],"description": "๊ฐœ์ธ์ •๋ณด์˜ ๋ณด์œ ๊ธฐ๊ฐ„ ๊ฒฝ๊ณผ ๋˜๋Š” ์ฒ˜๋ฆฌ๋ชฉ์  ๋‹ฌ์„ฑ ํ›„์—๋„ ๊ด€๋ จ ๋ฒ•๋ น ๋“ฑ์— ๋”ฐ๋ผ ํŒŒ๊ธฐํ•˜์ง€ ์•Š๊ณ  ๋ณด์กดํ•˜๋Š” ๊ฒฝ์šฐ์—๋Š” ํ•ด๋‹น ๋ชฉ์ ์— ํ•„์š”ํ•œ ์ตœ์†Œํ•œ์˜ ํ•ญ๋ชฉ์œผ๋กœ ์ œํ•œํ•˜๊ณ  ๋‹ค๋ฅธ ๊ฐœ์ธ์ •๋ณด์™€ ๋ถ„๋ฆฌํ•˜์—ฌ ์ €์žฅยท๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.5.1": {"name": "๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ ๊ณต๊ฐœ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.5.1 ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ ๊ณต๊ฐœ","Subdomain": "3.5. 
์ •๋ณด์ฃผ์ฒด ๊ถŒ๋ฆฌ๋ณดํ˜ธ","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ ๊ฐœ์ • ๊ด€๋ จ ๊ณต์ง€ ๋‚ด์—ญ(๊ฒŒ์‹œํŒ ๋“ฑ)"],"AuditChecklist": ["๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์„ ๋ฒ•๋ น์—์„œ ์š”๊ตฌํ•˜๋Š” ๋‚ด์šฉ์„ ๋ชจ๋‘ ํฌํ•จํ•˜์—ฌ ์•Œ๊ธฐ ์‰ฌ์šด ์šฉ์–ด๋กœ ๊ตฌ์ฒด์ ์ด๊ณ  ๋ช…ํ™•ํ•˜๊ฒŒ ์ž‘์„ฑํ•˜์˜€๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์„ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์‰ฝ๊ฒŒ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋„๋ก ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€ ๋“ฑ์— ์ง€์†์ ์œผ๋กœ ํ˜„ํ–‰ํ™”ํ•˜์—ฌ ๊ณต๊ฐœํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์ด ๋ณ€๊ฒฝ๋˜๋Š” ๊ฒฝ์šฐ ์‚ฌ์œ  ๋ฐ ๋ณ€๊ฒฝ ๋‚ด์šฉ์„ ์ง€์ฒด ์—†์ด ๊ณต์ง€ํ•˜๊ณ  ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์–ธ์ œ๋“ ์ง€ ๋ณ€๊ฒฝ๋œ ์‚ฌํ•ญ์„ ์‰ฝ๊ฒŒ ์•Œ์•„ ๋ณผ ์ˆ˜ ์žˆ๋„๋ก ์กฐ์น˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœ๋˜์–ด ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘, ์ œ3์ž ์ œ๊ณต ๋‚ด์—ญ์ด ์‹ค์ œ ์ˆ˜์ง‘ ๋ฐ ์ œ๊ณตํ•˜๋Š” ๋‚ด์—ญ๊ณผ ๋‹ค๋ฅธ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ฑ…์ž„์ž์˜ ๋ณ€๊ฒฝ, ์ˆ˜ํƒ์ž ๋ณ€๊ฒฝ ๋“ฑ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ ๊ณต๊ฐœ ๋‚ด์šฉ ์ค‘์— ๋ณ€๊ฒฝ์‚ฌํ•ญ์ด ๋ฐœ์ƒํ•˜์˜€์Œ์—๋„ ์ด๋ฅผ ๋ฐ˜์˜ํ•˜์—ฌ ๋ณ€๊ฒฝํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์ด ๊ณต๊ฐœ๋Š” ๋˜์–ด ์žˆ์œผ๋‚˜, ๋ช…์นญ์ด สป๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจสผ์ด ์•„๋‹ˆ๋ผ สป๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ์ •์ฑ…สผ์œผ๋กœ ๋˜์–ด ์žˆ๊ณ  ๊ธ€์ž ํฌ๊ธฐ, ์ƒ‰์ƒ ๋“ฑ์„ ํ™œ์šฉํ•˜์—ฌ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์‰ฝ๊ฒŒ ์ฐพ์„ ์ˆ˜ ์žˆ๋„๋ก ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์ด ๋ช‡ ์ฐจ๋ก€ ๊ฐœ์ •๋˜์—ˆ์œผ๋‚˜, ์˜ˆ์ „์— ์ž‘์„ฑ๋œ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์˜ ๋‚ด์šฉ์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ณต๊ฐœ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์ „์ž์ƒ๊ฑฐ๋ž˜๋ฒ•, ์ƒ๋ฒ• ๋“ฑ ๋‹ค๋ฅธ ๋ฒ•๋ น์— ๋”ฐ๋ผ ๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜์ง€ ์•„๋‹ˆํ•˜๊ณ  ์ผ์ •๊ธฐ๊ฐ„ ๋ณด๊ด€ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ด์— ๋”ฐ๋ฅธ ๋ณด์กด๊ทผ๊ฑฐ์™€ ๋ณด์กดํ•˜๋Š” ๊ฐœ์ธ์ •๋ณด ํ•ญ๋ชฉ์„ ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์— ๊ณต๊ฐœํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ30์กฐ(๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์˜ ์ˆ˜๋ฆฝ ๋ฐ ๊ณต๊ฐœ), ์ œ30์กฐ์˜2(๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์˜ ํ‰๊ฐ€ ๋ฐ ๊ฐœ์„ ๊ถŒ๊ณ )"]}],"description": "๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ ๋ชฉ์  ๋“ฑ ํ•„์š”ํ•œ ์‚ฌํ•ญ์„ ๋ชจ๋‘ ํฌํ•จํ•˜์—ฌ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์•Œ๊ธฐ ์‰ฝ๋„๋ก ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ์„ ์ˆ˜๋ฆฝํ•˜๊ณ , ์ด๋ฅผ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์–ธ์ œ๋“ ์ง€ ์‰ฝ๊ฒŒ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋„๋ก ์ ์ ˆํ•œ ๋ฐฉ๋ฒ•์— ๋”ฐ๋ผ ๊ณต๊ฐœํ•˜๊ณ  ์ง€์†์ ์œผ๋กœ ํ˜„ํ–‰ํ™”ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.5.2": {"name": "์ •๋ณด์ฃผ์ฒด ๊ถŒ๋ฆฌ๋ณด์žฅ","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.5.2 ์ •๋ณด์ฃผ์ฒด ๊ถŒ๋ฆฌ๋ณด์žฅ","Subdomain": "3.5. 
์ •๋ณด์ฃผ์ฒด ๊ถŒ๋ฆฌ๋ณดํ˜ธ","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ๋ฐฉ์นจ","๊ฐœ์ธ์ •๋ณด ์—ด๋žŒ๋“ฑ์š”๊ตฌ ์ฒ˜๋ฆฌ ์ ˆ์ฐจ, ๊ด€๋ จ ์–‘์‹","๊ฐœ์ธ์ •๋ณด ์—ด๋žŒ๋“ฑ์š”๊ตฌ ์‹œ ์กฐ์น˜ ๋‚ด์—ญ","ํšŒ์› ํƒˆํ‡ด ๋ฐ ๋™์˜ ์ฒ ํšŒ ์ ˆ์ฐจ"],"AuditChecklist": ["์ •๋ณด์ฃผ์ฒด ๋˜๋Š” ๊ทธ ๋Œ€๋ฆฌ์ธ์ด ๊ฐœ์ธ์ •๋ณด์— ๋Œ€ํ•œ ์—ด๋žŒ, ์ •์ •ยท์‚ญ์ œ, ์ฒ˜๋ฆฌ์ •์ง€ ๋ฐ ๋™์˜ ์ฒ ํšŒ ๋“ฑ(์ดํ•˜ '์—ด๋žŒ๋“ฑ์š”๊ตฌ'๋ผ ํ•จ)์„ ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘๋ฐฉ๋ฒ•ยท์ ˆ์ฐจ๋ณด๋‹ค ์–ด๋ ต์ง€ ์•„๋‹ˆํ•˜๋„๋ก ๊ถŒ๋ฆฌ ํ–‰์‚ฌ ๋ฐฉ๋ฒ•๋ฐ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜์—ฌ ๊ณต๊ฐœํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด ๋˜๋Š” ๊ทธ ๋Œ€๋ฆฌ์ธ์ด ๊ฐœ์ธ์ •๋ณด ์—ด๋žŒ๋“ฑ์š”๊ตฌ๋ฅผ ํ•˜๋Š” ๊ฒฝ์šฐ ๊ธฐ๊ฐ„ ๋‚ด์— ์—ด๋žŒ๋“ฑ์š”๊ตฌ์— ๋”ฐ๋ฅธ ํ•„์š”ํ•œ ์กฐ์น˜๋ฅผ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด ๋˜๋Š” ๊ทธ ๋Œ€๋ฆฌ์ธ์ด ๊ฐœ์ธ์ •๋ณด ์ˆ˜์ง‘ยท์ด์šฉยท์ œ๊ณต ๋“ฑ์˜ ๋™์˜๋ฅผ ์ฒ ํšŒํ•˜๋Š” ๊ฒฝ์šฐ ์ง€์ฒด ์—†์ด ์ˆ˜์ง‘๋œ ๊ฐœ์ธ์ •๋ณด๋ฅผ ํŒŒ๊ธฐํ•˜๋Š” ๋“ฑ ํ•„์š”ํ•œ ์กฐ์น˜๋ฅผ ์ทจํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์˜ ์—ด๋žŒ๋“ฑ์š”๊ตฌ์— ๋Œ€ํ•œ ์กฐ์น˜์— ๋ถˆ๋ณต์ด ์žˆ๋Š” ๊ฒฝ์šฐ ์ด์˜๋ฅผ ์ œ๊ธฐํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•„์š”ํ•œ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜์—ฌ ์•ˆ๋‚ดํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณด์ฃผ์ฒด์˜ ์—ด๋žŒ๋“ฑ์š”๊ตฌ ๋ฐ ์ฒ˜๋ฆฌ ๊ฒฐ๊ณผ์— ๋Œ€ํ•˜์—ฌ ๊ธฐ๋ก์„ ๋‚จ๊ธฐ๊ณ  ์žˆ๋Š”๊ฐ€?","์ •๋ณดํ†ต์‹ ๋ง์—์„œ ์‚ฌ์ƒํ™œ ์นจํ•ด ๋˜๋Š” ๋ช…์˜ˆํ›ผ์† ๋“ฑ ํƒ€์ธ์˜ ๊ถŒ๋ฆฌ๋ฅผ ์นจํ•ดํ•œ ๊ฒฝ์šฐ ์นจํ•ด๋ฅผ ๋ฐ›์€ ์ž๊ฐ€ ์ •๋ณดํ†ต์‹ ์„œ๋น„์Šค ์ œ๊ณต์ž์—๊ฒŒ ์ •๋ณด์˜ ์‚ญ์ œ ์š”์ฒญ ๋“ฑ์„ ํ•  ์ˆ˜ ์žˆ๋Š” ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜์—ฌ ์‹œํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๊ฐœ์ธ์ •๋ณด์˜ ์—ด๋žŒ, ์ •์ •ยท์‚ญ์ œ, ์ฒ˜๋ฆฌ์ •์ง€ ์š”๊ตฌ ๋ฐฉ๋ฒ•์„ ์ •๋ณด์ฃผ์ฒด๊ฐ€ ์•Œ ์ˆ˜ ์žˆ๋„๋ก ๊ณต๊ฐœํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด์˜ ์—ด๋žŒ ์š”๊ตฌ์— ๋Œ€ํ•˜์—ฌ ์ •๋‹นํ•œ ์‚ฌ์œ ์˜ ํ†ต์ง€ ์—†์ด ์—ด๋žŒ ์š”๊ตฌ๋ฅผ ์ ‘์ˆ˜๋ฐ›์€ ๋‚ ๋กœ๋ถ€ํ„ฐ 10์ผ์„ ์ดˆ๊ณผํ•˜์—ฌ ํšŒ์‹ ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด์˜ ์—ด๋žŒ ๋ฏผ์›์— ๋Œ€ํ•œ ์ฒ˜๋ฆฌ ๋‚ด์—ญ ๊ธฐ๋ก ๋ฐ ๋ณด๊ด€์ด ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ •๋ณด์ฃผ์ฒด ๋‹น์‚ฌ์ž ๋˜๋Š” ์ •๋‹นํ•œ ๋Œ€๋ฆฌ์ธ์ด ๋งž๋Š”์ง€์— ๋Œ€ํ•œ ํ™•์ธ ์ ˆ์ฐจ ์—†์ด ์—ด๋žŒ ํ†ต์ง€๊ฐ€ ์ด๋ฃจ์–ด์ง€๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ๊ฐœ์ธ์ •๋ณด์˜ ์ •์ •ยท์‚ญ์ œ ์š”๊ตฌ์— ๋Œ€ํ•˜์—ฌ ์ •์ •ยท์‚ญ์ œ ์š”๊ตฌ๋ฅผ ์ ‘์ˆ˜๋ฐ›์€ ๋‚ ๋กœ๋ถ€ํ„ฐ 10์ผ์„ ์ดˆ๊ณผํ•˜์—ฌ ํšŒ์‹ ํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ํšŒ์› ๊ฐ€์ž… ์‹œ์—๋Š” ์˜จ๋ผ์ธ์„ ํ†ตํ•˜์—ฌ ์‰ฝ๊ฒŒ ํšŒ์› ๊ฐ€์ž…์ด ๊ฐ€๋Šฅํ•˜์˜€์œผ๋‚˜, ํšŒ์› ํƒˆํ‡ด ์‹œ์—๋Š” ์‹ ๋ถ„์ฆ ๋“ฑ ์ถ”๊ฐ€ ์„œ๋ฅ˜๋ฅผ ์ œ์ถœํ•˜๊ฒŒ ํ•˜๊ฑฐ๋‚˜ ์˜คํ”„๋ผ์ธ ๋ฐฉ๋ฌธ์„ ํ†ตํ•ด์„œ๋งŒ ๊ฐ€๋Šฅํ•˜๋„๋ก ํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ34์กฐ์˜2(๋…ธ์ถœ๋œ ๊ฐœ์ธ์ •๋ณด์˜ ์‚ญ์ œยท์ฐจ๋‹จ), ์ œ35์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์—ด๋žŒ), ์ œ35์กฐ์˜2(๊ฐœ์ธ์ •๋ณด์˜ ์ „์†ก ์š”๊ตฌ), ์ œ36์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์ •์ •ยท์‚ญ์ œ), ์ œ37์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์ฒ˜๋ฆฌ์ •์ง€ ๋“ฑ), ์ œ37์กฐ์˜2(์ž๋™ํ™”๋œ ๊ฒฐ์ •์— ๋Œ€ํ•œ ์ •๋ณด์ฃผ์ฒด์˜ ๊ถŒ๋ฆฌ ๋“ฑ), ์ œ38์กฐ(๊ถŒ๋ฆฌํ–‰์‚ฌ์˜ ๋ฐฉ๋ฒ• ๋ฐ ์ ˆ์ฐจ)","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ44์กฐ(์ •๋ณดํ†ต์‹ ๋ง์—์„œ์˜ ๊ถŒ๋ฆฌ๋ณดํ˜ธ), ์ œ44์กฐ์˜2(์ •๋ณด์˜ ์‚ญ์ œ์š”์ฒญ ๋“ฑ), ์ œ44์กฐ์˜3(์ž„์˜์˜ ์ž„์‹œ์กฐ์น˜)"]}],"description": "์ •๋ณด์ฃผ์ฒด๊ฐ€ ๊ฐœ์ธ์ •๋ณด์˜ ์—ด๋žŒ, ์ •์ •ยท์‚ญ์ œ, ์ฒ˜๋ฆฌ์ •์ง€, ์ด์˜์ œ๊ธฐ, ๋™์˜์ฒ ํšŒ ๋“ฑ ์š”๊ตฌ๋ฅผ ์ˆ˜์ง‘ ๋ฐฉ๋ฒ•ยท์ ˆ์ฐจ๋ณด๋‹ค ์‰ฝ๊ฒŒ ํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ถŒ๋ฆฌํ–‰์‚ฌ ๋ฐฉ๋ฒ• ๋ฐ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ์ •๋ณด์ฃผ์ฒด์˜ ์š”๊ตฌ๋ฅผ ๋ฐ›์€ ๊ฒฝ์šฐ ์ง€์ฒด ์—†์ด ์ฒ˜๋ฆฌํ•˜๊ณ  ๊ด€๋ จ ๊ธฐ๋ก์„ ๋‚จ๊ฒจ์•ผ ํ•œ๋‹ค. 
๋˜ํ•œ ์ •๋ณด์ฃผ์ฒด์˜ ์‚ฌ์ƒํ™œ ์นจํ•ด, ๋ช…์˜ˆํ›ผ์† ๋“ฑ ํƒ€์ธ์˜ ๊ถŒ๋ฆฌ๋ฅผ ์นจํ•ดํ•˜๋Š” ์ •๋ณด๊ฐ€ ์œ ํ†ต๋˜์ง€ ์•Š๋„๋ก ์‚ญ์ œ์š”์ฒญ, ์ž„์‹œ์กฐ์น˜ ๋“ฑ์˜ ๊ธฐ์ค€์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"3.5.3": {"name": "์ •๋ณด์ฃผ์ฒด์— ๋Œ€ํ•œ ํ†ต์ง€","checks": {},"status": "PASS","attributes": [{"Domain": "3. ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ ๋‹จ๊ณ„๋ณ„ ์š”๊ตฌ์‚ฌํ•ญ","Section": "3.5.3 ์ •๋ณด์ฃผ์ฒด์— ๋Œ€ํ•œ ํ†ต์ง€","Subdomain": "3.5. ์ •๋ณด์ฃผ์ฒด ๊ถŒ๋ฆฌ๋ณดํ˜ธ","AuditEvidence": ["๊ฐœ์ธ์ •๋ณด ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ ํ†ต์ง€ ๊ธฐ๋ก","๊ฐœ์ธ์ •๋ณด ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ ํ†ต์ง€ ์–‘์‹ ๋ฐ ๋ฌธ๊ตฌ"],"AuditChecklist": ["๋ฒ•์  ์˜๋ฌด ๋Œ€์ƒ์ž์— ํ•ด๋‹นํ•˜๋Š” ๊ฒฝ์šฐ ๊ฐœ์ธ์ •๋ณด ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ ๋˜๋Š” ๊ทธ ๋‚ด์—ญ์„ ํ™•์ธํ•  ์ˆ˜ ์žˆ๋Š” ์ •๋ณด์‹œ์Šคํ…œ์— ์ ‘์†ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์ฃผ๊ธฐ์ ์œผ๋กœ ํ†ต์ง€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ ํ†ต์ง€ ํ•ญ๋ชฉ์€ ๋ฒ•์  ์š”๊ตฌํ•ญ๋ชฉ์„ ๋ชจ๋‘ ํฌํ•จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ „๋…„๋„ ๋ง ๊ธฐ์ค€ ์ง์ „ 3๊ฐœ์›” ๊ฐ„ ์ผ์ผ ํ‰๊ท  ์ €์žฅยท๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š” ๊ฐœ์ธ์ •๋ณด๊ฐ€ 100๋งŒ๋ช… ์ด์ƒ์œผ๋กœ์„œ ๊ฐœ์ธ์ •๋ณด ์ด์šฉ์ œ๊ณต ๋‚ด์—ญ ํ†ต์ง€ ์˜๋ฌด ๋Œ€์ƒ์ž์— ํ•ด๋‹น ๋จ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ  ๊ธˆ๋…„๋„์— ๊ฐœ์ธ์ •๋ณด ์ด์šฉ ๋ฐ๋‚ด์—ญ์„ ํ†ต์ง€ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ์„ ๊ฐœ๋ณ„ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ์ง์ ‘์ ์œผ๋กœ ํ†ต์ง€ํ•˜๋Š” ๋Œ€์‹  ํ™ˆํŽ˜์ด์ง€์—์„œ ๋‹จ์ˆœ ํŒ์—…์ฐฝ์ด๋‚˜ ๋ณ„๋„ ๊ณต์ง€์‚ฌํ•ญ์œผ๋กœ ์•ˆ๋‚ด๋งŒ ํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ20์กฐ์˜2(๊ฐœ์ธ์ •๋ณด ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ์˜ ํ†ต์ง€)"]}],"description": "๊ฐœ์ธ์ •๋ณด์˜ ์ด์šฉยท์ œ๊ณต ๋‚ด์—ญ ๋“ฑ ์ •๋ณด์ฃผ์ฒด์—๊ฒŒ ํ†ต์ง€ํ•˜์—ฌ์•ผ ํ•  ์‚ฌํ•ญ์„ ํŒŒ์•…ํ•˜์—ฌ ๊ทธ ๋‚ด์šฉ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ํ†ต์ง€ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.1": {"name": "๋ณด์•ˆ์‹œ์Šคํ…œ ์šด์˜","checks": {"kms_cmk_are_used": null,"macie_is_enabled": "PASS","securityhub_enabled": "PASS","fms_policy_compliant": null,"guardduty_is_enabled": "PASS","inspector2_is_enabled": "FAIL","elbv2_waf_acl_attached": "FAIL","kms_cmk_rotation_enabled": null,"ec2_securitygroup_not_used": "FAIL","guardduty_centrally_managed": "FAIL","wafv2_webacl_logging_enabled": "FAIL","ssm_managed_compliant_patching": "FAIL","kms_key_not_publicly_accessible": null,"ssmincidents_enabled_with_plans": null,"inspector2_active_findings_exist": "FAIL","cloudfront_distributions_using_waf": null,"cognito_user_pool_waf_acl_attached": null,"trustedadvisor_errors_and_warnings": null,"apigateway_restapi_waf_acl_attached": "FAIL","config_recorder_all_regions_enabled": null,"guardduty_no_high_severity_findings": "FAIL","ec2_securitygroup_from_launch_wizard": "FAIL","ec2_networkacl_allow_ingress_any_port": "FAIL","organizations_delegated_administrators": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","secretsmanager_automatic_rotation_enabled": "FAIL","ec2_instance_port_cifs_exposed_to_internet": "PASS","ec2_instance_port_ldap_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","ec2_instance_port_kafka_exposed_to_internet": "PASS","ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","ec2_instance_port_oracle_exposed_to_internet": 
"PASS","ec2_instance_port_telnet_exposed_to_internet": "PASS","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_kerberos_exposed_to_internet": "PASS","ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_memcached_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","cloudwatch_log_metric_filter_sign_in_without_mfa": null,"ec2_instance_port_postgresql_exposed_to_internet": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","shield_advanced_protection_in_global_accelerators": null,"ec2_instance_internet_facing_with_instance_profile": "FAIL","shield_advanced_protection_in_route53_hosted_zones": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_authentication_failures": null,"shield_advanced_protection_in_associated_elastic_ips": null,"shield_advanced_protection_in_classic_load_balancers": null,"shield_advanced_protection_in_cloudfront_distributions": null,"ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","shield_advanced_protection_in_internet_facing_load_balancers": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.1 ๋ณด์•ˆ์‹œ์Šคํ…œ ์šด์˜","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["๋ณด์•ˆ์‹œ์Šคํ…œ ๊ตฌ์„ฑ","๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ","๋ณด์•ˆ์‹œ์Šคํ…œ ์šด์˜์ ˆ์ฐจ","๋ฐฉํ™”๋ฒฝ ์ •์ฑ…","๋ฐฉํ™”๋ฒฝ ์ •์ฑ… ์„ค์ •ยท๋ณ€๊ฒฝ ์š”์ฒญ์„œ","๋ณด์•ˆ์‹œ์Šคํ…œ ์˜ˆ์™ธ์ž ๋ชฉ๋ก","๋ณด์•ˆ์‹œ์Šคํ…œ๋ณ„ ๊ด€๋ฆฌ ํ™”๋ฉด(๋ฐฉํ™”๋ฒฝ, IPS, ์„œ๋ฒ„์ ‘๊ทผ์ œ์–ด, DLP, DRM ๋“ฑ)","๋ณด์•ˆ์‹œ์Šคํ…œ ์ •์ฑ… ๊ฒ€ํ†  ์ด๋ ฅ"],"AuditChecklist": ["์กฐ์ง์—์„œ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š” ๋ณด์•ˆ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์šด์˜์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์•ˆ์‹œ์Šคํ…œ ๊ด€๋ฆฌ์ž ๋“ฑ ์ ‘๊ทผ์ด ํ—ˆ์šฉ๋œ ์ธ์›์„ ์ตœ์†Œํ™”ํ•˜๊ณ  ๋น„์ธ๊ฐ€์ž์˜ ์ ‘๊ทผ์„ ์—„๊ฒฉํ•˜๊ฒŒ ํ†ต์ œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์•ˆ์‹œ์Šคํ…œ๋ณ„๋กœ ์ •์ฑ…์˜ ์‹ ๊ทœ ๋“ฑ๋ก, ๋ณ€๊ฒฝ, ์‚ญ์ œ ๋“ฑ์„ ์œ„ํ•œ ๊ณต์‹์ ์ธ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์•ˆ์‹œ์Šคํ…œ์˜ ์˜ˆ์™ธ ์ •์ฑ… ๋“ฑ๋ก์— ๋Œ€ํ•˜์—ฌ ์ ˆ์ฐจ์— ๋”ฐ๋ผ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ์œผ๋ฉฐ, ์˜ˆ์™ธ ์ •์ฑ… ์‚ฌ์šฉ์ž์— ๋Œ€ํ•˜์—ฌ ์ตœ์†Œํ•œ์˜ ๊ถŒํ•œ์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์•ˆ์‹œ์Šคํ…œ์— ์„ค์ •๋œ ์ •์ฑ…์˜ ํƒ€๋‹น์„ฑ ์—ฌ๋ถ€๋ฅผ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ๋ถˆ๋ฒ•์ ์ธ ์ ‘๊ทผ ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ ๋ฐฉ์ง€๋ฅผ ์œ„ํ•˜์—ฌ ๊ด€๋ จ ๋ฒ•๋ น์—์„œ ์ •ํ•œ ๊ธฐ๋Šฅ์„ ์ˆ˜ํ–‰ํ•˜๋Š” ๋ณด์•ˆ์‹œ์Šคํ…œ์„ ์„ค์น˜ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์นจ์ž…์ฐจ๋‹จ์‹œ์Šคํ…œ ๋ณด์•ˆ์ •์ฑ…์— ๋Œ€ํ•œ ์ •๊ธฐ ๊ฒ€ํ† ๊ฐ€ ์ˆ˜ํ–‰๋˜์ง€ ์•Š์•„ ๋ถˆํ•„์š”ํ•˜๊ฑฐ๋‚˜ ๊ณผ๋„ํ•˜๊ฒŒ ํ—ˆ์šฉ๋œ ์ •์ฑ…์ด ๋‹ค์ˆ˜ ์กด์žฌํ•˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ณด์•ˆ์‹œ์Šคํ…œ ๋ณด์•ˆ์ •์ฑ…์˜ ์‹ ์ฒญ, ๋ณ€๊ฒฝ, ์‚ญ์ œ, ์ฃผ๊ธฐ์  ๊ฒ€ํ† ์— ๋Œ€ํ•œ ์ ˆ์ฐจ ๋ฐ ๊ธฐ์ค€์ด ์—†๊ฑฐ๋‚˜, ์ ˆ์ฐจ๋Š” ์žˆ์œผ๋‚˜ ์ด๋ฅผ ์ค€์ˆ˜ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ณด์•ˆ์‹œ์Šคํ…œ์˜ ๊ด€๋ฆฌ์ž ์ง€์ • ๋ฐ ๊ถŒํ•œ ๋ถ€์—ฌ ํ˜„ํ™ฉ์— ๋Œ€ํ•œ ๊ด€๋ฆฌ๊ฐ๋…์ด ์ ์ ˆํžˆ ์ดํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ์ •๋ณด๋ณดํ˜ธ๋‹ด๋‹น์ž๊ฐ€ ๋ณด์•ˆ์‹œ์Šคํ…œ์˜ ๋ณด์•ˆ์ •์ฑ… ๋ณ€๊ฒฝ ์ด๋ ฅ์„ ๊ธฐ๋กยท๋ณด๊ด€ํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ •์ฑ…๊ด€๋ฆฌ๋Œ€์žฅ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ž‘์„ฑํ•˜์ง€ ์•Š๊ณ  ์žˆ๊ฑฐ๋‚˜ ์ •์ฑ…๊ด€๋ฆฌ๋Œ€์žฅ์— ๊ธฐ๋ก๋œ ๋ณด์•ˆ์ •์ฑ…๊ณผ ์‹ค์ œ ์šด์˜ ์ค‘์ธ ์‹œ์Šคํ…œ์˜ ๋ณด์•ˆ์ •์ฑ…์ด ์ƒ์ดํ•œ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "๋ณด์•ˆ์‹œ์Šคํ…œ ์œ ํ˜•๋ณ„๋กœ ๊ด€๋ฆฌ์ž ์ง€์ •, ์ตœ์‹  ์ •์ฑ… ์—…๋ฐ์ดํŠธ, ๋ฃฐ์…‹ ๋ณ€๊ฒฝ, ์ด๋ฒคํŠธ ๋ชจ๋‹ˆํ„ฐ๋ง ๋“ฑ์˜ ์šด์˜์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ๋ณด์•ˆ์‹œ์Šคํ…œ๋ณ„ ์ •์ฑ…์ ์šฉ ํ˜„ํ™ฉ์„ ๊ด€๋ฆฌํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 16,"pass": 39,"total": 75,"manual": 0}},"2.10.2": {"name": "ํด๋ผ์šฐ๋“œ ๋ณด์•ˆ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.2 ํด๋ผ์šฐ๋“œ ๋ณด์•ˆ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๊ด€๋ จ ๊ณ„์•ฝ์„œ ๋ฐ SLA","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ์œ„ํ—˜๋ถ„์„ ๊ฒฐ๊ณผ","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๋ณด์•ˆํ†ต์ œ ์ •์ฑ…","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๊ด€๋ฆฌ์ž ๊ถŒํ•œ ๋ถ€์—ฌ ํ˜„ํ™ฉ","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๊ตฌ์„ฑ๋„","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๋ณด์•ˆ์„ค์ • ํ˜„ํ™ฉ","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๋ณด์•ˆ์„ค์ • ์ ์ •์„ฑ ๊ฒ€ํ†  ์ด๋ ฅ"],"AuditChecklist": ["ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ์ œ๊ณต์ž์™€ ์ •๋ณด๋ณดํ˜ธ ๋ฐ ๊ฐœ์ธ์ •๋ณด๋ณดํ˜ธ์— ๋Œ€ํ•œ ์ฑ…์ž„๊ณผ ์—ญํ• ์„ ๋ช…ํ™•ํžˆ์ •์˜ํ•˜๊ณ  ์ด๋ฅผ ๊ณ„์•ฝ์„œ(SLA ๋“ฑ)์— ๋ฐ˜์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ์ด์šฉ ์‹œ ์„œ๋น„์Šค ์œ ํ˜•์— ๋”ฐ๋ฅธ ๋ณด์•ˆ์œ„ํ—˜์„ ํ‰๊ฐ€ํ•˜์—ฌ ๋น„์ธ๊ฐ€ ์ ‘๊ทผ,์„ค์ •์˜ค๋ฅ˜ ๋“ฑ์„ ๋ฐฉ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ณด์•ˆ ๊ตฌ์„ฑ ๋ฐ ์„ค์ • ๊ธฐ์ค€, ๋ณด์•ˆ์„ค์ • ๋ณ€๊ฒฝ ๋ฐ ์Šน์ธ ์ ˆ์ฐจ, ์•ˆ์ „ํ•œ ์ ‘์†๋ฐฉ๋ฒ•, ๊ถŒํ•œ ์ฒด๊ณ„ ๋“ฑ ๋ณด์•ˆ ํ†ต์ œ ์ •์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๊ด€๋ฆฌ์ž ๊ถŒํ•œ์€ ์—ญํ• ์— ๋”ฐ๋ผ ์ตœ์†Œํ™”ํ•˜์—ฌ ๋ถ€์—ฌํ•˜๊ณ  ๊ด€๋ฆฌ์ž ๊ถŒํ•œ์— ๋Œ€ํ•œ๋น„์ธ๊ฐ€ ์ ‘๊ทผ, ๊ถŒํ•œ ์˜คยท๋‚จ์šฉ ๋“ฑ์„ ๋ฐฉ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ๊ฐ•ํ™”๋œ ์ธ์ฆ, ์•”ํ˜ธํ™”, ์ ‘๊ทผํ†ต์ œ, ๊ฐ์‚ฌ๊ธฐ๋ก ๋“ฑ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค์˜ ๋ณด์•ˆ ์„ค์ • ๋ณ€๊ฒฝ, ์šด์˜ ํ˜„ํ™ฉ ๋“ฑ์„ ๋ชจ๋‹ˆํ„ฐ๋งํ•˜๊ณ  ๊ทธ ์ ์ ˆ์„ฑ์„ ์ •๊ธฐ์ ์œผ๋กœ๊ฒ€ํ† ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ๊ณ„์•ฝ์„œ ๋‚ด์— ๋ณด์•ˆ์— ๋Œ€ํ•œ ์ฑ…์ž„ ๋ฐ ์—ญํ•  ๋“ฑ์— ๋Œ€ํ•œ ์‚ฌํ•ญ์ด ํฌํ•จ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค์˜ ๋ณด์•ˆ์„ค์ •์„ ๋ณ€๊ฒฝํ•  ์ˆ˜ ์žˆ๋Š” ๊ถŒํ•œ์ด ์—…๋ฌด์ƒ ๋ฐ˜๋“œ์‹œ ํ•„์š”ํ•˜์ง€ ์•Š์€ ์ง์›๋“ค์—๊ฒŒ ๊ณผ๋„ํ•˜๊ฒŒ ๋ถ€์—ฌ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ํด๋ผ์šฐ๋“œ ๋‚ด ์‚ฌ์„ค ๋„คํŠธ์›Œํฌ์˜ ์ ‘๊ทผํ†ต์ œ ๋ฃฐ(Rule) ๋ณ€๊ฒฝ ์‹œ ๋ณด์•ˆ์ฑ…์ž„์ž ์Šน์ธ์„ ๋ฐ›๋„๋ก ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์Šน์ธ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜์ง€ ์•Š๊ณ  ๋“ฑ๋กยท๋ณ€๊ฒฝ๋œ ์ ‘๊ทผ์ œ์–ด ๋ฃฐ์ด ๋‹ค์ˆ˜ ๋ฐœ๊ฒฌ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค์˜ ๋ณด์•ˆ์„ค์ • ์˜ค๋ฅ˜๋กœ ๋‚ด๋ถ€ ๋กœ๊ทธ ํŒŒ์ผ์ด ์ธํ„ฐ๋„ท์„ ํ†ตํ•˜์—ฌ ๊ณต๊ฐœ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "ํด๋ผ์šฐ๋“œ ์„œ๋น„์Šค ์ด์šฉ ์‹œ ์„œ๋น„์Šค ์œ ํ˜•(SaaS, PaaS, IaaS ๋“ฑ)์— ๋”ฐ๋ฅธ ๋น„์ธ๊ฐ€ ์ ‘๊ทผ, ์„ค์ • ์˜ค๋ฅ˜ ๋“ฑ์— ๋”ฐ๋ผ ์ค‘์š”์ •๋ณด์™€ ๊ฐœ์ธ์ •๋ณด๊ฐ€ ์œ ยท๋…ธ์ถœ๋˜์ง€ ์•Š๋„๋ก ๊ด€๋ฆฌ์ž ์ ‘๊ทผ ๋ฐ ๋ณด์•ˆ ์„ค์ • ๋“ฑ์— ๋Œ€ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.3": {"name": "๊ณต๊ฐœ์„œ๋ฒ„ ๋ณด์•ˆ","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","ssm_document_secrets": "PASS","elbv2_waf_acl_attached": "FAIL","elb_insecure_ssl_ciphers": "PASS","apigateway_restapi_public": "FAIL","lightsail_database_public": null,"lightsail_instance_public": null,"elbv2_insecure_ssl_ciphers": "PASS","lightsail_static_ip_unused": null,"networkfirewall_in_all_vpc": "FAIL","ec2_instance_imdsv2_enabled": "PASS","elbv2_desync_mitigation_mode": "FAIL","awslambda_function_inside_vpc": "FAIL","awslambda_function_url_public": null,"ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","ssm_managed_compliant_patching": "FAIL","inspector2_active_findings_exist": "FAIL","acm_certificates_expiration_check": "PASS","awslambda_function_url_cors_policy": null,"cloudfront_distributions_using_waf": null,"vpc_subnet_separate_private_public": "FAIL","apigateway_restapi_waf_acl_attached": "FAIL","apigatewayv2_api_authorizers_enabled": "FAIL","awslambda_function_no_secrets_in_code": 
"PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","apigateway_restapi_authorizers_enabled": "PASS","cloudfront_distributions_https_enabled": null,"ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","apigateway_restapi_public_with_authorizer": "FAIL","ec2_instance_port_ftp_exposed_to_internet": "PASS","ec2_instance_port_rdp_exposed_to_internet": "PASS","ec2_instance_port_ssh_exposed_to_internet": "PASS","awslambda_function_no_secrets_in_variables": "PASS","awslambda_function_not_publicly_accessible": "PASS","ec2_instance_port_cifs_exposed_to_internet": "PASS","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","route53_domains_privacy_protection_enabled": null,"ec2_instance_port_kafka_exposed_to_internet": "PASS","ec2_instance_port_mysql_exposed_to_internet": "PASS","ec2_instance_port_redis_exposed_to_internet": "PASS","ec2_instance_port_oracle_exposed_to_internet": "PASS","ec2_instance_port_telnet_exposed_to_internet": "PASS","apigateway_restapi_client_certificate_enabled": "FAIL","ec2_instance_port_mongodb_exposed_to_internet": "PASS","ec2_securitygroup_allow_wide_open_public_ipv4": "PASS","ec2_instance_port_kerberos_exposed_to_internet": "PASS","ec2_instance_port_cassandra_exposed_to_internet": "PASS","ec2_instance_port_memcached_exposed_to_internet": "PASS","ec2_instance_port_sqlserver_exposed_to_internet": "PASS","kafka_cluster_mutual_tls_authentication_enabled": null,"ec2_instance_port_postgresql_exposed_to_internet": "PASS","ec2_securitygroup_with_many_ingress_egress_rules": "PASS","autoscaling_find_secrets_ec2_launch_configuration": "PASS","ec2_instance_internet_facing_with_instance_profile": "FAIL","cloudfront_distributions_using_deprecated_ssl_protocols": null,"ec2_securitygroup_allow_ingress_from_internet_to_any_port": "PASS","ec2_instance_port_elasticsearch_kibana_exposed_to_internet": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.3 ๊ณต๊ฐœ์„œ๋ฒ„ ๋ณด์•ˆ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ๋„","์›น์‚ฌ์ดํŠธ ์ •๋ณด๊ณต๊ฐœ ์ ˆ์ฐจ ๋ฐ ๋‚ด์—ญ(์‹ ์ฒญยท์Šน์ธยท๊ฒŒ์‹œ ์ด๋ ฅ ๋“ฑ)","๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด ๋…ธ์ถœ ์—ฌ๋ถ€ ์ ๊ฒ€ ์ด๋ ฅ"],"AuditChecklist": ["๊ณต๊ฐœ์„œ๋ฒ„๋ฅผ ์šด์˜ํ•˜๋Š” ๊ฒฝ์šฐ ์ด์— ๋Œ€ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ฐœ์„œ๋ฒ„๋Š” ๋‚ด๋ถ€ ๋„คํŠธ์›Œํฌ์™€ ๋ถ„๋ฆฌ๋œ DMZ ์˜์—ญ์— ์„ค์น˜ํ•˜๊ณ  ์นจ์ž…์ฐจ๋‹จ์‹œ์Šคํ…œ ๋“ฑ ๋ณด์•ˆ์‹œ์Šคํ…œ์„ ํ†ตํ•˜์—ฌ ๋ณดํ˜ธํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ณต๊ฐœ์„œ๋ฒ„์— ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๋ฅผ ๊ฒŒ์‹œํ•˜๊ฑฐ๋‚˜ ์ €์žฅํ•˜์—ฌ์•ผ ํ•  ๊ฒฝ์šฐ ์ฑ…์ž„์ž ์Šน์ธ ๋“ฑ ํ—ˆ๊ฐ€ ๋ฐ ๊ฒŒ์‹œ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์กฐ์ง์˜ ์ค‘์š”์ •๋ณด๊ฐ€ ์›น์‚ฌ์ดํŠธ ๋ฐ ์›น์„œ๋ฒ„๋ฅผ ํ†ตํ•˜์—ฌ ๋…ธ์ถœ๋˜๊ณ  ์žˆ๋Š”์ง€ ์—ฌ๋ถ€๋ฅผ ์ฃผ๊ธฐ์ ์œผ๋กœ ํ™•์ธํ•˜์—ฌ ์ค‘์š”์ •๋ณด ๋…ธ์ถœ์„ ์ธ์ง€ํ•œ ๊ฒฝ์šฐ ์ด๋ฅผ ์ฆ‰์‹œ ์ฐจ๋‹จํ•˜๋Š” ๋“ฑ์˜ ์กฐ์น˜๋ฅผ ์ทจํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ธํ„ฐ๋„ท์— ๊ณต๊ฐœ๋œ ์›น์‚ฌ์ดํŠธ์˜ ์ทจ์•ฝ์ ์œผ๋กœ ์ธํ•˜์—ฌ ๊ตฌ๊ธ€ ๊ฒ€์ƒ‰์„ ํ†ตํ•˜์—ฌ ์—ด๋žŒ ๊ถŒํ•œ์ด ์—†๋Š” ํƒ€์ธ์˜ ๊ฐœ์ธ์ •๋ณด์— ์ ‘๊ทผํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์›น์‚ฌ์ดํŠธ์— ๊ฐœ์ธ์ •๋ณด๋ฅผ ๊ฒŒ์‹œํ•˜๋Š” ๊ฒฝ์šฐ ์Šน์ธ ์ ˆ์ฐจ๋ฅผ ๊ฑฐ์น˜๋„๋ก ๋‚ด๋ถ€ ๊ทœ์ •์ด ๋งˆ๋ จ๋˜์–ด ์žˆ์œผ๋‚˜, ์ด๋ฅผ ์ค€์ˆ˜ํ•˜์ง€ ์•Š๊ณ  ๊ฐœ์ธ์ •๋ณด๊ฐ€ ๊ฒŒ์‹œ๋œ ์‚ฌ๋ก€๊ฐ€ ๋‹ค์ˆ˜ ์กด์žฌํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฒŒ์‹œํŒ ๋“ฑ์˜ ์›น ์‘์šฉํ”„๋กœ๊ทธ๋žจ์—์„œ ํƒ€์ธ์ด ์ž‘์„ฑํ•œ ๊ธ€์„ ์ž„์˜๋กœ ์ˆ˜์ •ยท์‚ญ์ œํ•˜๊ฑฐ๋‚˜ ๋น„๋ฐ€๋ฒˆํ˜ธ๋กœ ๋ณดํ˜ธ๋œ ๊ธ€์„ ์—ด๋žŒํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์™ธ๋ถ€ ๋„คํŠธ์›Œํฌ์— ๊ณต๊ฐœ๋˜๋Š” ์„œ๋ฒ„์˜ ๊ฒฝ์šฐ ๋‚ด๋ถ€ ๋„คํŠธ์›Œํฌ์™€ ๋ถ„๋ฆฌํ•˜๊ณ  ์ทจ์•ฝ์  ์ ๊ฒ€, ์ ‘๊ทผํ†ต์ œ, ์ธ์ฆ, ์ •๋ณด ์ˆ˜์ง‘ยท์ €์žฅยท๊ณต๊ฐœ ์ ˆ์ฐจ ๋“ฑ ๊ฐ•ํ™”๋œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 19,"pass": 47,"total": 76,"manual": 0}},"2.10.4": {"name": "์ „์ž๊ฑฐ๋ž˜ ๋ฐ ํ•€ํ…Œํฌ ๋ณด์•ˆ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.4 ์ „์ž๊ฑฐ๋ž˜ ๋ฐ ํ•€ํ…Œํฌ ๋ณด์•ˆ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["์ „์ž๊ฑฐ๋ž˜ ๋ฐ ํ•€ํ…Œํฌ ์„œ๋น„์Šค ๋ณดํ˜ธ๋Œ€์ฑ…","๊ฒฐ์ œ์‹œ์Šคํ…œ ์—ฐ๊ณ„ ์‹œ ๋ณด์•ˆ์„ฑ ๊ฒ€ํ†  ๊ฒฐ๊ณผ"],"AuditChecklist": ["์ „์ž๊ฑฐ๋ž˜ ๋ฐ ํ•€ํ…Œํฌ ์„œ๋น„์Šค๋ฅผ ์ œ๊ณตํ•˜๋Š” ๊ฒฝ์šฐ ๊ฑฐ๋ž˜์˜ ์•ˆ์ „์„ฑ๊ณผ ์‹ ๋ขฐ์„ฑ ํ™•๋ณด๋ฅผ ์œ„ํ•œ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ „์ž๊ฑฐ๋ž˜ ๋ฐ ํ•€ํ…Œํฌ ์„œ๋น„์Šค ์ œ๊ณต์„ ์œ„ํ•˜์—ฌ ๊ฒฐ์ œ์‹œ์Šคํ…œ ๋“ฑ ์™ธ๋ถ€ ์‹œ์Šคํ…œ๊ณผ ์—ฐ๊ณ„ํ•˜๋Š” ๊ฒฝ์šฐ ์†ก์ˆ˜์‹ ๋˜๋Š” ๊ด€๋ จ ์ •๋ณด์˜ ๋ณดํ˜ธ๋ฅผ ์œ„ํ•œ ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์•ˆ์ „์„ฑ์„ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ „์ž๊ฒฐ์ œ๋Œ€ํ–‰์—…์ฒด์™€ ์œ„ํƒ ๊ณ„์•ฝ์„ ๋งบ๊ณ  ์—ฐ๊ณ„๋ฅผ ํ•˜์˜€์œผ๋‚˜, ์ ์ ˆํ•œ ์ธ์ฆ ๋ฐ ์ ‘๊ทผ์ œํ•œ ์—†์ด ํŠน์ • URL์„ ํ†ตํ•˜์—ฌ ๊ฒฐ์ œ ๊ด€๋ จ ์ •๋ณด๊ฐ€ ๋ชจ๋‘ ํ‰๋ฌธ์œผ๋กœ ์ „์†ก๋˜๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ „์ž๊ฒฐ์ œ๋Œ€ํ–‰์—…์ฒด์™€ ์™ธ๋ถ€ ์—ฐ๊ณ„ ์‹œ์Šคํ…œ์ด ์ „์šฉ๋ง์œผ๋กœ ์—ฐ๊ฒฐ๋˜์–ด ์žˆ์œผ๋‚˜, ํ•ด๋‹น ์—ฐ๊ณ„ ์‹œ์Šคํ…œ์—์„œ ๋‚ด๋ถ€ ์—…๋ฌด ์‹œ์Šคํ…œ์œผ๋กœ์˜ ์ ‘๊ทผ์ด ์นจ์ž…์ฐจ๋‹จ์‹œ์Šคํ…œ ๋“ฑ์œผ๋กœ ์ ์ ˆํžˆ ํ†ต์ œ๋˜์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€ ์ง€์นจ์—๋Š” ์™ธ๋ถ€ ํ•€ํ…Œํฌ ์„œ๋น„์Šค ์—ฐ๊ณ„ ์‹œ ์ •๋ณด๋ณดํ˜ธํŒ€์˜ ๋ณด์•ˆ์„ฑ ๊ฒ€ํ† ๋ฅผ ๋ฐ›๋„๋ก ๋˜์–ด ์žˆ์œผ๋‚˜, ์ตœ๊ทผ์— ์‹ ๊ทœ ํ•€ํ…Œํฌ ์„œ๋น„์Šค๋ฅผ ์—ฐ๊ณ„ํ•˜๋ฉด์„œ ์ผ์ •์ƒ ์ด์œ ๋กœ ๋ณด์•ˆ์„ฑ ๊ฒ€ํ† ๋ฅผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์ „์ž๊ฑฐ๋ž˜ ๋ฐ ํ•€ํ…Œํฌ ์„œ๋น„์Šค ์ œ๊ณต ์‹œ ์ •๋ณด์œ ์ถœ์ด๋‚˜ ๋ฐ์ดํ„ฐ ์กฐ์ž‘ยท์‚ฌ๊ธฐ ๋“ฑ์˜ ์นจํ•ด์‚ฌ๊ณ  ์˜ˆ๋ฐฉ์„ ์œ„ํ•˜์—ฌ ์ธ์ฆยท์•”ํ˜ธํ™” ๋“ฑ์˜ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ , ๊ฒฐ์ œ์‹œ์Šคํ…œ ๋“ฑ ์™ธ๋ถ€ ์‹œ์Šคํ…œ๊ณผ ์—ฐ๊ณ„ํ•  ๊ฒฝ์šฐ ์•ˆ์ „์„ฑ์„ ์ ๊ฒ€ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.5": {"name": "์ •๋ณด์ „์†ก ๋ณด์•ˆ","checks": {"elb_ssl_listeners": "FAIL","elbv2_ssl_listeners": "FAIL","elb_insecure_ssl_ciphers": "PASS","elbv2_insecure_ssl_ciphers": "PASS","rds_instance_transport_encrypted": "FAIL","s3_bucket_secure_transport_policy": "FAIL","glue_database_connections_ssl_enabled": null,"cloudfront_distributions_https_enabled": null,"sns_subscription_not_using_http_endpoints": "PASS","kafka_cluster_in_transit_encryption_enabled": null,"apigateway_restapi_client_certificate_enabled": "FAIL","kafka_cluster_mutual_tls_authentication_enabled": null,"directoryservice_radius_server_security_protocol": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"elasticache_redis_cluster_in_transit_encryption_enabled": null,"opensearch_service_domains_https_communications_enforced": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.5 ์ •๋ณด์ „์†ก ๋ณด์•ˆ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["์ •๋ณด์ „์†ก ํ˜‘์•ฝ์„œ ๋˜๋Š” ๊ณ„์•ฝ์„œ","์ •๋ณด์ „์†ก ๊ธฐ์ˆ ํ‘œ์ค€","์ •๋ณด์ „์†ก ๊ด€๋ จ ๊ตฌ์„ฑ๋„, ์ธํ„ฐํŽ˜์ด์Šค ์ •์˜์„œ"],"AuditChecklist": ["์™ธ๋ถ€ ์กฐ์ง์— ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๋ฅผ ์ „์†กํ•  ๊ฒฝ์šฐ ์•ˆ์ „ํ•œ ์ „์†ก ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด์ƒ ์กฐ์ง ๊ฐ„ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๋ฅผ ์ƒํ˜ธ๊ตํ™˜ํ•˜๋Š” ๊ฒฝ์šฐ ์•ˆ์ „ํ•œ ์ „์†ก์„ ์œ„ํ•œ ํ˜‘์•ฝ์ฒด๊ฒฐ ๋“ฑ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋Œ€์™ธ ๊ธฐ๊ด€๊ณผ ์—ฐ๊ณ„ ์‹œ ์ „์šฉ๋ง ๋˜๋Š” VPN์„ ์ ์šฉํ•˜๊ณ  ์ค‘๊ณ„์„œ๋ฒ„์™€ ์ธ์ฆ์„œ ์ ์šฉ ๋“ฑ์„ ํ†ตํ•˜์—ฌ ์•ˆ์ „ํ•˜๊ฒŒ ์ •๋ณด๋ฅผ ์ „์†กํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์™ธ๋ถ€ ๊ธฐ๊ด€๋ณ„ ์—ฐ๊ณ„ ์‹œ๊ธฐ, ๋ฐฉ์‹, ๋‹ด๋‹น์ž ๋ฐ ์ฑ…์ž„์ž, ์—ฐ๊ณ„ ์ •๋ณด, ๋ฒ•์  ๊ทผ๊ฑฐ ๋“ฑ์— ๋Œ€ํ•œ ํ˜„ํ™ฉ๊ด€๋ฆฌ๊ฐ€ ์ ์ ˆํžˆ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ค‘๊ณ„๊ณผ์ •์—์„œ์˜ ์•”ํ˜ธ ํ•ด์ œ ๊ตฌ๊ฐ„ ๋˜๋Š” ์ทจ์•ฝํ•œ ์•”ํ˜ธํ™” ์•Œ๊ณ ๋ฆฌ์ฆ˜(DES, 3DES) ์‚ฌ์šฉ ๋“ฑ์— ๋Œ€ํ•œ ๋ณด์•ˆ์„ฑ ๊ฒ€ํ† , ๋ณด์•ˆํ‘œ์ค€ ๋ฐ ์กฐ์น˜๋ฐฉ์•ˆ ์ˆ˜๋ฆฝ ๋“ฑ์— ๋Œ€ํ•œ ํ˜‘์˜๊ฐ€ ์ดํ–‰๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "๋‹ค๋ฅธ ์กฐ์ง์— ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๋ฅผ ์ „์†กํ•  ๊ฒฝ์šฐ ์•ˆ์ „ํ•œ ์ „์†ก ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์กฐ์ง ๊ฐ„ ํ•ฉ์˜๋ฅผ ํ†ตํ•˜์—ฌ ๊ด€๋ฆฌ ์ฑ…์ž„, ์ „์†ก๋ฐฉ๋ฒ•, ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด ๋ณดํ˜ธ๋ฅผ ์œ„ํ•œ ๊ธฐ์ˆ ์  ๋ณดํ˜ธ์กฐ์น˜ ๋“ฑ์„ ํ˜‘์•ฝํ•˜๊ณ  ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 5,"pass": 3,"total": 17,"manual": 0}},"2.10.6": {"name": "์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ๊ธฐ ๋ณด์•ˆ","checks": {"workspaces_volume_encryption_enabled": null,"appstream_fleet_maximum_session_duration": null,"appstream_fleet_session_disconnect_timeout": null,"workspaces_vpc_2private_1public_subnets_nat": null,"appstream_fleet_session_idle_disconnect_timeout": null,"appstream_fleet_default_internet_access_disabled": null},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.6 ์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ๊ธฐ ๋ณด์•ˆ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ ๋ณด์•ˆํ†ต์ œ ์ง€์นจ ๋ฐ ์ ˆ์ฐจ","์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ ๋“ฑ๋กํ˜„ํ™ฉ","์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ ๋ณด์•ˆ์„ค์ •","์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ ๊ธฐ๊ธฐ์ธ์ฆ ๋ฐ ์Šน์ธ ์ด๋ ฅ","์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ ๋ณด์•ˆ์ ๊ฒ€ ํ˜„ํ™ฉ"],"AuditChecklist": ["PC, ๋…ธํŠธ๋ถ, ๊ฐ€์ƒPC, ํƒœ๋ธ”๋ฆฟ ๋“ฑ ์—…๋ฌด์— ์‚ฌ์šฉ๋˜๋Š” ๋‹จ๋ง๊ธฐ์— ๋Œ€ํ•˜์—ฌ ๊ธฐ๊ธฐ์ธ์ฆ, ์Šน์ธ, ์ ‘๊ทผ๋ฒ”์œ„ ์„ค์ •, ๊ธฐ๊ธฐ ๋ณด์•ˆ์„ค์ • ๋“ฑ์˜ ๋ณด์•ˆ ํ†ต์ œ ์ •์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ๋ฅผ ํ†ตํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด๊ฐ€ ์œ ์ถœ๋˜๋Š” ๊ฒƒ์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ž๋ฃŒ๊ณต์œ ํ”„๋กœ๊ทธ๋žจ ์‚ฌ์šฉ ๊ธˆ์ง€, ๊ณต์œ ์„ค์ • ์ œํ•œ, ๋ฌด์„ ๋ง ์ด์šฉ ํ†ต์ œ ๋“ฑ์˜ ์ •์ฑ…์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด์šฉ ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ์˜ ๋ถ„์‹ค, ๋„๋‚œ ๋“ฑ์œผ๋กœ ์ธํ•œ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด์˜ ์œ ยท๋…ธ์ถœ์„ ๋ฐฉ์ง€ํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๋ณด์•ˆ๋Œ€์ฑ…์„ ์ ์šฉํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ๊ธฐ์— ๋Œ€ํ•œ ์ ‘๊ทผํ†ต์ œ ๋Œ€์ฑ…์˜ ์ ์ ˆ์„ฑ์— ๋Œ€ํ•˜์—ฌ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์—…๋ฌด์ ์ธ ๋ชฉ์ ์œผ๋กœ ๋…ธํŠธ๋ถ, ํƒœ๋ธ”๋ฆฟPC ๋“ฑ ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ๋ฅผ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์—…๋ฌด์šฉ ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ์— ๋Œ€ํ•œ ํ—ˆ์šฉ ๊ธฐ์ค€, ์‚ฌ์šฉ ๋ฒ”์œ„, ์Šน์ธ ์ ˆ์ฐจ, ์ธ์ฆ ๋ฐฉ๋ฒ• ๋“ฑ์— ๋Œ€ํ•œ ์ •์ฑ…์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ ๋ณด์•ˆ๊ด€๋ฆฌ ์ง€์นจ์—์„œ๋Š” ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ์˜ ์—…๋ฌด์šฉ ์‚ฌ์šฉ์„ ์›์น™์ ์œผ๋กœ ๊ธˆ์ง€ํ•˜๊ณ  ํ•„์š”์‹œ ์Šน์ธ ์ ˆ์ฐจ๋ฅผ ํ†ตํ•˜์—ฌ ์ œํ•œ๋œ ๊ธฐ๊ฐ„ ๋™์•ˆ ํ—ˆ๊ฐ€๋œ ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ๋งŒ ์‚ฌ์šฉํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ํ—ˆ๊ฐ€๋œ ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ๊ฐ€ ์‹๋ณ„ยท๊ด€๋ฆฌ๋˜์ง€ ์•Š๊ณ  ์Šน์ธ๋˜์ง€ ์•Š์€ ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ์—์„œ๋„ ๋‚ด๋ถ€ ์ •๋ณด์‹œ์Šคํ…œ ์ ‘์†์ด ๊ฐ€๋Šฅํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๊ฐœ์ธ์ •๋ณด ์ฒ˜๋ฆฌ์—…๋ฌด์— ์ด์šฉ๋˜๋Š” ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ์— ๋Œ€ํ•˜์—ฌ ๋น„๋ฐ€๋ฒˆํ˜ธ ์„ค์ • ๋“ฑ ๋„๋‚œยท๋ถ„์‹ค์— ๋Œ€ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์ด ์ ์šฉ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋‚ด๋ถ€ ๊ทœ์ •์—์„œ๋Š” ์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ์˜ ๊ณต์œ ํด๋” ์‚ฌ์šฉ์„ ๊ธˆ์ง€ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ์ฃผ๊ธฐ์ ์ธ ์ ๊ฒ€์ด ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์•„ ๋‹ค์ˆ˜์˜ ์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ์—์„œ ๊ณผ๋„ํ•˜๊ฒŒ ๊ณต์œ ํด๋”๋ฅผ ์„ค์ •ํ•˜์—ฌ ์‚ฌ์šฉํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "PC, ๋ชจ๋ฐ”์ผ ๊ธฐ๊ธฐ ๋“ฑ ๋‹จ๋ง๊ธฐ๊ธฐ๋ฅผ ์—…๋ฌด ๋ชฉ์ ์œผ๋กœ ๋„คํŠธ์›Œํฌ์— ์—ฐ๊ฒฐํ•  ๊ฒฝ์šฐ ๊ธฐ๊ธฐ ์ธ์ฆ ๋ฐ ์Šน์ธ, ์ ‘๊ทผ ๋ฒ”์œ„, ๊ธฐ๊ธฐ ๋ณด์•ˆ์„ค์ • ๋“ฑ์˜ ์ ‘๊ทผํ†ต์ œ ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 6,"manual": 0}},"2.10.7": {"name": "๋ณด์กฐ์ €์žฅ๋งค์ฒด ๊ด€๋ฆฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.7 ๋ณด์กฐ์ €์žฅ๋งค์ฒด ๊ด€๋ฆฌ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["๋ณด์กฐ์ €์žฅ๋งค์ฒด(USB, CD ๋“ฑ) ์ฐจ๋‹จ ์ •์ฑ…","๋ณด์กฐ์ €์žฅ๋งค์ฒด ๊ด€๋ฆฌ๋Œ€์žฅ","๋ณด์กฐ์ €์žฅ๋งค์ฒด ์‹คํƒœ์ ๊ฒ€ ์ด๋ ฅ"],"AuditChecklist": ["์™ธ์žฅํ•˜๋“œ, USB๋ฉ”๋ชจ๋ฆฌ, CD ๋“ฑ ๋ณด์กฐ์ €์žฅ๋งค์ฒด ์ทจ๊ธ‰(์‚ฌ์šฉ), ๋ณด๊ด€, ํ๊ธฐ, ์žฌ์‚ฌ์šฉ์— ๋Œ€ํ•œ ์ •์ฑ… ๋ฐ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์กฐ์ €์žฅ๋งค์ฒด ๋ณด์œ ํ˜„ํ™ฉ, ์‚ฌ์šฉ ๋ฐ ๊ด€๋ฆฌ์‹คํƒœ๋ฅผ ์ฃผ๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ์š” ์ •๋ณด์‹œ์Šคํ…œ์ด ์œ„์น˜ํ•œ ํ†ต์ œ๊ตฌ์—ญ, ์ค‘์š” ์ œํ•œ๊ตฌ์—ญ ๋“ฑ์—์„œ ๋ณด์กฐ์ €์žฅ๋งค์ฒด ์‚ฌ์šฉ์„ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์กฐ์ €์žฅ๋งค์ฒด๋ฅผ ํ†ตํ•œ ์•…์„ฑ์ฝ”๋“œ ๊ฐ์—ผ ๋ฐ ์ค‘์š”์ •๋ณด ์œ ์ถœ ๋ฐฉ์ง€๋ฅผ ์œ„ํ•œ ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ๋˜๋Š” ์ค‘์š”์ •๋ณด๊ฐ€ ํฌํ•จ๋œ ๋ณด์กฐ์ €์žฅ๋งค์ฒด๋ฅผ ์ž ๊ธˆ์žฅ์น˜๊ฐ€ ์žˆ๋Š” ์•ˆ์ „ํ•œ ์žฅ์†Œ์— ๋ณด๊ด€ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ํ†ต์ œ๊ตฌ์—ญ์ธ ์„œ๋ฒ„์‹ค์—์„œ์˜ ๋ณด์กฐ์ €์žฅ๋งค์ฒด ์‚ฌ์šฉ์„ ์ œํ•œํ•˜๋Š” ์ •์ฑ…์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์˜ˆ์™ธ ์Šน์ธ ์ ˆ์ฐจ๋ฅผ ์ค€์ˆ˜ํ•˜์ง€ ์•Š๊ณ  ๋ณด์กฐ์ €์žฅ๋งค์ฒด๋ฅผ ์‚ฌ์šฉํ•œ ์ด๋ ฅ์ด ๋‹ค์ˆ˜ ํ™•์ธ๋˜์—ˆ์œผ๋ฉฐ, ๋ณด์กฐ์ €์žฅ๋งค์ฒด ๊ด€๋ฆฌ์‹คํƒœ์— ๋Œ€ํ•œ ์ฃผ๊ธฐ์  ์ ๊ฒ€์ด ์‹ค์‹œ๋˜์ง€ ์•Š์•„ ๋ณด์กฐ์ €์žฅ๋งค์ฒด ๊ด€๋ฆฌ๋Œ€์žฅ์˜ ํ˜„ํ–‰ํ™”๊ฐ€ ๋ฏธํกํ•œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๊ฐœ์ธ์ •๋ณด๊ฐ€ ํฌํ•จ๋œ ๋ณด์กฐ์ €์žฅ๋งค์ฒด๋ฅผ ์ž ๊ธˆ์žฅ์น˜๊ฐ€ ์žˆ๋Š” ์•ˆ์ „ํ•œ ์žฅ์†Œ์— ๋ณด๊ด€ํ•˜์ง€ ์•Š๊ณ  ์‚ฌ๋ฌด์‹ค ์„œ๋ž ๋“ฑ์— ๋ฐฉ์น˜ํ•˜๊ณ  ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ณด์กฐ์ €์žฅ๋งค์ฒด ํ†ต์ œ ์†”๋ฃจ์…˜์„ ๋„์ž…ยท์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ผ๋ถ€ ์‚ฌ์šฉ์ž์— ๋Œ€ํ•˜์—ฌ ์ ์ ˆํ•œ ์Šน์ธ ์ ˆ์ฐจ ์—†์ด ์˜ˆ์™ธ์ฒ˜๋ฆฌ๋˜์–ด ์“ฐ๊ธฐ ๋“ฑ์ด ํ—ˆ์šฉ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ „์‚ฐ์‹ค์— ์œ„์น˜ํ•œ ์ผ๋ถ€ ๊ณต์šฉ PC ๋ฐ ์ „์‚ฐ์žฅ๋น„์—์„œ ์ผ๋ฐ˜ USB๋ฉ”๋ชจ๋ฆฌ์— ๋Œ€ํ•œ ์“ฐ๊ธฐ๊ฐ€ ๊ฐ€๋Šฅํ•œ ์ƒํ™ฉ์ด๋‚˜ ๋งค์ฒด ๋ฐ˜์ž… ๋ฐ ์‚ฌ์šฉ ์ œํ•œ, ์‚ฌ์šฉ์ด๋ ฅ ๊ธฐ๋ก ๋ฐ ๊ฒ€ํ†  ๋“ฑ ํ†ต์ œ๊ฐ€ ์ ์šฉ๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ10์กฐ(๋ฌผ๋ฆฌ์  ์•ˆ์ „์กฐ์น˜)"]}],"description": "๋ณด์กฐ์ €์žฅ๋งค์ฒด๋ฅผ ํ†ตํ•˜์—ฌ ๊ฐœ์ธ์ •๋ณด ๋˜๋Š” ์ค‘์š”์ •๋ณด์˜ ์œ ์ถœ์ด ๋ฐœ์ƒํ•˜๊ฑฐ๋‚˜ ์•…์„ฑ์ฝ”๋“œ๊ฐ€ ๊ฐ์—ผ๋˜์ง€ ์•Š๋„๋ก ๊ด€๋ฆฌ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ , ๊ฐœ์ธ์ •๋ณด ๋˜๋Š” ์ค‘์š”์ •๋ณด๊ฐ€ ํฌํ•จ๋œ ๋ณด์กฐ์ €์žฅ ๋งค์ฒด๋Š” ์•ˆ์ „ํ•œ ์žฅ์†Œ์— ๋ณด๊ด€ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.10.8": {"name": "ํŒจ์น˜๊ด€๋ฆฌ","checks": {"ssm_managed_compliant_patching": "FAIL","kafka_cluster_uses_latest_version": null,"ec2_instance_account_imdsv2_enabled": null,"redshift_cluster_automatic_upgrades": null,"eks_cluster_uses_a_supported_version": null,"ec2_instance_older_than_specific_days": "FAIL","rds_instance_deprecated_engine_version": "PASS","rds_cluster_minor_version_upgrade_enabled": "PASS","dms_instance_minor_version_upgrade_enabled": null,"rds_instance_minor_version_upgrade_enabled": "PASS","awslambda_function_using_supported_runtimes": "FAIL","elasticache_redis_cluster_auto_minor_version_upgrades": null,"cloudfront_distributions_using_deprecated_ssl_protocols": null,"opensearch_service_domains_updated_to_the_latest_service_software_version": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.8 ํŒจ์น˜๊ด€๋ฆฌ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["ํŒจ์น˜ ์ ์šฉ ๊ด€๋ฆฌ ์ •์ฑ…ยท์ ˆ์ฐจ","์‹œ์Šคํ…œ๋ณ„ ํŒจ์น˜ ์ ์šฉ ํ˜„ํ™ฉ","ํŒจ์น˜ ์ ์šฉ ๊ด€๋ จ ์˜ํ–ฅ๋„ ๋ถ„์„ ๊ฒฐ๊ณผ"],"AuditChecklist": ["์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ, ๋ณด์•ˆ์‹œ์Šคํ…œ, PC ๋“ฑ ์ž์‚ฐ๋ณ„ ํŠน์„ฑ ๋ฐ ์ค‘์š”๋„์— ๋”ฐ๋ผ ์šด์˜์ฒด์ œ(OS)์™€ ์†Œํ”„ํŠธ์›จ์–ด์˜ ํŒจ์น˜๊ด€๋ฆฌ ์ •์ฑ… ๋ฐ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ์š” ์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ, ๋ณด์•ˆ์‹œ์Šคํ…œ ๋“ฑ์˜ ๊ฒฝ์šฐ ์„ค์น˜๋œ OS, ์†Œํ”„ํŠธ์›จ์–ด ํŒจ์น˜ ์ ์šฉ ํ˜„ํ™ฉ์„ ์ฃผ๊ธฐ์ ์œผ๋กœ ๊ด€๋ฆฌํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์„œ๋น„์Šค ์˜ํ–ฅ๋„ ๋“ฑ์— ๋”ฐ๋ผ ์ทจ์•ฝ์ ์„ ์กฐ์น˜ํ•˜๊ธฐ ์œ„ํ•œ ์ตœ์‹ ์˜ ํŒจ์น˜ ์ ์šฉ์ด ์–ด๋ ค์šด ๊ฒฝ์šฐ ๋ณด์™„๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ฃผ์š” ์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ, ๋ณด์•ˆ์‹œ์Šคํ…œ ๋“ฑ์˜ ๊ฒฝ์šฐ ๊ณต๊ฐœ ์ธํ„ฐ๋„ท ์ ‘์†์„ ํ†ตํ•œ ํŒจ์น˜๋ฅผ ์ œํ•œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","ํŒจ์น˜๊ด€๋ฆฌ์‹œ์Šคํ…œ์„ ํ™œ์šฉํ•˜๋Š” ๊ฒฝ์šฐ ์ ‘๊ทผํ†ต์ œ ๋“ฑ ์ถฉ๋ถ„ํ•œ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ผ๋ถ€ ์‹œ์Šคํ…œ์—์„œ ํƒ€๋‹นํ•œ ์‚ฌ์œ ๋‚˜ ์ฑ…์ž„์ž ์Šน์ธ ์—†์ด OSํŒจ์น˜๊ฐ€ ์žฅ๊ธฐ๊ฐ„ ์ ์šฉ๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ผ๋ถ€ ์‹œ์Šคํ…œ์— ์„œ๋น„์Šค ์ง€์›์ด ์ข…๋ฃŒ(EOS)๋œ OS๋ฒ„์ „์„ ์‚ฌ์šฉ ์ค‘์ด๋‚˜, ์ด์— ๋”ฐ๋ฅธ ๋Œ€์‘๊ณ„ํš์ด๋‚˜ ๋ณด์™„๋Œ€์ฑ…์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์ƒ์šฉ ์†Œํ”„ํŠธ์›จ์–ด ๋ฐ OS์— ๋Œ€ํ•ด์„œ๋Š” ์ตœ์‹  ํŒจ์น˜๊ฐ€ ์ ์šฉ๋˜๊ณ  ์žˆ์œผ๋‚˜, ์˜คํ”ˆ์†Œ์Šค ํ”„๋กœ๊ทธ๋žจ(openssl, openssh, Apache ๋“ฑ)์— ๋Œ€ํ•ด์„œ๋Š” ์ตœ์‹  ํŒจ์น˜๋ฅผ ํ™•์ธํ•˜๊ณ  ์ ์šฉํ•˜๋Š” ์ ˆ์ฐจ ๋ฐ ๋‹ด๋‹น์ž๊ฐ€ ์ง€์ •๋˜์–ด ์žˆ์ง€ ์•Š์•„ ์ตœ์‹  ๋ณด์•ˆํŒจ์น˜๊ฐ€ ์ ์šฉ๋˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ9์กฐ(์•…์„ฑํ”„๋กœ๊ทธ๋žจ ๋“ฑ ๋ฐฉ์ง€)"]}],"description": "์†Œํ”„ํŠธ์›จ์–ด, ์šด์˜์ฒด์ œ, ๋ณด์•ˆ์‹œ์Šคํ…œ ๋“ฑ์˜ ์ทจ์•ฝ์ ์œผ๋กœ ์ธํ•œ ์นจํ•ด์‚ฌ๊ณ ๋ฅผ ์˜ˆ๋ฐฉํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ตœ์‹  ํŒจ์น˜๋ฅผ ์ ์šฉํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ๋‹ค๋งŒ ์„œ๋น„์Šค ์˜ํ–ฅ์„ ๊ฒ€ํ† ํ•˜์—ฌ ์ตœ์‹  ํŒจ์น˜ ์ ์šฉ์ด ์–ด๋ ค์šธ ๊ฒฝ์šฐ ๋ณ„๋„์˜ ๋ณด์™„๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜์—ฌ ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 3,"pass": 3,"total": 14,"manual": 0}},"2.10.9": {"name": "์•…์„ฑ์ฝ”๋“œ ํ†ต์ œ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.10.9 ์•…์„ฑ์ฝ”๋“œ ํ†ต์ œ","Subdomain": "2.10. 
์‹œ์Šคํ…œ ๋ฐ ์„œ๋น„์Šค ๋ณด์•ˆ๊ด€๋ฆฌ","AuditEvidence": ["์•…์„ฑํ”„๋กœ๊ทธ๋žจ ๋Œ€์‘ ์ง€์นจยท์ ˆ์ฐจยท๋งค๋‰ด์–ผ","๋ฐฑ์‹ ํ”„๋กœ๊ทธ๋žจ ์„ค์น˜ ํ˜„ํ™ฉ","๋ฐฑ์‹ ํ”„๋กœ๊ทธ๋žจ ์„ค์ • ํ™”๋ฉด","์•…์„ฑํ”„๋กœ๊ทธ๋žจ ๋Œ€์‘ ์ด๋ ฅ(๋Œ€์‘ ๋ณด๊ณ ์„œ ๋“ฑ)"],"AuditChecklist": ["๋ฐ”์ด๋Ÿฌ์Šค, ์›œ, ํŠธ๋กœ์ด๋ชฉ๋งˆ, ๋žœ์„ฌ์›จ์–ด ๋“ฑ์˜ ์•…์„ฑ์ฝ”๋“œ๋กœ๋ถ€ํ„ฐ ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ์—…๋ฌด์šฉ๋‹จ๋ง๊ธฐ ๋“ฑ์„ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฐฑ์‹  ์†Œํ”„ํŠธ์›จ์–ด ๋“ฑ ๋ณด์•ˆํ”„๋กœ๊ทธ๋žจ์„ ํ†ตํ•˜์—ฌ ์ตœ์‹  ์•…์„ฑ์ฝ”๋“œ ์˜ˆ๋ฐฉยทํƒ์ง€ ํ™œ๋™์„ ์ง€์†์ ์œผ๋กœ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฐฑ์‹  ์†Œํ”„ํŠธ์›จ์–ด ๋“ฑ ๋ณด์•ˆํ”„๋กœ๊ทธ๋žจ์€ ์ตœ์‹ ์˜ ์ƒํƒœ๋กœ ์œ ์ง€ํ•˜๊ณ  ํ•„์š”์‹œ ๊ธด๊ธ‰ ๋ณด์•ˆ ์—…๋ฐ์ดํŠธ๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์•…์„ฑ์ฝ”๋“œ ๊ฐ์—ผ ๋ฐœ๊ฒฌ ์‹œ ์•…์„ฑ์ฝ”๋“œ ํ™•์‚ฐ ๋ฐ ํ”ผํ•ด ์ตœ์†Œํ™” ๋“ฑ์˜ ๋Œ€์‘์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝยท์ดํ–‰ํ•˜๊ณ ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์ผ๋ถ€ PC ๋ฐ ์„œ๋ฒ„์— ๋ฐฑ์‹ ์ด ์„ค์น˜๋˜์–ด ์žˆ์ง€ ์•Š๊ฑฐ๋‚˜, ๋ฐฑ์‹  ์—”์ง„์ด ์žฅ๊ธฐ๊ฐ„ ์ตœ์‹  ๋ฒ„์ „์œผ๋กœ ์—…๋ฐ์ดํŠธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋ฐฑ์‹  ํ”„๋กœ๊ทธ๋žจ์˜ ํ™˜๊ฒฝ์„ค์ •(์‹ค์‹œ๊ฐ„ ๊ฒ€์‚ฌ, ์˜ˆ์•ฝ๊ฒ€์‚ฌ, ์—…๋ฐ์ดํŠธ ์„ค์ • ๋“ฑ)์„ ์ด์šฉ์ž๊ฐ€ ์ž„์˜๋กœ ๋ณ€๊ฒฝํ•  ์ˆ˜ ์žˆ์Œ์—๋„ ๊ทธ์— ๋”ฐ๋ฅธ ์ถ”๊ฐ€ ๋ณดํ˜ธ๋Œ€์ฑ…์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ฐฑ์‹  ์ค‘์•™๊ด€๋ฆฌ์‹œ์Šคํ…œ์— ์ ‘๊ทผํ†ต์ œ ๋“ฑ ๋ณดํ˜ธ๋Œ€์ฑ…์ด ๋ฏธ๋น„ํ•˜์—ฌ ์ค‘์•™๊ด€๋ฆฌ์‹œ์Šคํ…œ์„ ํ†ตํ•œ ์นจํ•ด์‚ฌ๊ณ ๋ฐœ์ƒ ๊ฐ€๋Šฅ์„ฑ์ด ์žˆ๋Š” ๊ฒฝ์šฐ ๋˜๋Š” ๋ฐฑ์‹  ํŒจํ„ด์— ๋Œ€ํ•œ ๋ฌด๊ฒฐ์„ฑ ๊ฒ€์ฆ์„ ํ•˜์ง€ ์•Š์•„ ์•…์˜์ ์ธ ์‚ฌ์šฉ์ž์— ์˜ํ•œ ์•…์„ฑ์ฝ”๋“œ ์ „ํŒŒ ๊ฐ€๋Šฅ์„ฑ์ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์ผ๋ถ€ ๋‚ด๋ถ€๋ง PC ๋ฐ ์„œ๋ฒ„์—์„œ ๋‹ค์ˆ˜์˜ ์•…์„ฑ์ฝ”๋“œ ๊ฐ์—ผ์ด๋ ฅ์ด ํ™•์ธ๋˜์—ˆ์œผ๋‚˜, ๊ฐ์—ผ ํ˜„ํ™ฉ, ๊ฐ์—ผ ๊ฒฝ๋กœ ๋ฐ ์›์ธ ๋ถ„์„, ๊ทธ์— ๋”ฐ๋ฅธ ์กฐ์น˜๋‚ด์—ญ ๋“ฑ์ด ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ9์กฐ(์•…์„ฑํ”„๋กœ๊ทธ๋žจ ๋“ฑ ๋ฐฉ์ง€)"]}],"description": "๋ฐ”์ด๋Ÿฌ์Šคยท์›œยทํŠธ๋กœ์ด๋ชฉ๋งˆยท๋žœ์„ฌ์›จ์–ด ๋“ฑ์˜ ์•…์„ฑ์ฝ”๋“œ๋กœ๋ถ€ํ„ฐ ๊ฐœ์ธ์ •๋ณด ๋ฐ ์ค‘์š”์ •๋ณด, ์ •๋ณด์‹œ์Šคํ…œ ๋ฐ ์—…๋ฌด์šฉ ๋‹จ๋ง๊ธฐ ๋“ฑ์„ ๋ณดํ˜ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์•…์„ฑ์ฝ”๋“œ ์˜ˆ๋ฐฉยทํƒ์ง€ยท๋Œ€์‘ ๋“ฑ์˜ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.11.1": {"name": "์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘์ฒด๊ณ„ ๊ตฌ์ถ•","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.11.1 ์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘์ฒด๊ณ„ ๊ตฌ์ถ•","Subdomain": "2.11. 
์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘","AuditEvidence": ["์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘ ์ง€์นจยท์ ˆ์ฐจยท๋งค๋‰ด์–ผ","์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘ ์กฐ์ง๋„ ๋ฐ ๋น„์ƒ์—ฐ๋ฝ๋ง","๋ณด์•ˆ๊ด€์ œ์„œ๋น„์Šค ๊ณ„์•ฝ์„œ(SLA ๋“ฑ)"],"AuditChecklist": ["์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ ๋ฅผ ์˜ˆ๋ฐฉํ•˜๊ณ  ์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ์‹ ์†ํ•˜๊ณ  ํšจ๊ณผ์ ์œผ๋กœ ๋Œ€์‘ํ•˜๊ธฐ ์œ„ํ•œ ์ฒด๊ณ„์™€ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ณด์•ˆ๊ด€์ œ์„œ๋น„์Šค ๋“ฑ ์™ธ๋ถ€ ๊ธฐ๊ด€์„ ํ†ตํ•˜์—ฌ ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์ฒด๊ณ„๋ฅผ ๊ตฌ์ถ•ยท์šด์˜ํ•˜๋Š” ๊ฒฝ์šฐ ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์ ˆ์ฐจ์˜ ์„ธ๋ถ€์‚ฌํ•ญ์„ ๊ณ„์•ฝ์„œ์— ๋ฐ˜์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์นจํ•ด์‚ฌ๊ณ ์˜ ๋ชจ๋‹ˆํ„ฐ๋ง, ๋Œ€์‘ ๋ฐ ์ฒ˜๋ฆฌ๋ฅผ ์œ„ํ•˜์—ฌ ์™ธ๋ถ€์ „๋ฌธ๊ฐ€, ์ „๋ฌธ์—…์ฒด, ์ „๋ฌธ๊ธฐ๊ด€ ๋“ฑ๊ณผ์˜ ํ˜‘์กฐ์ฒด๊ณ„๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์นจํ•ด์‚ฌ๊ณ ์— ๋Œ€๋น„ํ•œ ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘ ์กฐ์ง ๋ฐ ๋Œ€์‘ ์ ˆ์ฐจ๋ฅผ ๋ช…ํ™•ํžˆ ์ •์˜ํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋‚ด๋ถ€ ์ง€์นจ ๋ฐ ์ ˆ์ฐจ์— ์นจํ•ด์‚ฌ๊ณ  ๋‹จ๊ณ„๋ณ„(์‚ฌ๊ณ  ์ „, ์ธ์ง€, ์ฒ˜๋ฆฌ, ๋ณต๊ตฌ, ๋ณด๊ณ  ๋“ฑ) ๋Œ€์‘ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜์—ฌ ๋ช…์‹œํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์นจํ•ด์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ์‚ฌ๊ณ  ์œ ํ˜• ๋ฐ ์‹ฌ๊ฐ๋„์— ๋”ฐ๋ฅธ ์‹ ๊ณ ยทํ†ต์ง€ ์ ˆ์ฐจ, ๋Œ€์‘ ๋ฐ ๋ณต๊ตฌ ์ ˆ์ฐจ์˜ ์ผ๋ถ€ ๋˜๋Š” ์ „๋ถ€๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘ ์กฐ์ง๋„ ๋ฐ ๋น„์ƒ์—ฐ๋ฝ๋ง ๋“ฑ์„ ํ˜„ํ–‰ํ™”ํ•˜์ง€ ์•Š๊ณ  ์žˆ๊ฑฐ๋‚˜, ๋‹ด๋‹น์ž๋ณ„ ์—ญํ• ๊ณผ ์ฑ…์ž„์ด ๋ช…ํ™•ํžˆ ์ •์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์นจํ•ด์‚ฌ๊ณ  ์‹ ๊ณ ยทํ†ต์ง€ ๋ฐ ๋Œ€์‘ ํ˜‘์กฐ๋ฅผ ์œ„ํ•œ ๋Œ€์™ธ๊ธฐ๊ด€ ์—ฐ๋ฝ์ฒ˜์— ๊ธฐ๊ด€๋ช…, ํ™ˆํŽ˜์ด์ง€, ์—ฐ๋ฝ์ฒ˜ ๋“ฑ์ด ์ž˜๋ชป ๋ช…์‹œ๋˜์–ด ์žˆ๊ฑฐ๋‚˜, ์ผ๋ถ€ ๊ธฐ๊ด€ ๊ด€๋ จ ์ •๋ณด๊ฐ€ ๋ˆ„๋ฝ ๋˜๋Š” ํ˜„ํ–‰ํ™”๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ์™ธ๋ถ€ ๋ณด์•ˆ๊ด€์ œ ์ „๋ฌธ์—…์ฒด ๋“ฑ ์œ ๊ด€๊ธฐ๊ด€์— ์นจํ•ด์‚ฌ๊ณ  ํƒ์ง€ ๋ฐ ๋Œ€์‘์„ ์œ„ํƒํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์— ๋Œ€ํ•œ ์ƒํ˜ธ ๊ฐ„ ๊ด€๋ จ ์—ญํ•  ๋ฐ ์ฑ…์ž„ ๋ฒ”์œ„๊ฐ€ ๊ณ„์•ฝ์„œ๋‚˜ SLA์— ๋ช…ํ™•ํ•˜๊ฒŒ ์ •์˜๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 6 : ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜์˜€์œผ๋‚˜, ๊ฐœ์ธ์ •๋ณด ์นจํ•ด ์‹ ๊ณ  ๊ธฐ์ค€, ์‹œ์  ๋“ฑ์ด ๋ฒ•์  ์š”๊ตฌ์‚ฌํ•ญ์„ ์ค€์ˆ˜ํ•˜์ง€ ๋ชปํ•˜๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ34์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์œ ์ถœ ๋“ฑ์˜ ํ†ต์ง€ยท์‹ ๊ณ )","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ48์กฐ์˜3(์นจํ•ด์‚ฌ๊ณ ์˜ ์‹ ๊ณ  ๋“ฑ), ์ œ48์กฐ์˜4(์นจํ•ด์‚ฌ๊ณ ์˜ ์›์ธ๋ถ„์„ ๋“ฑ)"]}],"description": "์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ ๋“ฑ์„ ์˜ˆ๋ฐฉํ•˜๊ณ  ์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ์‹ ์†ํ•˜๊ณ  ํšจ๊ณผ์ ์œผ๋กœ ๋Œ€์‘ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋‚ดยท์™ธ๋ถ€ ์นจํ•ด์‹œ๋„์˜ ํƒ์ง€ยท๋Œ€์‘ยท๋ถ„์„ ๋ฐ ๊ณต์œ ๋ฅผ ์œ„ํ•œ ์ฒด๊ณ„์™€ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ , ๊ด€๋ จ ์™ธ๋ถ€๊ธฐ๊ด€ ๋ฐ ์ „๋ฌธ๊ฐ€๋“ค๊ณผ ํ˜‘์กฐ์ฒด๊ณ„๋ฅผ ๊ตฌ์ถ•ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.11.2": {"name": "์ทจ์•ฝ์  ์ ๊ฒ€ ๋ฐ ์กฐ์น˜","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","ssm_document_secrets": "PASS","inspector2_is_enabled": "FAIL","ec2_instance_imdsv2_enabled": "PASS","guardduty_centrally_managed": "FAIL","ec2_instance_secrets_user_data": "PASS","ec2_launch_template_no_secrets": "PASS","inspector2_active_findings_exist": "FAIL","trustedadvisor_errors_and_warnings": null,"guardduty_no_high_severity_findings": "FAIL","awslambda_function_no_secrets_in_code": "PASS","cloudwatch_log_group_no_secrets_in_logs": "FAIL","ecr_registry_scan_images_on_push_enabled": "PASS","cloudformation_stack_outputs_find_secrets": "PASS","codebuild_project_no_secrets_in_variables": "PASS","awslambda_function_no_secrets_in_variables": "PASS","ecs_task_definitions_no_environment_secrets": 
"PASS","ecr_repositories_scan_images_on_push_enabled": "FAIL","trustedadvisor_premium_support_plan_subscribed": null,"autoscaling_find_secrets_ec2_launch_configuration": "PASS","ecr_repositories_scan_vulnerabilities_in_latest_image": null,"codebuild_project_source_repo_url_no_sensitive_credentials": "PASS"},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.11.2 ์ทจ์•ฝ์  ์ ๊ฒ€ ๋ฐ ์กฐ์น˜","Subdomain": "2.11. ์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘","AuditEvidence": ["์ทจ์•ฝ์  ์ ๊ฒ€ ๊ณ„ํš์„œ","์ทจ์•ฝ์  ์ ๊ฒ€ ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ(์›น, ๋ชจ๋ฐ”์ผ ์•ฑ, ์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ์‹œ์Šคํ…œ, ๋ณด์•ˆ์‹œ์Šคํ…œ, DBMS ๋“ฑ)","์ทจ์•ฝ์  ์ ๊ฒ€ ์ด๋ ฅ","์ทจ์•ฝ์  ์กฐ์น˜๊ณ„ํš์„œ","์ทจ์•ฝ์  ์กฐ์น˜์™„๋ฃŒ๋ณด๊ณ ์„œ","๋ชจ์˜ํ•ดํ‚น ๊ณ„ํš์„œยท๊ฒฐ๊ณผ๋ณด๊ณ ์„œ"],"AuditChecklist": ["์ •๋ณด์‹œ์Šคํ…œ ์ทจ์•ฝ์  ์ ๊ฒ€ ์ ˆ์ฐจ๋ฅผ ์ˆ˜๋ฆฝํ•˜๊ณ , ์ •๊ธฐ์ ์œผ๋กœ ์ ๊ฒ€์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","๋ฐœ๊ฒฌ๋œ ์ทจ์•ฝ์ ์— ๋Œ€ํ•œ ์กฐ์น˜๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ , ๊ทธ ๊ฒฐ๊ณผ๋ฅผ ์ฑ…์ž„์ž์—๊ฒŒ ๋ณด๊ณ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ตœ์‹  ๋ณด์•ˆ์ทจ์•ฝ์  ๋ฐœ์ƒ ์—ฌ๋ถ€๋ฅผ ์ง€์†์ ์œผ๋กœ ํŒŒ์•…ํ•˜๊ณ , ์ •๋ณด์‹œ์Šคํ…œ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๋ถ„์„ํ•˜์—ฌ ์กฐ์น˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์ทจ์•ฝ์  ์ ๊ฒ€ ์ด๋ ฅ์„ ๊ธฐ๋ก๊ด€๋ฆฌํ•˜์—ฌ ์ „๋…„๋„์— ๋„์ถœ๋œ ์ทจ์•ฝ์ ์ด ์žฌ๋ฐœ์ƒํ•˜๋Š” ๋“ฑ์˜ ๋ฌธ์ œ์ ์— ๋Œ€ํ•˜์—ฌ ๋ณดํ˜ธ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ๊ทœ์ •์— ์—ฐ 1ํšŒ ์ด์ƒ ์ฃผ์š” ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ๊ธฐ์ˆ ์  ์ทจ์•ฝ์  ์ ๊ฒ€์„ ํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์ฃผ์š” ์‹œ์Šคํ…œ ์ค‘ ์ผ๋ถ€๊ฐ€ ์ทจ์•ฝ์  ์ ๊ฒ€ ๋Œ€์ƒ์—์„œ ๋ˆ„๋ฝ๋œ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ทจ์•ฝ์  ์ ๊ฒ€์—์„œ ๋ฐœ๊ฒฌ๋œ ์ทจ์•ฝ์ ์— ๋Œ€ํ•œ ๋ณด์™„์กฐ์น˜๋ฅผ ์ดํ–‰ํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜, ๋‹จ๊ธฐ๊ฐ„ ๋‚ด์— ์กฐ์น˜ํ•  ์ˆ˜ ์—†๋Š” ์ทจ์•ฝ์ ์— ๋Œ€ํ•œ ํƒ€๋‹น์„ฑ ๊ฒ€ํ†  ๋ฐ ์Šน์ธ ์ด๋ ฅ์ด ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ4์กฐ(๋‚ด๋ถ€ ๊ด€๋ฆฌ๊ณ„ํš์˜ ์ˆ˜๋ฆฝยท์‹œํ–‰ ๋ฐ ์ ๊ฒ€), ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "์ •๋ณด์‹œ์Šคํ…œ์˜ ์ทจ์•ฝ์ ์ด ๋…ธ์ถœ๋˜์–ด ์žˆ๋Š”์ง€๋ฅผ ํ™•์ธํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์ •๊ธฐ์ ์œผ๋กœ ์ทจ์•ฝ์  ์ ๊ฒ€์„ ์ˆ˜ํ–‰ํ•˜๊ณ , ๋ฐœ๊ฒฌ๋œ ์ทจ์•ฝ์ ์— ๋Œ€ํ•ด์„œ๋Š” ์‹ ์†ํ•˜๊ฒŒ ์กฐ์น˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค. 
๋˜ํ•œ ์ตœ์‹  ๋ณด์•ˆ์ทจ์•ฝ์ ์˜ ๋ฐœ์ƒ ์—ฌ๋ถ€๋ฅผ ์ง€์†์ ์œผ๋กœ ํŒŒ์•…ํ•˜๊ณ , ์ •๋ณด์‹œ์Šคํ…œ์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๋ถ„์„ํ•˜์—ฌ ์กฐ์น˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 6,"pass": 14,"total": 23,"manual": 0}},"2.11.3": {"name": "์ด์ƒํ–‰์œ„ ๋ถ„์„ ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง","checks": {"securityhub_enabled": "PASS","fms_policy_compliant": null,"vpc_flow_logs_enabled": "FAIL","cloudtrail_insights_exist": null,"networkfirewall_in_all_vpc": "FAIL","trustedadvisor_errors_and_warnings": null,"guardduty_no_high_severity_findings": "FAIL","cloudtrail_threat_detection_enumeration": null,"cloudwatch_log_group_no_secrets_in_logs": "FAIL","cloudwatch_log_metric_filter_root_usage": null,"cloudwatch_cross_account_sharing_disabled": null,"cloudwatch_changes_to_vpcs_alarm_configured": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","cloudwatch_log_metric_filter_policy_changes": null,"cloudwatch_log_metric_filter_sign_in_without_mfa": null,"cloudwatch_changes_to_network_acls_alarm_configured": null,"cloudwatch_log_metric_filter_security_group_changes": null,"cloudwatch_log_metric_filter_unauthorized_api_calls": null,"cloudwatch_log_metric_filter_authentication_failures": null,"cloudwatch_log_metric_filter_aws_organizations_changes": null,"cognito_user_pool_client_prevent_user_existence_errors": null,"cloudwatch_changes_to_network_gateways_alarm_configured": null,"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null,"cloudwatch_changes_to_network_route_tables_alarm_configured": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null,"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null,"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.11.3 ์ด์ƒํ–‰์œ„ ๋ถ„์„ ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง","Subdomain": "2.11. 
์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘","AuditEvidence": ["์ด์ƒํ–‰์œ„ ๋ถ„์„ ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋ง ํ˜„ํ™ฉ","์ด์ƒํ–‰์œ„ ๋ฐœ๊ฒฌ ์‹œ ๋Œ€์‘ ์ฆ๊ฑฐ์ž๋ฃŒ"],"AuditChecklist": ["๋‚ดยท์™ธ๋ถ€์— ์˜ํ•œ ์นจํ•ด์‹œ๋„, ๊ฐœ์ธ์ •๋ณด์œ ์ถœ ์‹œ๋„, ๋ถ€์ •ํ–‰์œ„ ๋“ฑ ์ด์ƒํ–‰์œ„๋ฅผ ํƒ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ์ฃผ์š” ์ •๋ณด์‹œ์Šคํ…œ, ์‘์šฉํ”„๋กœ๊ทธ๋žจ, ๋„คํŠธ์›Œํฌ, ๋ณด์•ˆ์‹œ์Šคํ…œ ๋“ฑ์—์„œ ๋ฐœ์ƒํ•œ ๋„คํŠธ์›Œํฌ ํŠธ๋ž˜ํ”ฝ,๋ฐ์ดํ„ฐ ํ๋ฆ„, ์ด๋ฒคํŠธ ๋กœ๊ทธ ๋“ฑ์„ ์ˆ˜์ง‘ํ•˜์—ฌ ๋ถ„์„ ๋ฐ ๋ชจ๋‹ˆํ„ฐ๋งํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์นจํ•ด์‹œ๋„, ๊ฐœ์ธ์ •๋ณด์œ ์ถœ์‹œ๋„, ๋ถ€์ •ํ–‰์œ„ ๋“ฑ์˜ ์—ฌ๋ถ€๋ฅผ ํŒ๋‹จํ•˜๊ธฐ ์œ„ํ•œ ๊ธฐ์ค€ ๋ฐ ์ž„๊ณ„์น˜๋ฅผ ์ •์˜ํ•˜๊ณ  ์ด์— ๋”ฐ๋ผ ์ด์ƒํ–‰์œ„์˜ ํŒ๋‹จ ๋ฐ ์กฐ์‚ฌ ๋“ฑ ํ›„์† ์กฐ์น˜๊ฐ€ ์ ์‹œ์— ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์™ธ๋ถ€๋กœ๋ถ€ํ„ฐ์˜ ์„œ๋ฒ„, ๋„คํŠธ์›Œํฌ, ๋ฐ์ดํ„ฐ๋ฒ ์ด์Šค, ๋ณด์•ˆ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์นจํ•ด ์‹œ๋„๋ฅผ ์ธ์ง€ํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•˜๋Š” ์ƒ์‹œ ๋˜๋Š” ์ •๊ธฐ์  ๋ชจ๋‹ˆํ„ฐ๋ง ์ฒด๊ณ„ ๋ฐ ์ ˆ์ฐจ๋ฅผ ๋งˆ๋ จํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์™ธ๋ถ€ ๋ณด์•ˆ๊ด€์ œ ์ „๋ฌธ์—…์ฒด ๋“ฑ ์™ธ๋ถ€ ๊ธฐ๊ด€์— ์นจํ•ด์‹œ๋„ ๋ชจ๋‹ˆํ„ฐ๋ง ์—…๋ฌด๋ฅผ ์œ„ํƒํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์œ„ํƒ์—…์ฒด๊ฐ€ ์ œ๊ณตํ•œ ๊ด€๋ จ ๋ณด๊ณ ์„œ๋ฅผ ๊ฒ€ํ† ํ•œ ์ด๋ ฅ์ด ํ™•์ธ๋˜์ง€ ์•Š๊ฑฐ๋‚˜, ์œ„ํƒ ๋Œ€์ƒ์—์„œ ์ œ์™ธ๋œ ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ์ž์ฒด ๋ชจ๋‹ˆํ„ฐ๋ง ์ฒด๊ณ„๋ฅผ ๊ฐ–์ถ”๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋‚ด๋ถ€์ ์œผ๋กœ ์ •์˜ํ•œ ์ž„๊ณ„์น˜๋ฅผ ์ดˆ๊ณผํ•˜๋Š” ์ด์ƒ ํŠธ๋ž˜ํ”ฝ์ด ์ง€์†์ ์œผ๋กœ ๋ฐœ๊ฒฌ๋˜๊ณ  ์žˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ๋Œ€์‘์กฐ์น˜๊ฐ€ ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ6์กฐ(์ ‘๊ทผํ†ต์ œ)"]}],"description": "๋‚ดยท์™ธ๋ถ€์— ์˜ํ•œ ์นจํ•ด์‹œ๋„, ๊ฐœ์ธ์ •๋ณด์œ ์ถœ ์‹œ๋„, ๋ถ€์ •ํ–‰์œ„ ๋“ฑ์„ ์‹ ์†ํ•˜๊ฒŒ ํƒ์ง€ยท๋Œ€์‘ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋„คํŠธ์›Œํฌ ๋ฐ ๋ฐ์ดํ„ฐ ํ๋ฆ„ ๋“ฑ์„ ์ˆ˜์ง‘ํ•˜์—ฌ ๋ถ„์„ํ•˜๋ฉฐ, ๋ชจ๋‹ˆํ„ฐ๋ง ๋ฐ ์ ๊ฒ€ ๊ฒฐ๊ณผ์— ๋”ฐ๋ฅธ ์‚ฌํ›„์กฐ์น˜๋Š” ์ ์‹œ์— ์ด๋ฃจ์–ด์ ธ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 6,"pass": 1,"total": 28,"manual": 0}},"2.11.4": {"name": "์‚ฌ๊ณ  ๋Œ€์‘ ํ›ˆ๋ จ ๋ฐ ๊ฐœ์„ ","checks": {"ssmincidents_enabled_with_plans": null},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.11.4 ์‚ฌ๊ณ  ๋Œ€์‘ ํ›ˆ๋ จ ๋ฐ ๊ฐœ์„ ","Subdomain": "2.11. 
์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘","AuditEvidence": ["์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ  ๋Œ€์‘ ๋ชจ์˜ํ›ˆ๋ จ ๊ณ„ํš์„œ","์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ  ๋Œ€์‘ ๋ชจ์˜ํ›ˆ๋ จ ๊ฒฐ๊ณผ์„œ","์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘ ์ ˆ์ฐจ"],"AuditChecklist": ["์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ  ๋Œ€์‘ ์ ˆ์ฐจ์— ๊ด€ํ•œ ๋ชจ์˜ํ›ˆ๋ จ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ์ด์— ๋”ฐ๋ผ ์—ฐ 1ํšŒ ์ด์ƒ ์ฃผ๊ธฐ์ ์œผ๋กœ ํ›ˆ๋ จ์„ ์‹ค์‹œํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ  ํ›ˆ๋ จ ๊ฒฐ๊ณผ๋ฅผ ๋ฐ˜์˜ํ•˜์—ฌ ์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ  ๋Œ€์‘์ฒด๊ณ„๋ฅผ ๊ฐœ์„ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์นจํ•ด์‚ฌ๊ณ  ๋ชจ์˜ํ›ˆ๋ จ์„ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜ ๊ด€๋ จ ๊ณ„ํš์„œ ๋ฐ ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ๊ฐ€ ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์—ฐ๊ฐ„ ์นจํ•ด์‚ฌ๊ณ  ๋ชจ์˜ํ›ˆ๋ จ ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜์˜€์œผ๋‚˜ ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋˜๋Š” ์Šน์ธ ์—†์ด ํ•ด๋‹น ๊ธฐ๊ฐ„ ๋‚ด์— ์‹ค์‹œํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ๋ชจ์˜ํ›ˆ๋ จ์„ ๊ณ„ํšํ•˜์—ฌ ์‹ค์‹œํ•˜์˜€์œผ๋‚˜, ๊ด€๋ จ ๋‚ด๋ถ€ ์ง€์นจ์— ์ •ํ•œ ์ ˆ์ฐจ ๋ฐ ์„œ์‹์— ๋”ฐ๋ผ ์ˆ˜ํ–‰ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ  ๋Œ€์‘ ์ ˆ์ฐจ๋ฅผ ์ž„์ง์›๊ณผ ์ดํ•ด๊ด€๊ณ„์ž๊ฐ€ ์ˆ™์ง€ํ•˜๋„๋ก ์‹œ๋‚˜๋ฆฌ์˜ค์— ๋”ฐ๋ฅธ ๋ชจ์˜ํ›ˆ๋ จ์„ ์—ฐ 1ํšŒ ์ด์ƒ ์‹ค์‹œํ•˜๊ณ  ํ›ˆ๋ จ๊ฒฐ๊ณผ๋ฅผ ๋ฐ˜์˜ํ•˜์—ฌ ๋Œ€์‘์ฒด๊ณ„๋ฅผ ๊ฐœ์„ ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"2.11.5": {"name": "์‚ฌ๊ณ  ๋Œ€์‘ ๋ฐ ๋ณต๊ตฌ","checks": {},"status": "PASS","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.11.5 ์‚ฌ๊ณ  ๋Œ€์‘ ๋ฐ ๋ณต๊ตฌ","Subdomain": "2.11. ์‚ฌ๊ณ  ์˜ˆ๋ฐฉ ๋ฐ ๋Œ€์‘","AuditEvidence": ["์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘ ์ ˆ์ฐจ","์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘๋ณด๊ณ ์„œ","์นจํ•ด์‚ฌ๊ณ  ๊ด€๋ฆฌ๋Œ€์žฅ","๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‹ ๊ณ ์„œ","๋น„์ƒ์—ฐ๋ฝ๋ง"],"AuditChecklist": ["์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์˜ ์ง•ํ›„ ๋˜๋Š” ๋ฐœ์ƒ์„ ์ธ์ง€ํ•œ ๊ฒฝ์šฐ ์ •์˜๋œ ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์ ˆ์ฐจ์— ๋”ฐ๋ผ ์‹ ์†ํ•˜๊ฒŒ ๋Œ€์‘ ๋ฐ ๋ณด๊ณ ๊ฐ€ ์ด๋ฃจ์–ด์ง€๊ณ  ์žˆ๋Š”๊ฐ€?","๊ฐœ์ธ์ •๋ณด ์นจํ•ด์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ๊ด€๋ จ ๋ฒ•๋ น์— ๋”ฐ๋ผ ์ •๋ณด์ฃผ์ฒด ํ†ต์ง€ ๋ฐ ๊ด€๊ณ„๊ธฐ๊ด€ ์‹ ๊ณ  ์ ˆ์ฐจ๋ฅผ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์นจํ•ด์‚ฌ๊ณ ๊ฐ€ ์ข…๊ฒฐ๋œ ํ›„ ์‚ฌ๊ณ ์˜ ์›์ธ์„ ๋ถ„์„ํ•˜์—ฌ ๊ทธ ๊ฒฐ๊ณผ๋ฅผ ๋ณด๊ณ ํ•˜๊ณ  ๊ด€๋ จ ์กฐ์ง ๋ฐ์ธ๋ ฅ๊ณผ ๊ณต์œ ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์นจํ•ด์‚ฌ๊ณ  ๋ถ„์„์„ ํ†ตํ•˜์—ฌ ์–ป์€ ์ •๋ณด๋ฅผ ํ™œ์šฉํ•˜์—ฌ ์œ ์‚ฌ ์‚ฌ๊ณ ๊ฐ€ ์žฌ๋ฐœํ•˜์ง€ ์•Š๋„๋ก ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜๊ณ  ํ•„์š”ํ•œ ๊ฒฝ์šฐ ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์ ˆ์ฐจ ๋“ฑ์„ ๋ณ€๊ฒฝํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ๋‚ด๋ถ€ ์นจํ•ด์‚ฌ๊ณ  ๋Œ€์‘์ง€์นจ์—๋Š” ์นจํ•ด์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ๋‚ด๋ถ€ ์ •๋ณด๋ณดํ˜ธ์œ„์›ํšŒ ๋ฐ ์ดํ•ด๊ด€๊ณ„ ๋ถ€์„œ์—๊ฒŒ ๋ณด๊ณ ํ•˜๋„๋ก ์ •ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ์นจํ•ด์‚ฌ๊ณ  ๋ฐœ์ƒ ์‹œ ๋‹ด๋‹น ๋ถ€์„œ์—์„œ ์ž์ฒด์ ์œผ๋กœ ๋Œ€์‘ ์กฐ์น˜ ํ›„ ์ •๋ณด๋ณดํ˜ธ์œ„์›ํšŒ ๋ฐ ์ดํ•ด๊ด€๊ณ„ ๋ถ€์„œ์— ๋ณด๊ณ ํ•˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์ตœ๊ทผ DDoS ๊ณต๊ฒฉ์œผ๋กœ ์˜์‹ฌ๋˜๋Š” ์นจํ•ด์‚ฌ๊ณ ๋กœ ์ธํ•˜์—ฌ ์„œ๋น„์Šค ์ผ๋ถ€๊ฐ€ ์ค‘๋‹จ๋œ ์‚ฌ๋ก€๊ฐ€ ์žˆ์œผ๋‚˜, ์ด์— ๋Œ€ํ•œ ์›์ธ๋ถ„์„ ๋ฐ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์ด ์ˆ˜๋ฆฝ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์™ธ๋ถ€ ํ•ดํ‚น์— ์˜ํ•ด ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์‚ฌ๊ณ ๊ฐ€ ๋ฐœ์ƒํ•˜์˜€์œผ๋‚˜, ์œ ์ถœ๋œ ๊ฐœ์ธ์ •๋ณด ๊ฑด์ˆ˜๊ฐ€ ์†Œ๋Ÿ‰์ด๋ผ๋Š” ์ด์œ ๋กœ 72์‹œ๊ฐ„ ์ด๋‚ด์— ํ†ต์ง€ ๋ฐ ์‹ ๊ณ ๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ๋‹ด๋‹น์ž์˜ ์‹ค์ˆ˜์— ์˜ํ•ด ์ธํ„ฐ๋„ท ํ™ˆํŽ˜์ด์ง€ ๊ฒŒ์‹œํŒ์„ ํ†ตํ•ด 1์ฒœ๋ช… ์ด์ƒ ์ •๋ณด์ฃผ์ฒด์— ๋Œ€ํ•œ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ์ด ๋ฐœ์ƒํ•˜์˜€์œผ๋‚˜, ํ•ด๋‹น ์ •๋ณด์ฃผ์ฒด์— ๋Œ€ํ•œ ์œ ์ถœ ํ†ต์ง€๊ฐ€ ์ด๋ฃจ์–ด์ง€์ง€ ์•Š์€ ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ 
œ34์กฐ(๊ฐœ์ธ์ •๋ณด์˜ ์œ ์ถœ ๋“ฑ์˜ ํ†ต์ง€ยท์‹ ๊ณ )","์ •๋ณดํ†ต์‹ ๋ง๋ฒ• ์ œ48์กฐ์˜3(์นจํ•ด์‚ฌ๊ณ ์˜ ์‹ ๊ณ  ๋“ฑ), ์ œ48์กฐ์˜4(์นจํ•ด์‚ฌ๊ณ ์˜ ์›์ธ๋ถ„์„ ๋“ฑ)"]}],"description": "์นจํ•ด์‚ฌ๊ณ  ๋ฐ ๊ฐœ์ธ์ •๋ณด ์œ ์ถœ ์ง•ํ›„๋‚˜ ๋ฐœ์ƒ์„ ์ธ์ง€ํ•œ ๋•Œ์—๋Š” ๋ฒ•์  ํ†ต์ง€ ๋ฐ ์‹ ๊ณ  ์˜๋ฌด๋ฅผ ์ค€์ˆ˜ํ•˜์—ฌ์•ผ ํ•˜๋ฉฐ, ์ ˆ์ฐจ์— ๋”ฐ๋ผ ์‹ ์†ํ•˜๊ฒŒ ๋Œ€์‘ ๋ฐ ๋ณต๊ตฌํ•˜๊ณ  ์‚ฌ๊ณ ๋ถ„์„ ํ›„ ์žฌ๋ฐœ๋ฐฉ์ง€ ๋Œ€์ฑ…์„ ์ˆ˜๋ฆฝํ•˜์—ฌ ๋Œ€์‘์ฒด๊ณ„์— ๋ฐ˜์˜ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 0,"pass": 0,"total": 0,"manual": 0}},"2.12.1": {"name": "์žฌํ•ดยท์žฌ๋‚œ ๋Œ€๋น„ ์•ˆ์ „์กฐ์น˜","checks": {"drs_job_exist": "FAIL","backup_plans_exist": "PASS","rds_cluster_multi_az": "FAIL","elb_is_in_multiple_az": "FAIL","rds_instance_multi_az": "FAIL","s3_bucket_object_lock": "FAIL","vpc_different_regions": null,"efs_have_backup_enabled": "FAIL","elbv2_is_in_multiple_az": "PASS","vpc_subnet_different_az": "PASS","backup_reportplans_exist": null,"neptune_cluster_multi_az": null,"elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_lifecycle_enabled": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"autoscaling_group_multiple_az": null,"dms_instance_multi_az_enabled": null,"ec2_ebs_volume_snapshots_exists": "FAIL","rds_cluster_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL","documentdb_cluster_backup_enabled": null,"s3_bucket_cross_region_replication": "FAIL","kms_cmk_not_deleted_unintentionally": null,"neptune_cluster_deletion_protection": null,"redshift_cluster_automated_snapshot": null,"elb_cross_zone_load_balancing_enabled": "PASS","lightsail_instance_automated_snapshots": null,"dlm_ebs_snapshot_lifecycle_policy_exists": "FAIL","documentdb_cluster_cloudwatch_log_export": null,"elasticache_redis_cluster_backup_enabled": null,"elasticache_redis_cluster_multi_az_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.12.1 ์žฌํ•ดยท์žฌ๋‚œ ๋Œ€๋น„ ์•ˆ์ „์กฐ์น˜","Subdomain": "2.12. 
์žฌํ•ด ๋ณต๊ตฌ","AuditEvidence": ["IT ์žฌํ•ด ๋ณต๊ตฌ ์ง€์นจยท์ ˆ์ฐจ","IT ์žฌํ•ด ๋ณต๊ตฌ ๊ณ„ํš(RTO, RPO ์ •์˜ ํฌํ•จ)","๋น„์ƒ์—ฐ๋ฝ๋ง","๊ฐœ์ธ์ •๋ณด์ฒ˜๋ฆฌ์‹œ์Šคํ…œ ์œ„๊ธฐ๋Œ€์‘ ๋งค๋‰ด์–ผ"],"AuditChecklist": ["์กฐ์ง์˜ ํ•ต์‹ฌ ์„œ๋น„์Šค(์—…๋ฌด) ์—ฐ์†์„ฑ์„ ์œ„ํ˜‘ํ•  ์ˆ˜ ์žˆ๋Š” IT ์žฌํ•ด ์œ ํ˜•์„ ์‹๋ณ„ํ•˜๊ณ , ์œ ํ˜•๋ณ„ ํ”ผํ•ด๊ทœ๋ชจ ๋ฐ ์—…๋ฌด์— ๋ฏธ์น˜๋Š” ์˜ํ–ฅ์„ ๋ถ„์„ํ•˜์—ฌ ํ•ต์‹ฌ IT ์„œ๋น„์Šค(์—…๋ฌด) ๋ฐ ์‹œ์Šคํ…œ์„์‹๋ณ„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","ํ•ต์‹ฌ IT ์„œ๋น„์Šค ๋ฐ ์‹œ์Šคํ…œ์˜ ์ค‘์š”๋„ ๋ฐ ํŠน์„ฑ์— ๋”ฐ๋ฅธ ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ๊ฐ„, ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ์ ์„ ์ •์˜ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์žฌํ•ดยท์žฌ๋‚œ ๋ฐœ์ƒ ์‹œ์—๋„ ํ•ต์‹ฌ ์„œ๋น„์Šค ๋ฐ ์‹œ์Šคํ…œ์˜ ์—ฐ์†์„ฑ์„ ๋ณด์žฅํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ณต๊ตฌ ์ „๋žต ๋ฐ ๋Œ€์ฑ…, ๋น„์ƒ์‹œ ๋ณต๊ตฌ ์กฐ์ง, ๋น„์ƒ์—ฐ๋ฝ์ฒด๊ณ„, ๋ณต๊ตฌ ์ ˆ์ฐจ ๋“ฑ ์žฌํ•ด ๋ณต๊ตฌ ๊ณ„ํš์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : IT ์žฌํ•ด ๋ณต๊ตฌ ์ ˆ์ฐจ์„œ ๋‚ด์— IT ์žฌํ•ด ๋ณต๊ตฌ ์กฐ์ง ๋ฐ ์—ญํ•  ์ •์˜, ๋น„์ƒ์—ฐ๋ฝ์ฒด๊ณ„, ๋ณต๊ตฌ ์ ˆ์ฐจ ๋ฐ ๋ฐฉ๋ฒ• ๋“ฑ ์ค‘์š”ํ•œ ๋‚ด์šฉ์ด ๋ˆ„๋ฝ๋˜์–ด ์žˆ๋Š” ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ๋น„์ƒ์‚ฌํƒœ ๋ฐœ์ƒ ์‹œ ์ •๋ณด์‹œ์Šคํ…œ์˜ ์—ฐ์†์„ฑ ํ™•๋ณด ๋ฐ ํ”ผํ•ด ์ตœ์†Œํ™”๋ฅผ ์œ„ํ•˜์—ฌ ๋ฐฑ์—…์„ผํ„ฐ๋ฅผ ๊ตฌ์ถ•ํ•˜์—ฌ ์šด์˜ํ•˜๊ณ  ์žˆ์œผ๋‚˜, ๊ด€๋ จ ์ •์ฑ…์— ๋ฐฑ์—…์„ผํ„ฐ๋ฅผ ํ™œ์šฉํ•œ ์žฌํ•ด ๋ณต๊ตฌ ์ ˆ์ฐจ ๋“ฑ์ด ์ˆ˜๋ฆฝ๋˜์–ด ์žˆ์ง€ ์•Š์•„ ์žฌํ•ด ๋ณต๊ตฌ ์‹œํ—˜ ๋ฐ ๋ณต๊ตฌ๊ฐ€ ํšจ๊ณผ์ ์œผ๋กœ ์ง„ํ–‰๋˜๊ธฐ ์–ด๋ ค์šด ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์„œ๋น„์Šค ์šด์˜๊ณผ ๊ด€๋ จ๋œ ์ผ๋ถ€ ์ค‘์š” ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ๊ฐ„์ด ์ •์˜๋˜์–ด ์žˆ์ง€ ์•Š์œผ๋ฉฐ, ์ด์— ๋Œ€ํ•œ ์ ์ ˆํ•œ ๋ณต๊ตฌ ๋Œ€์ฑ…์„ ๋งˆ๋ จํ•˜๊ณ  ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 4 : ์žฌํ•ด ๋ณต๊ตฌ ๊ด€๋ จ ์ง€์นจ์„œ ๋“ฑ์— IT ์„œ๋น„์Šค ๋˜๋Š” ์‹œ์Šคํ…œ์— ๋Œ€ํ•œ ๋ณต๊ตฌ ์šฐ์„ ์ˆœ์œ„, ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ๊ฐ„, ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ์  ๋“ฑ์ด ์ •์˜๋˜์–ด ์žˆ์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 5 : ํ˜„์‹ค์  ๋Œ€์ฑ… ์—†์ด ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ๊ฐ„์„ ๊ณผ๋„ ๋˜๋Š” ๊ณผ์†Œํ•˜๊ฒŒ ์„ค์ •ํ•˜๊ณ  ์žˆ๊ฑฐ๋‚˜, ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ์ ๊ณผ ๋ฐฑ์—…์ •์ฑ…(๋Œ€์ƒ, ์ฃผ๊ธฐ ๋“ฑ)์ด ์ ์ ˆํžˆ ์—ฐ๊ณ„๋˜์ง€ ์•Š์•„ ๋ณต๊ตฌ ํšจ๊ณผ์„ฑ์„ ๋ณด์žฅํ•  ์ˆ˜ ์—†๋Š” ๊ฒฝ์šฐ"],"RelatedRegulations": ["๊ฐœ์ธ์ •๋ณด ๋ณดํ˜ธ๋ฒ• ์ œ29์กฐ(์•ˆ์ „์กฐ์น˜์˜๋ฌด)","๊ฐœ์ธ์ •๋ณด์˜ ์•ˆ์ „์„ฑ ํ™•๋ณด์กฐ์น˜ ๊ธฐ์ค€ ์ œ11์กฐ(์žฌํ•ดยท์žฌ๋‚œ ๋Œ€๋น„ ์•ˆ์ „์กฐ์น˜)"]}],"description": "์ž์—ฐ์žฌํ•ด, ํ†ต์‹ ยท์ „๋ ฅ ์žฅ์• , ํ•ดํ‚น ๋“ฑ ์กฐ์ง์˜ ํ•ต์‹ฌ ์„œ๋น„์Šค ๋ฐ ์‹œ์Šคํ…œ์˜ ์šด์˜ ์—ฐ์†์„ฑ์„ ์œ„ํ˜‘ํ•  ์ˆ˜ ์žˆ๋Š” ์žฌํ•ด ์œ ํ˜•์„ ์‹๋ณ„ํ•˜๊ณ , ์œ ํ˜•๋ณ„ ์˜ˆ์ƒ ํ”ผํ•ด๊ทœ๋ชจ ๋ฐ ์˜ํ–ฅ์„ ๋ถ„์„ํ•˜์—ฌ์•ผ ํ•œ๋‹ค. 
๋˜ํ•œ ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ๊ฐ„, ๋ณต๊ตฌ ๋ชฉํ‘œ์‹œ์ ์„ ์ •์˜ํ•˜๊ณ  ๋ณต๊ตฌ ์ „๋žต ๋ฐ ๋Œ€์ฑ…, ๋น„์ƒ์‹œ ๋ณต๊ตฌ ์กฐ์ง, ๋น„์ƒ์—ฐ๋ฝ์ฒด๊ณ„, ๋ณต๊ตฌ ์ ˆ์ฐจ ๋“ฑ ์žฌํ•ด ๋ณต๊ตฌ์ฒด๊ณ„๋ฅผ ๊ตฌ์ถ•ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 14,"pass": 5,"total": 33,"manual": 0}},"2.12.2": {"name": "์žฌํ•ด ๋ณต๊ตฌ ์‹œํ—˜ ๋ฐ ๊ฐœ์„ ","checks": {"drs_job_exist": "FAIL","backup_plans_exist": "PASS","rds_cluster_multi_az": "FAIL","elb_is_in_multiple_az": "FAIL","rds_instance_multi_az": "FAIL","s3_bucket_object_lock": "FAIL","vpc_different_regions": null,"efs_have_backup_enabled": "FAIL","elbv2_is_in_multiple_az": "PASS","vpc_subnet_different_az": "PASS","backup_reportplans_exist": null,"neptune_cluster_multi_az": null,"elbv2_deletion_protection": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_lifecycle_enabled": "FAIL","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"autoscaling_group_multiple_az": null,"dms_instance_multi_az_enabled": null,"ec2_ebs_volume_snapshots_exists": "FAIL","rds_cluster_deletion_protection": "FAIL","rds_instance_deletion_protection": "FAIL","documentdb_cluster_backup_enabled": null,"s3_bucket_cross_region_replication": "FAIL","kms_cmk_not_deleted_unintentionally": null,"neptune_cluster_deletion_protection": null,"redshift_cluster_automated_snapshot": null,"elb_cross_zone_load_balancing_enabled": "PASS","lightsail_instance_automated_snapshots": null,"dlm_ebs_snapshot_lifecycle_policy_exists": "FAIL","documentdb_cluster_cloudwatch_log_export": null,"elasticache_redis_cluster_backup_enabled": null,"elasticache_redis_cluster_multi_az_enabled": null},"status": "FAIL","attributes": [{"Domain": "2. ๋ณดํ˜ธ๋Œ€์ฑ… ์š”๊ตฌ์‚ฌํ•ญ","Section": "2.12.2 ์žฌํ•ด ๋ณต๊ตฌ ์‹œํ—˜ ๋ฐ ๊ฐœ์„ ","Subdomain": "2.12. ์žฌํ•ด ๋ณต๊ตฌ","AuditEvidence": ["IT ์žฌํ•ด ๋ณต๊ตฌ ์ ˆ์ฐจ์„œ","IT ์žฌํ•ด ๋ณต๊ตฌ ์‹œํ—˜ ๊ณ„ํš์„œ","IT ์žฌํ•ด ๋ณต๊ตฌ ์‹œํ—˜ ๊ฒฐ๊ณผ์„œ"],"AuditChecklist": ["์ˆ˜๋ฆฝ๋œ IT ์žฌํ•ด ๋ณต๊ตฌ์ฒด๊ณ„์˜ ์‹คํšจ์„ฑ์„ ํŒ๋‹จํ•˜๊ธฐ ์œ„ํ•˜์—ฌ ์žฌํ•ด ๋ณต๊ตฌ ์‹œํ—˜๊ณ„ํš์„ ์ˆ˜๋ฆฝ ๋ฐ ์ดํ–‰ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?","์‹œํ—˜๊ฒฐ๊ณผ, ์ •๋ณด์‹œ์Šคํ…œ ํ™˜๊ฒฝ๋ณ€ํ™”, ๋ฒ•๋ฅ  ๋“ฑ์— ๋”ฐ๋ฅธ ๋ณ€ํ™”๋ฅผ ๋ฐ˜์˜ํ•  ์ˆ˜ ์žˆ๋„๋ก ๋ณต๊ตฌ์ „๋žต ๋ฐ ๋Œ€์ฑ…์„ ์ •๊ธฐ์ ์œผ๋กœ ๊ฒ€ํ† ยท๋ณด์™„ํ•˜๊ณ  ์žˆ๋Š”๊ฐ€?"],"NonComplianceCases": ["์‚ฌ๋ก€ 1 : ์žฌํ•ด ๋ณต๊ตฌ ํ›ˆ๋ จ์„ ๊ณ„ํšยท์‹œํ–‰ํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜ ๊ด€๋ จ ๊ณ„ํš์„œ ๋ฐ ๊ฒฐ๊ณผ๋ณด๊ณ ์„œ๊ฐ€ ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 2 : ์žฌํ•ด ๋ณต๊ตฌ ํ›ˆ๋ จ ๊ณ„ํš์„ ์ˆ˜๋ฆฝํ•˜์˜€์œผ๋‚˜, ํƒ€๋‹นํ•œ ์‚ฌ์œ  ๋˜๋Š” ์Šน์ธ ์—†์ด ๊ณ„ํš๋Œ€๋กœ ์‹ค์‹œํ•˜์ง€ ์•Š์•˜๊ฑฐ๋‚˜ ๊ด€๋ จ ๊ฒฐ๊ณผ๋ณด๊ณ ๊ฐ€ ํ™•์ธ๋˜์ง€ ์•Š์€ ๊ฒฝ์šฐ","์‚ฌ๋ก€ 3 : ์žฌํ•ด ๋ณต๊ตฌ ํ›ˆ๋ จ์„ ๊ณ„ํšํ•˜์—ฌ ์‹ค์‹œํ•˜์˜€์œผ๋‚˜, ๋‚ด๋ถ€ ๊ด€๋ จ ์ง€์นจ์— ์ •ํ•œ ์ ˆ์ฐจ ๋ฐ ์„œ์‹์— ๋”ฐ๋ผ ์ดํ–‰๋˜์ง€ ์•Š์•„ ์ˆ˜๋ฆฝํ•œ ์žฌํ•ด ๋ณต๊ตฌ ์ ˆ์ฐจ์˜ ์ ์ •์„ฑ ๋ฐ ํšจ๊ณผ์„ฑ์„ ํ‰๊ฐ€ํ•˜๊ธฐ ์œ„ํ•œ ํ›ˆ๋ จ์œผ๋กœ ๋ณด๊ธฐ ์–ด๋ ค์šด ๊ฒฝ์šฐ"],"RelatedRegulations": []}],"description": "์žฌํ•ด ๋ณต๊ตฌ ์ „๋žต ๋ฐ ๋Œ€์ฑ…์˜ ์ ์ •์„ฑ์„ ์ •๊ธฐ์ ์œผ๋กœ ์‹œํ—˜ํ•˜์—ฌ ์‹œํ—˜๊ฒฐ๊ณผ, ์ •๋ณด์‹œ์Šคํ…œ ํ™˜๊ฒฝ๋ณ€ํ™”, ๋ฒ•๊ทœ ๋“ฑ์— ๋”ฐ๋ฅธ ๋ณ€ํ™”๋ฅผ ๋ฐ˜์˜ํ•˜์—ฌ ๋ณต๊ตฌ์ „๋žต ๋ฐ ๋Œ€์ฑ…์„ ๋ณด์™„ํ•˜์—ฌ์•ผ ํ•œ๋‹ค.","checks_status": {"fail": 14,"pass": 5,"total": 33,"manual": 0}}},"requirements_passed": 10,"requirements_failed": 27,"requirements_manual": 64,"total_requirements": 101,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "f9e5248f-1b1d-4256-b2a1-3b571315c190","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "cis_1.4_aws","framework": 
"CIS","version": "1.4","description": "The CIS Benchmark for CIS Amazon Web Services Foundations Benchmark, v1.4.0, Level 1 and 2 provides prescriptive guidance for configuring security options for a subset of Amazon Web Services. It has an emphasis on foundational, testable, and architecture agnostic settings","region": "eu-west-1","requirements": {"1.1": {"name": "1.1","checks": {"account_maintain_current_contact_details": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info","Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.","DefaultValue": null,"AuditProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing )1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, review and verify the current details. 4. Under `Contact Information`, review and verify the current details.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.","RemediationProcedure": "This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:\\*Billing ).1. Sign in to the AWS Management Console and open the `Billing and Cost Management` console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose `My Account`. 3. On the `Account Settings` page, next to `Account Settings`, choose `Edit`. 4. Next to the field that you need to update, choose `Edit`. 5. After you have entered your changes, choose `Save changes`. 6. After you have made your changes, choose `Done`. 7. To edit your contact information, under `Contact Information`, choose `Edit`. 8. 
For the fields that you want to change, type your updated information, and then choose `Update`.","AdditionalInformation": ""}],"description": "Maintain current contact details","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.2": {"name": "1.2","checks": {"account_security_contact_information_is_registered": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "AWS provides customers with the option of specifying the contact information for account's security team. It is recommended that this information be provided.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if security contact information is present:**From Console:**1. Click on your account name at the top right corner of the console 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. Ensure contact information is specified in the `Security` section","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.","RemediationProcedure": "Perform the following to establish security contact information:**From Console:**1. Click on your account name at the top right corner of the console. 2. From the drop-down menu Click `My Account`3. Scroll down to the `Alternate Contacts` section 4. Enter contact information in the `Security` section**Note:** Consider specifying an internal email distribution list to ensure emails are regularly monitored by more than one individual.","AdditionalInformation": ""}],"description": "Ensure security contact information is registered","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.3": {"name": "1.3","checks": {"account_security_questions_are_registered_in_the_aws_account": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "","Description": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS account as the 'root' user 2. On the top right you will see the __ 3. Click on the __ 4. From the drop-down menu Click `My Account`5. In the `Configure Security Challenge Questions` section on the `Personal Information` page, configure three security challenge questions. 6. Click `Save questions` .","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "When creating a new AWS account, a default super user is automatically created. This account is referred to as the 'root user' or 'root' account. It is recommended that the use of this account be limited and highly controlled. During events in which the 'root' password is no longer accessible or the MFA token associated with 'root' is lost/destroyed it is possible, through authentication using secret questions and associated answers, to recover 'root' user login access.","RemediationProcedure": "**From Console:**1. Login to the AWS Account as the 'root' user 2. Click on the __ from the top right of the console 3. From the drop-down menu Click _My Account_ 4. Scroll down to the `Configure Security Questions` section 5. Click on `Edit`6. 
Click on each `Question` - From the drop-down select an appropriate question- Click on the `Answer` section- Enter an appropriate answer - Follow process for all 3 questions 7. Click `Update` when complete 8. Save Questions and Answers and place in a secure physical location","AdditionalInformation": ""}],"description": "Ensure security questions are registered in the AWS account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.4": {"name": "1.4","checks": {"iam_no_root_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html:https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/","Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has access keys:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `` user, ensure the `access_key_1_active` and `access_key_2_active` fields are set to `FALSE` .**From Command Line:**Run the following command: ```aws iam get-account-summary | grep \"AccountAccessKeysPresent\"``` If no 'root' access keys exist the output will show \"AccountAccessKeysPresent\": 0,. If the output shows a \"1\" than 'root' keys exist, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role based accounts that are least privileged.","RemediationProcedure": "Perform the following to delete or disable active 'root' user access keys**From Console:**1. Sign in to the AWS Management Console as 'root' and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. Click on __ at the top right and select `My Security Credentials` from the drop down list 3. On the pop out screen Click on `Continue to Security Credentials`4. Click on `Access Keys` _(Access Key ID and Secret Access Key)_ 5. Under the `Status` column if there are any Keys which are Active- Click on `Make Inactive` - (Temporarily disable Key - may be needed again)- Click `Delete` - (Deleted keys cannot be recovered)","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request to AWS support enables 'root' access only through access-keys (CLI, API methods) for us-gov cloud region."}],"description": "Ensure no 'root' user account access key exists","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.5": {"name": "1.5","checks": {"iam_root_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.**Note:** When virtual MFA is used for 'root' accounts, it is recommended that the device used is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. (\"non-personal virtual MFA\") This lessens the risks of losing access to the MFA due to device loss, device trade-in or if the individual owning the device is no longer employed at the company.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has MFA setup:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on `Credential Report`5. This will download a `.csv` file which contains credential usage for all IAM users within an AWS Account - open this file 6. For the `` user, ensure the `mfa_active` field is set to `TRUE` .**From Command Line:**1. Run the following command: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ``` 2. Ensure the AccountMFAEnabled property is set to 1","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to establish MFA for the 'root' user account:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the 'root' AWS account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials.2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A virtual MFA` device and then choose `Next Step` . 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see [Virtual MFA Applications](http://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications).) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. 
For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application.When you are finished, the virtual MFA device starts generating one-time passwords.In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Assign Virtual MFA.","AdditionalInformation": "IAM User account \"root\" for us-gov cloud regions does not have console access. This recommendation is not applicable for us-gov cloud regions."}],"description": "Ensure MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.6": {"name": "1.6","checks": {"iam_root_hardware_mfa_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root","Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2, it is recommended that the 'root' user account be protected with a hardware MFA.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the 'root' user account has a hardware MFA setup:1. Run the following command to determine if the 'root' account has MFA setup: ```aws iam get-account-summary | grep \"AccountMFAEnabled\" ```The `AccountMFAEnabled` property is set to `1` will ensure that the 'root' user account has MFA (Virtual or Hardware) Enabled. If `AccountMFAEnabled` property is set to `0` the account is not compliant with this recommendation.2. If `AccountMFAEnabled` property is set to `1`, determine 'root' account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: ```aws iam list-virtual-mfa-devices``` If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: `\"SerialNumber\": \"arn:aws:iam::__:mfa/root-account-mfa-device\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides.**Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts.","RemediationProcedure": "Perform the following to establish a hardware MFA for the 'root' user account:1. 
Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). Note: to manage MFA devices for the AWS 'root' user account, you must use your 'root' account credentials to sign in to AWS. You cannot manage MFA devices for the 'root' account using other credentials. 2. Choose `Dashboard` , and under `Security Status` , expand `Activate MFA` on your root account. 3. Choose `Activate MFA`4. In the wizard, choose `A hardware MFA` device and then choose `Next Step` . 5. In the `Serial Number` box, enter the serial number that is found on the back of the MFA device. 6. In the `Authentication Code 1` box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. 7. Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the `Authentication Code 2` box. You might need to press the button on the front of the device again to display the second number. 8. Choose `Next Step` . The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.Remediation for this recommendation is not available through AWS CLI.","AdditionalInformation": "IAM User account 'root' for us-gov cloud regions does not have console access. This control is not applicable for us-gov cloud regions."}],"description": "Ensure hardware MFA is enabled for the 'root' user account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.7": {"name": "1.7","checks": {"iam_avoid_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html:https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html","Description": "With the creation of an AWS account, a 'root user' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console at `https://console.aws.amazon.com/iam/` 2. In the left pane, click `Credential Report` 3. Click on `Download Report` 4. Open of Save the file locally 5. Locate the `` under the user column 6. Review `password_last_used, access_key_1_last_used_date, access_key_2_last_used_date` to determine when the 'root user' was last used.**From Command Line:**Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: ``` aws iam generate-credential-report ``` ``` aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '' ```Review `password_last_used`, `access_key_1_last_used_date`, `access_key_2_last_used_date` to determine when the _root user_ was last used.**Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "The 'root user' has unrestricted access to and control over all account resources. 
Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.","RemediationProcedure": "If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user:1. Change the 'root' user password. 2. Deactivate or delete any access keys associate with the 'root' user.**Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.","AdditionalInformation": "The 'root' user for us-gov cloud regions is not enabled by default. However, on request to AWS support, they can enable the 'root' user and grant access only through access-keys (CLI, API methods) for us-gov cloud region. If the 'root' user for us-gov cloud regions is enabled, this recommendation is applicable.Monitoring usage of the 'root' user can be accomplished by implementing recommendation 3.3 Ensure a log metric filter and alarm exist for usage of the 'root' user."}],"description": "Eliminate use of the 'root' user for administrative and daily tasks","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.8": {"name": "1.8","checks": {"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "Password policies are, in part, used to enforce password complexity requirements. IAM password policies can be used to ensure password are at least a given length. It is recommended that the password policy require a minimum password length 14.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Minimum password length\" is set to 14 or greater.**From Command Line:** ``` aws iam get-account-password-policy ``` Ensure the output of the above command includes \"MinimumPasswordLength\": 14 (or higher)","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Setting a password complexity policy increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Set \"Minimum password length\" to `14` or greater. 5. 
Click \"Apply password policy\"**From Command Line:** ```aws iam update-account-password-policy --minimum-password-length 14 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy requires minimum length of 14 or greater","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.9": {"name": "1.9","checks": {"iam_password_policy_reuse_24": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy","Description": "IAM password policies can prevent the reuse of a given password by the same user. It is recommended that the password policy prevent the reuse of passwords.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the password policy is configured as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Ensure \"Prevent password reuse\" is checked 5. Ensure \"Number of passwords to remember\" is set to 24**From Command Line:** ``` aws iam get-account-password-policy``` Ensure the output of the above command includes \"PasswordReusePrevention\": 24","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Preventing password reuse increases account resiliency against brute force login attempts.","RemediationProcedure": "Perform the following to set the password policy as prescribed:**From Console:**1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Go to IAM Service on the AWS Console 3. Click on Account Settings on the Left Pane 4. Check \"Prevent password reuse\" 5. Set \"Number of passwords to remember\" is set to `24` **From Command Line:** ```aws iam update-account-password-policy --password-reuse-prevention 24 ``` Note: All commands starting with \"aws iam update-account-password-policy\" can be combined into a single command.","AdditionalInformation": ""}],"description": "Ensure IAM password policy prevents password reuse","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.1": {"name": "3.1","checks": {"cloudtrail_multi_region_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events","Description": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. 
CloudTrail provides a history of AWS API calls for an account, including API calls made via the Management Console, SDKs, command line tools, and higher-level AWS services (such as CloudFormation).","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is enabled for all regions:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane- You will be presented with a list of trails across all regions 3. Ensure at least one Trail has `All` specified in the `Region` column 4. Click on a trail via the link in the _Name_ column 5. Ensure `Logging` is set to `ON` 6. Ensure `Apply trail to all regions` is set to `Yes` 7. In section `Management Events` ensure `Read/Write Events` is set to `ALL`**From Command Line:** ```aws cloudtrail describe-trails ``` Ensure `IsMultiRegionTrail` is set to `true` ``` aws cloudtrail get-trail-status --name  ``` Ensure `IsLogging` is set to `true` ``` aws cloudtrail get-event-selectors --trail-name  ``` Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`","ImpactStatement": "S3 lifecycle features can be used to manage the accumulation and management of logs over time. See the following AWS resource for more information on these features:1. https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html","AssessmentStatus": "Automated","RationaleStatement": "The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. Additionally, - ensuring that a multi-region trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected- ensuring that a multi-region trail exists will ensure that `Global Service Logging` is enabled for a trail by default to capture recording of events generated on AWS global services- for a multi-region trail, ensuring that management events are configured for all types of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account","RemediationProcedure": "Perform the following to enable global (Multi-region) CloudTrail logging:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on _Trails_ on the left navigation pane 3. Click `Get Started Now` , if presented- Click `Add new trail` - Enter a trail name in the `Trail name` box- Set the `Apply trail to all regions` option to `Yes` - Specify an S3 bucket name in the `S3 bucket` box- Click `Create`4. If 1 or more trails already exist, select the target trail to enable for global logging 5. Click the edit icon (pencil) next to `Apply trail to all regions` , Click `Yes` and Click `Save`. 6. 
Click the edit icon (pencil) next to `Management Events` click `All` for setting `Read/Write Events` and Click `Save`.**From Command Line:** ``` aws cloudtrail create-trail --name  --bucket-name  --is-multi-region-trail aws cloudtrail update-trail --name  --is-multi-region-trail ```Note: Creating CloudTrail via CLI without providing any overriding options configures `Management Events` to set `All` type of `Read/Writes` by default.","AdditionalInformation": ""}],"description": "Ensure CloudTrail is enabled in all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.2": {"name": "3.2","checks": {"cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html","Description": "CloudTrail log file validation creates a digitally signed digest file containing a hash of each log that CloudTrail writes to S3. These digest files can be used to determine whether a log file was changed, deleted, or unchanged after CloudTrail delivered the log. It is recommended that file validation be enabled on all CloudTrails.","DefaultValue": null,"AuditProcedure": "Perform the following on each trail to determine if log file validation is enabled:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. For Every Trail: - Click on a trail via the link in the _Name_ column - Under the `General details` section, ensure `Log file validation` is set to `Enabled` **From Command Line:** ``` aws cloudtrail describe-trails ``` Ensure `LogFileValidationEnabled` is set to `true` for each trail","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.","RemediationProcedure": "Perform the following to enable log file validation on a given trail:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. Click on `Trails` on the left navigation pane 3. Click on the target trail 4. Within the `General details` section click `edit` 5. Under the `Advanced settings` section 6. Check the enable box under `Log file validation` 7. Click `Save changes` **From Command Line:** ``` aws cloudtrail update-trail --name  --enable-log-file-validation ``` Note that periodic validation of logs using these digests can be performed by running the following command: ``` aws cloudtrail validate-logs --trail-arn  --start-time  --end-time  ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail log file validation is enabled","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.3": {"name": "3.3","checks": {"cloudtrail_logs_s3_bucket_is_not_publicly_accessible": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html","Description": "CloudTrail logs a record of every API call made in your AWS account. These log files are stored in an S3 bucket. 
It is recommended that the bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the `API activity history` pane on the left, click `Trails`3. In the `Trails` pane, note the bucket names in the `S3 bucket` column 4. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 5. For each bucket noted in step 3, right-click on the bucket and click `Properties`6. In the `Properties` pane, click the `Permissions` tab. 7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 8. Ensure no rows exist that have the `Grantee` set to `Everyone` or the `Grantee` set to `Any Authenticated User.`9. If the `Edit bucket policy` button is present, click it to review the bucket policy. 10. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ```aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' ``` 2. Ensure the `AllUsers` principal is not granted privileges to that `` : ```aws s3api get-bucket-acl --bucket  --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AllUsers` ]' ``` 3. Ensure the `AuthenticatedUsers` principal is not granted privileges to that ``: ```aws s3api get-bucket-acl --bucket  --query 'Grants[?Grantee.URI== `https://acs.amazonaws.com/groups/global/AuthenticatedUsers` ]' ``` 4. Get the S3 Bucket Policy ```aws s3api get-bucket-policy --bucket ``` 5. Ensure the policy does not contain a `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}**Note:** Principal set to \"\\*\" or {\"AWS\" : \"\\*\"} allows anonymous access.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account's use or configuration.","RemediationProcedure": "Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy:1. Go to Amazon S3 console at [https://console.aws.amazon.com/s3/home](https://console.aws.amazon.com/s3/home) 2. Right-click on the bucket and click Properties 3. In the `Properties` pane, click the `Permissions` tab. 4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. 5. Select the row that grants permission to `Everyone` or `Any Authenticated User`6. Uncheck all the permissions granted to `Everyone` or `Any Authenticated User` (click `x` to delete the row). 7. Click `Save` to save the ACL. 8. If the `Edit bucket policy` button is present, click it. 9. 
Remove any `Statement` having an `Effect` set to `Allow` and a `Principal` set to \"\\*\" or {\"AWS\" : \"\\*\"}.","AdditionalInformation": ""}],"description": "Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"3.4": {"name": "3.4","checks": {"cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/how-cloudtrail-works.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-aws-service-specific-topics.html","Description": "AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs within a specified S3 bucket for long term analysis, real-time analysis can be performed by configuring CloudTrail to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is recommended that CloudTrail logs be sent to CloudWatch Logs.Note: The intent of this recommendation is to ensure AWS account activity is being captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to accomplish this using AWS services but does not preclude the use of an alternate solution.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure CloudTrail is configured as prescribed:**From Console:**1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Under `Trails` , click on the CloudTrail you wish to evaluate 3. Go to the `CloudWatch Logs` section. 4. Ensure a `CloudWatch Logs` log group is configured and listed. 5. Under `General details` confirm `Last log file delivered` has a recent (~one day old) timestamp.**From Command Line:**1. Run the following command to get a listing of existing trails: ```aws cloudtrail describe-trails ``` 2. Ensure `CloudWatchLogsLogGroupArn` is not empty and note the value of the `Name` property. 3. Using the noted value of the `Name` property, run the following command: ```aws cloudtrail get-trail-status --name  ``` 4. Ensure the `LatestCloudWatchLogsDeliveryTime` property is set to a recent (~one day old) timestamp.If the `CloudWatch Logs` log group is not set up and the delivery time is not recent, refer to the remediation below.","ImpactStatement": "Note: By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. 
https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides an opportunity to establish alarms and notifications for anomalous or sensitive account activity.","RemediationProcedure": "Perform the following to establish the prescribed state:**From Console:**1. Login to the CloudTrail console at `https://console.aws.amazon.com/cloudtrail/` 2. Select the `Trail` that needs to be updated. 3. Scroll down to `CloudWatch Logs` 4. Click `Edit` 5. Under `CloudWatch Logs` click the box `Enabled` 6. Under `Log Group` pick new or select an existing log group 7. Edit the `Log group name` to match the CloudTrail or pick the existing CloudWatch Group. 8. Under `IAM Role` pick new or select an existing role. 9. Edit the `Role name` to match the CloudTrail or pick the existing IAM Role. 10. Click `Save changes`.**From Command Line:** ``` aws cloudtrail update-trail --name  --cloudwatch-logs-log-group-arn  --cloudwatch-logs-role-arn  ```","AdditionalInformation": ""}],"description": "Ensure CloudTrail trails are integrated with CloudWatch Logs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.5": {"name": "3.5","checks": {"config_recorder_all_regions_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html","Description": "AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), and any configuration changes between resources. It is recommended AWS Config be enabled in all regions.","DefaultValue": null,"AuditProcedure": "Process to evaluate AWS Config configuration per region**From Console:**1. Sign in to the AWS Management Console and open the AWS Config console at [https://console.aws.amazon.com/config/](https://console.aws.amazon.com/config/). 2. On the top right of the console select target Region. 3. If presented with Setup AWS Config - follow remediation procedure: 4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears. 5. Ensure 1 or both check-boxes under \"All Resources\" are checked.- Include global resources related to IAM resources - which needs to be enabled in 1 region only 6. Ensure the correct S3 bucket has been defined. 7. Ensure the correct SNS topic has been defined. 8. Repeat steps 2 to 7 for each region.**From Command Line:**1. Run this command to show all AWS Config recorders and their properties: ``` aws configservice describe-configuration-recorders ``` 2. Evaluate the output to ensure that there's at least one recorder for which `recordingGroup` object includes `\"allSupported\": true` AND `\"includeGlobalResourceTypes\": true`Note: There is one more parameter \"ResourceTypes\" in recordingGroup object. 
We don't need to check it, as whenever we set \"allSupported\": true, AWS enforces the resource types to be empty (\"ResourceTypes\":[]). Sample Output:``` {\"ConfigurationRecorders\": [{\"recordingGroup\": {\"allSupported\": true,\"resourceTypes\": [],\"includeGlobalResourceTypes\": true},\"roleARN\": \"arn:aws:iam:::role/service-role/\",\"name\": \"default\"}] } ```3. Run this command to show the status for all AWS Config recorders: ``` aws configservice describe-configuration-recorder-status ``` 4. In the output, find recorders with `name` key matching the recorders that met criteria in step 2. Ensure that at least one of them includes `\"recording\": true` and `\"lastStatus\": \"SUCCESS\"`","ImpactStatement": "It is recommended AWS Config be enabled in all regions.","AssessmentStatus": "Automated","RationaleStatement": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.","RemediationProcedure": "To implement AWS Config configuration:**From Console:**1. Select the region you want to focus on in the top right of the console 2. Click `Services`3. Click `Config`4. Define which resources you want to record in the selected region 5. Choose to include global resources (IAM resources) 6. Specify an S3 bucket in the same account or in another managed AWS account 7. Create an SNS Topic from the same AWS account or another managed AWS account**From Command Line:**1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](http://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). 2. Run this command to set up the configuration recorder ``` aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole ``` 3. Run this command to start the configuration recorder: ``` aws configservice start-configuration-recorder --configuration-recorder-name  ```","AdditionalInformation": ""}],"description": "Ensure AWS Config is enabled in all regions","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.6": {"name": "3.6","checks": {"cloudtrail_logs_s3_bucket_access_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html","Description": "S3 Bucket Access Logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed. It is recommended that bucket access logging be enabled on the CloudTrail S3 bucket.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure the CloudTrail S3 bucket has access logging enabled:**From Console:**1. Go to the Amazon CloudTrail console at [https://console.aws.amazon.com/cloudtrail/home](https://console.aws.amazon.com/cloudtrail/home) 2. In the API activity history pane on the left, click Trails 3. In the Trails pane, note the bucket names in the S3 bucket column 4. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 5. Under `All Buckets` click on a target S3 bucket 6. Click on `Properties` in the top right of the console 7. Under `Bucket:` _ `` _ click on `Logging`8. 
Ensure `Enabled` is checked.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ```aws cloudtrail describe-trails --query 'trailList[*].S3BucketName'``` 2. Ensure Bucket Logging is enabled: ``` aws s3api get-bucket-logging --bucket  ``` Ensure the command does not return empty output.Sample Output for a bucket with logging enabled:``` {\"LoggingEnabled\": {\"TargetPrefix\": \"\",\"TargetBucket\": \"\"} } ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within any target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.","RemediationProcedure": "Perform the following to enable S3 bucket logging:**From Console:**1. Sign in to the AWS Management Console and open the S3 console at [https://console.aws.amazon.com/s3](https://console.aws.amazon.com/s3). 2. Under `All Buckets` click on the target S3 bucket 3. Click on `Properties` in the top right of the console 4. Under `Bucket:`  click on `Logging`5. Configure bucket logging- Click on the `Enabled` checkbox- Select Target Bucket from list- Enter a Target Prefix 6. Click `Save`.**From Command Line:**1. Get the name of the S3 bucket that CloudTrail is logging to: ``` aws cloudtrail describe-trails --region  --query trailList[*].S3BucketName ``` 2. Copy and add target bucket name at ``, Prefix for logfile at `` and optionally add an email address in the following template and save it as ``: ``` {\"LoggingEnabled\": {\"TargetBucket\": \"\",\"TargetPrefix\": \"\",\"TargetGrants\": [{\"Grantee\": {\"Type\": \"AmazonCustomerByEmail\",\"EmailAddress\": \"\"},\"Permission\": \"FULL_CONTROL\"}]}} ``` 3. Run the `put-bucket-logging` command with bucket name and `` as input, for more information refer to [put-bucket-logging](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-logging.html): ``` aws s3api put-bucket-logging --bucket  --bucket-logging-status file:// ```","AdditionalInformation": ""}],"description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.7": {"name": "3.7","checks": {"cloudtrail_kms_encryption_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html:https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html","Description": "AWS CloudTrail is a web service that records AWS API calls for an account and makes those logs available to users and resources in accordance with IAM policies. AWS Key Management Service (KMS) is a managed service that helps create and control the encryption keys used to encrypt account data, and uses Hardware Security Modules (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to leverage server side encryption (SSE) and KMS customer created master keys (CMK) to further protect CloudTrail logs. It is recommended that CloudTrail be configured to use SSE-KMS.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if CloudTrail is configured to use SSE-KMS:**From Console:**1. 
Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Select a Trail 4. Under the `S3` section, ensure `Encrypt log files` is set to `Yes` and a KMS key ID is specified in the `KMS Key Id` field.**From Command Line:**1. Run the following command: ```aws cloudtrail describe-trails``` 2. For each trail listed, SSE-KMS is enabled if the trail has a `KmsKeyId` property defined.","ImpactStatement": "Customer created keys incur an additional cost. See https://aws.amazon.com/kms/pricing/ for more information.","AssessmentStatus": "Automated","RationaleStatement": "Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.","RemediationProcedure": "Perform the following to configure CloudTrail to use SSE-KMS:**From Console:**1. Sign in to the AWS Management Console and open the CloudTrail console at [https://console.aws.amazon.com/cloudtrail](https://console.aws.amazon.com/cloudtrail) 2. In the left navigation pane, choose `Trails` . 3. Click on a Trail 4. Under the `S3` section click on the edit button (pencil icon) 5. Click `Advanced`6. Select an existing CMK from the `KMS key Id` drop-down menu- Note: Ensure the CMK is located in the same region as the S3 bucket- Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy 7. Click `Save`8. You will see a notification message stating that you need to have decrypt permissions on the specified KMS key to decrypt log files. 9. Click `Yes` **From Command Line:** ``` aws cloudtrail update-trail --name  --kms-key-id  aws kms put-key-policy --key-id  --policy  ```","AdditionalInformation": "3 statements which need to be added to the CMK policy:1\\. Enable CloudTrail to describe CMK properties ``` 
{\"Sid\": \"Allow CloudTrail access\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:DescribeKey\",\"Resource\": \"*\" } ``` 2\\. Granting encrypt permissions ``` 
{\"Sid\": \"Allow CloudTrail to encrypt logs\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"cloudtrail.amazonaws.com\"},\"Action\": \"kms:GenerateDataKey*\",\"Resource\": \"*\",\"Condition\": {\"StringLike\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": [\"arn:aws:cloudtrail:*:aws-account-id:trail/*\"]}} } ``` 3\\. Granting decrypt permissions ``` 
{\"Sid\": \"Enable CloudTrail log decrypt permissions\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"arn:aws:iam::aws-account-id:user/username\"},\"Action\": \"kms:Decrypt\",\"Resource\": \"*\",\"Condition\": {\"Null\": {\"kms:EncryptionContext:aws:cloudtrail:arn\": \"false\"}} } ```"}],"description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"3.8": {"name": "3.8","checks": {"kms_cmk_rotation_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://aws.amazon.com/kms/pricing/:https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final","Description": "AWS Key Management Service (KMS) allows customers to rotate the backing key which is key material stored within the KMS which is tied to the key ID of the Customer Created customer master key (CMK). It is the backing key that is used to perform cryptographic operations such as encryption and decryption. Automated key rotation currently retains all prior backing keys so that decryption of encrypted data can take place transparently. It is recommended that CMK key rotation be enabled for symmetric keys. Key rotation can not be enabled for any asymmetric CMK.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the `General configuration` panel open the tab `Key rotation` 5. Ensure that the checkbox `Automatically rotate this KMS key every year.` is activated 6. Repeat steps 3 - 5 for all customer managed CMKs where \"Key spec = SYMMETRIC_DEFAULT\"**From Command Line:**1. Run the following command to get a list of all keys and their associated `KeyIds````aws kms list-keys ``` 2. For each key, note the KeyId and run the following command ``` describe-key --key-id  ``` 3. If the response contains \"KeySpec = SYMMETRIC_DEFAULT\" run the following command ```aws kms get-key-rotation-status --key-id  ``` 4. Ensure `KeyRotationEnabled` is set to `true` 5. Repeat steps 2 - 4 for all remaining CMKs","ImpactStatement": "Creation, management, and storage of CMKs may require additional time from and administrator.","AssessmentStatus": "Automated","RationaleStatement": "Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed. Keys should be rotated every year, or upon event that would result in the compromise of that key.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam](https://console.aws.amazon.com/iam). 2. In the left navigation pane, choose `Customer managed keys` . 3. Select a customer managed CMK where `Key spec = SYMMETRIC_DEFAULT` 4. Underneath the \"General configuration\" panel open the tab \"Key rotation\" 5. Check the \"Automatically rotate this KMS key every year.\" checkbox**From Command Line:**1. 
Run the following command to enable key rotation: ```aws kms enable-key-rotation --key-id  ```","AdditionalInformation": ""}],"description": "Ensure rotation for customer created symmetric CMKs is enabled","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.9": {"name": "3.9","checks": {"vpc_flow_logs_enabled": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html","Description": "VPC Flow Logs is a feature that enables you to capture information about the IP traffic going to and from network interfaces in your VPC. After you've created a flow log, you can view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow Logs be enabled for packet \"Rejects\" for VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if VPC Flow logs are enabled:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. Ensure a Log Flow exists that has `Active` in the `Status` column.**From Command Line:**1. Run `describe-vpcs` command (OSX/Linux/UNIX) to list the VPC networks available in the current AWS region: ``` aws ec2 describe-vpcs --region  --query Vpcs[].VpcId ``` 2. The command output returns the `VpcId` available in the selected region. 3. Run `describe-flow-logs` command (OSX/Linux/UNIX) using the VPC ID to determine if the selected virtual network has the Flow Logs feature enabled: ``` aws ec2 describe-flow-logs --filter \"Name=resource-id,Values=\" ``` 4. If there are no Flow Logs created for the selected VPC, the command output will return an `empty list []`. 5. Repeat step 3 for other VPCs available in the same region. 6. Change the region by updating `--region` and repeat steps 1 - 5 for all the VPCs.","ImpactStatement": "By default, CloudWatch Logs will store Logs indefinitely unless a specific retention period is defined for the log group. When choosing the number of days to retain, keep in mind the average days it takes an organization to realize they have been breached is 210 days (at the time of this writing). Since additional time is required to research a breach, a minimum 365 day retention policy allows time for detection and research. You may also wish to archive the logs to a cheaper storage service rather than simply deleting them. See the following AWS resource to manage CloudWatch Logs retention periods:1. https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/SettingLogRetention.html","AssessmentStatus": "Automated","RationaleStatement": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.","RemediationProcedure": "Perform the following to determine if VPC Flow logs is enabled:**From Console:**1. Sign into the management console 2. Select `Services` then `VPC`3. In the left navigation pane, select `Your VPCs`4. Select a VPC 5. In the right pane, select the `Flow Logs` tab. 6. If no Flow Log exists, click `Create Flow Log`7. For Filter, select `Reject` 8. Enter in a `Role` and `Destination Log Group`9. Click `Create Log Flow`10. 
Click on `CloudWatch Logs Group` **Note:** Setting the filter to \"Reject\" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research and remediation. However, during periods of least privilege security group engineering, setting this the filter to \"All\" can be very helpful in discovering existing traffic flows required for proper operation of an already running environment.**From Command Line:**1. Create a policy document and name it as `role_policy_document.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Sid\": \"test\",\"Effect\": \"Allow\",\"Principal\": {\"Service\": \"ec2.amazonaws.com\"},\"Action\": \"sts:AssumeRole\"}] } ``` 2. Create another policy document and name it as `iam_policy.json` and paste the following content: ``` {\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Action\":[\"logs:CreateLogGroup\",\"logs:CreateLogStream\",\"logs:DescribeLogGroups\",\"logs:DescribeLogStreams\",\"logs:PutLogEvents\",\"logs:GetLogEvents\",\"logs:FilterLogEvents\"],\"Resource\": \"*\"}] } ``` 3. Run the below command to create an IAM role: ``` aws iam create-role --role-name  --assume-role-policy-document file://role_policy_document.json``` 4. Run the below command to create an IAM policy: ``` aws iam create-policy --policy-name  --policy-document file://iam-policy.json ``` 5. Run `attach-group-policy` command using the IAM policy ARN returned at the previous step to attach the policy to the IAM role (if the command succeeds, no output is returned): ``` aws iam attach-group-policy --policy-arn arn:aws:iam:::policy/ --group-name  ``` 6. Run `describe-vpcs` to get the VpcId available in the selected region: ``` aws ec2 describe-vpcs --region  ``` 7. The command output should return the VPC Id available in the selected region. 8. Run `create-flow-logs` to create a flow log for the vpc: ``` aws ec2 create-flow-logs --resource-type VPC --resource-ids  --traffic-type REJECT --log-group-name  --deliver-logs-permission-arn  ``` 9. Repeat step 8 for other vpcs available in the selected region. 10. Change the region by updating --region and repeat remediation procedure for other vpcs.","AdditionalInformation": ""}],"description": "Ensure VPC flow logging is enabled in all VPCs","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"4.1": {"name": "4.1","checks": {"cloudwatch_log_metric_filter_unauthorized_api_calls": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://aws.amazon.com/sns/:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for unauthorized API calls.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with \"Name\":` note ``- From value associated with \"CloudWatchLogsLogGroupArn\" note Example: for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:::log-group:NewGroup:*,  would be NewGroup- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name <\"Name\" as shown in describe-trails>`Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this `` that you captured in step 1:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.errorCode = *UnauthorizedOperation) || ($.errorCode = AccessDenied*) || ($.sourceIPAddress!=delivery.logs.amazonaws.com) || ($.eventName!=HeadBucket) }\", ```4. Note the \"filterName\" `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName == `unauthorized_api_calls_metric`]\" ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "This alert may be triggered by normal read-only console activities that attempt to opportunistically gather optional information, but gracefully fail if they don't have permissions.If an excessive number of alerts are being generated then an organization may wish to consider adding read access to the limited IAM user permissions simply to quiet the alerts.In some cases doing this may allow the users to actually view some areas of the system - any additional access given should be reviewed for alignment with the original limited IAM user intent.","AssessmentStatus": "Automated","RationaleStatement": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for unauthorized API calls and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"cloudtrail_log_group_name\" --filter-name \"\" --metric-transformations metricName=unauthorized_api_calls_metric,metricNamespace=CISBenchmark,metricValue=1 --filter-pattern \"{ ($.errorCode = \"*UnauthorizedOperation\") || ($.errorCode = \"AccessDenied*\") || ($.sourceIPAddress!=\"delivery.logs.amazonaws.com\") || ($.eventName!=\"HeadBucket\") }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. 
Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms. **Note**: Capture the TopicArn displayed when creating the SNS Topic in Step 2.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"unauthorized_api_calls_alarm\" --metric-name \"unauthorized_api_calls_metric\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for unauthorized API calls","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.2": {"name": "4.2","checks": {"cloudwatch_log_metric_filter_sign_in_without_mfa": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/viewing_metrics_with_cloudwatch.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for console logins that are not protected by multi-factor authentication (MFA).","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all `CloudTrails`:``` aws cloudtrail describe-trails ```- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region `CloudTrail` is active``` aws cloudtrail get-trail-status --name  ```Ensure in the output that `IsLogging` is set to `TRUE`- Ensure identified Multi-region 'Cloudtrail' captures all Management Events``` aws cloudtrail get-event-selectors --trail-name  ```Ensure in the output there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. 
Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }\" ```Or (To reduce false positives in case Single Sign-On (SSO) is used in the organization):``` \"filterPattern\": \"{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for single-factor console logins will increase visibility into accounts that are not protected by MFA.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Management Console sign-in without MFA and the `` taken from audit step 1.Use Command: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") }' ```Or (To reduce false positives in case Single Sign-On (SSO) is used in the organization):``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\") }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored -Filter pattern set to `{ ($.eventName = \"ConsoleLogin\") && ($.additionalEventData.MFAUsed != \"Yes\") && ($.userIdentity.type = \"IAMUser\") && ($.responseElements.ConsoleLogin = \"Success\")}` reduces false alarms raised when a user logs in via an SSO account."}],"description": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.3": {"name": "4.3","checks": {"cloudwatch_log_metric_filter_root_usage": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for 'root' login attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring for 'root' account logins will provide visibility into the use of a fully privileged account and an opportunity to reduce the use of it.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for 'Root' account usage and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ $.userIdentity.type = \"Root\" && $.userIdentity.invokedBy NOT EXISTS && $.eventType != \"AwsServiceEvent\" }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "**Configuring log metric filter and alarm on Multi-region (global) CloudTrail**- ensures that activities from all regions (used as well as unused) are monitored- ensures that activities on all supported global services are monitored- ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for usage of 'root' account","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.4": {"name": "4.4","checks": {"cloudwatch_log_metric_filter_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes made to Identity and Access Management (IAM) policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails:`aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to IAM policies will help ensure authentication and authorization controls remain intact.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for IAM policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name `` --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventName=DeleteGroupPolicy)||($.eventName=DeleteRolePolicy)||($.eventName=DeleteUserPolicy)||($.eventName=PutGroupPolicy)||($.eventName=PutRolePolicy)||($.eventName=PutUserPolicy)||($.eventName=CreatePolicy)||($.eventName=DeletePolicy)||($.eventName=CreatePolicyVersion)||($.eventName=DeletePolicyVersion)||($.eventName=AttachRolePolicy)||($.eventName=DetachRolePolicy)||($.eventName=AttachUserPolicy)||($.eventName=DetachUserPolicy)||($.eventName=AttachGroupPolicy)||($.eventName=DetachGroupPolicy)}' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for IAM policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.5": {"name": "4.5","checks": {"cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to CloudTrail's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to CloudTrail's configuration will help ensure sustained visibility into activities performed in the AWS account.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for CloudTrail configuration changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateTrail) || ($.eventName = UpdateTrail) || ($.eventName = DeleteTrail) || ($.eventName = StartLogging) || ($.eventName = StopLogging) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.6": {"name": "4.6","checks": {"cloudwatch_log_metric_filter_authentication_failures": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for failed console authentication attempts.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring failed console logins may decrease lead time to detect an attempt to brute force a credential, which may provide an indicator, such as source IP, that can be used in other event correlation.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Management Console login failures and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = \"Failed authentication\") }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.7": {"name": "4.7","checks": {"cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for customer created CMKs which have changed state to disabled or scheduled deletion.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. 
``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Data encrypted with disabled or deleted keys will no longer be accessible.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for disabled or scheduled for deletion CMK's and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{($.eventSource = kms.amazonaws.com) && (($.eventName=DisableKey)||($.eventName=ScheduleKeyDeletion)) }' ``` **Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ``` **Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created CMKs","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.8": {"name": "4.8","checks": {"cloudwatch_log_metric_filter_for_s3_bucket_policy_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for changes to S3 bucket policies.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to S3 bucket policies may reduce time to detect and correct permissive policies on sensitive S3 buckets.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for S3 bucket policy changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = s3.amazonaws.com) && (($.eventName = PutBucketAcl) || ($.eventName = PutBucketPolicy) || ($.eventName = PutBucketCors) || ($.eventName = PutBucketLifecycle) || ($.eventName = PutBucketReplication) || ($.eventName = DeleteBucketPolicy) || ($.eventName = DeleteBucketCors) || ($.eventName = DeleteBucketLifecycle) || ($.eventName = DeleteBucketReplication)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for S3 bucket policy changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.9": {"name": "4.9","checks": {"cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for detecting changes to AWS Config's configurations.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. 
Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to AWS Config configuration will help ensure sustained visibility of configuration items within the AWS account.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Configuration changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = config.amazonaws.com) && (($.eventName=StopConfigurationRecorder)||($.eventName=DeleteDeliveryChannel)||($.eventName=PutDeliveryChannel)||($.eventName=PutConfigurationRecorder)) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for AWS Config configuration changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"5.1": {"name": "5.1","checks": {"ec2_networkacl_allow_ingress_any_port": "FAIL","ec2_networkacl_allow_ingress_tcp_port_22": "FAIL","ec2_networkacl_allow_ingress_tcp_port_3389": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html:https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison","Description": "The Network Access Control List (NACL) function provides stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "**From Console:**Perform the following to determine if the account is configured as prescribed: 1. 
Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` and shows `ALLOW`**Note:** A Port value of `ALL` or a port range such as `0-1024` is inclusive of port `22`, `3389`, and other remote server administration ports","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "**From Console:**Perform the following: 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home 2. In the left pane, click `Network ACLs` 3. For each network ACL to remediate, perform the following:- Select the network ACL- Click the `Inbound Rules` tab- Click `Edit inbound rules`- Either A) update the Source field to a range other than 0.0.0.0/0, or B) Click `Delete` to remove the offending inbound rule- Click `Save`","AdditionalInformation": ""}],"description": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 3,"pass": 0,"total": 3,"manual": 0}},"5.2": {"name": "5.2","checks": {"ec2_securitygroup_allow_ingress_from_internet_to_all_ports": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule","Description": "Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port `22` and RDP to port `3389`.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed:1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Ensure no rule exists that has a port range that includes port `22`, `3389`, or other remote server administration ports for your environment and has a `Source` of `0.0.0.0/0` **Note:** A Port value of `ALL` or a port range such as `0-1024` is inclusive of port `22`, `3389`, and other remote server administration ports.","ImpactStatement": "When updating an existing environment, ensure that administrators have access to remote server administration ports through another mechanism before removing access by deleting the 0.0.0.0/0 inbound rule.","AssessmentStatus": "Automated","RationaleStatement": "Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.","RemediationProcedure": "Perform the following to implement the prescribed state:1. 
Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. In the left pane, click `Security Groups`3. For each security group, perform the following: 1. Select the security group 2. Click the `Inbound Rules` tab 3. Click the `Edit inbound rules` button 4. Identify the rules to be edited or removed 5. Either A) update the Source field to a range other than 0.0.0.0/0, or B) Click `Delete` to remove the offending inbound rule 6. Click `Save rules`","AdditionalInformation": ""}],"description": "Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports","checks_status": {"fail": 0,"pass": 3,"total": 3,"manual": 0}},"5.3": {"name": "5.3","checks": {"ec2_securitygroup_default_restrict_traffic": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#default-security-group","Description": "A VPC comes with a default security group whose initial settings deny all inbound traffic, allow all outbound traffic, and allow all traffic between instances assigned to the security group. If you don't specify a security group when you launch an instance, the instance is automatically assigned to this default security group. Security groups provide stateful filtering of ingress/egress network traffic to AWS resources. It is recommended that the default security group restrict all traffic.The default VPC in every region should have its default security group updated to comply. Any newly created VPCs will automatically contain a default security group that will need remediation to comply with this recommendation.**NOTE:** When implementing this recommendation, VPC flow logging is invaluable in determining the least privilege port access required by systems to work properly because it can log all packet acceptances and rejections occurring under the current security groups. This dramatically reduces the primary barrier to least privilege engineering - discovering the minimum ports required by systems in the environment. Even if the VPC flow logging recommendation in this benchmark is not adopted as a permanent security measure, it should be used during any period of discovery and engineering for least privileged security groups.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if the account is configured as prescribed: Security Group State: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Ensure no rules exist 4. Click the `Outbound Rules` tab 5. Ensure no rules exist. Security Group Members: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. Copy the id of the default security group. 5. Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home 6. 
In the filter column type 'Security Group ID : < security group id from #4 >'","ImpactStatement": "Implementing this recommendation in an existing VPC containing operating resources requires extremely careful migration planning as the default security groups are likely to be enabling many ports that are unknown. Enabling VPC flow logging (of accepts) in an existing environment that is known to be breach-free will reveal the current pattern of ports being used for each instance to communicate successfully.","AssessmentStatus": "Automated","RationaleStatement": "Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will in turn reduce the exposure of those resources.","RemediationProcedure": "Security Group Members: Perform the following to implement the prescribed state:1. Identify AWS resources that exist within the default security group 2. Create a set of least privilege security groups for those resources 3. Place the resources in those security groups 4. Remove the resources noted in #1 from the default security group. Security Group State: 1. Login to the AWS Management Console at [https://console.aws.amazon.com/vpc/home](https://console.aws.amazon.com/vpc/home) 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: 3. In the left pane, click `Security Groups`4. For each default security group, perform the following: 1. Select the `default` security group 2. Click the `Inbound Rules` tab 3. Remove any inbound rules 4. Click the `Outbound Rules` tab 5. Remove any outbound rules. Recommended: IAM groups allow you to edit the \"name\" field. After remediating default group rules for all VPCs in all regions, edit this field to add text similar to \"DO NOT USE. DO NOT ADD RULES\"","AdditionalInformation": ""}],"description": "Ensure the default security group of every VPC restricts all traffic","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"5.4": {"name": "5.4","checks": {"vpc_peering_routing_tables_with_least_privilege": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "5. Networking","References": "https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/peering-configurations-partial-access.html:https://docs.aws.amazon.com/cli/latest/reference/ec2/create-vpc-peering-connection.html","Description": "Once a VPC peering connection is established, routing tables must be updated to establish any connections between the peered VPCs. These routes can be as specific as desired - even peering a VPC to only a single host on the other side of the connection.","DefaultValue": null,"AuditProcedure": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.**From Command Line:**1. List all the route tables from a VPC and check if \"GatewayId\" is pointing to a __ (e.g. pcx-1a2b3c4d) and if \"DestinationCidrBlock\" is as specific as desired. 
``` aws ec2 describe-route-tables --filter \"Name=vpc-id,Values=\" --query \"RouteTables[*].{RouteTableId:RouteTableId, VpcId:VpcId, Routes:Routes, AssociatedSubnets:Associations[*].SubnetId}\" ```","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of a breach as resources outside of these routes are inaccessible to the peered VPC.","RemediationProcedure": "Remove and add route table entries to ensure that only the least number of subnets or hosts required to accomplish the purpose for peering are routable.**From Command Line:**1. For each __ containing routes non-compliant with your routing policy (which grants more than desired \"least access\"), delete the non-compliant route: ``` aws ec2 delete-route --route-table-id  --destination-cidr-block  ```2. Create a new compliant route: ``` aws ec2 create-route --route-table-id  --destination-cidr-block  --vpc-peering-connection-id  ```","AdditionalInformation": "If an organization has AWS transit gateway implemented in their VPC architecture, they should look to apply the recommendation above for \"least access\" routing architecture at the AWS transit gateway level in combination with what must be implemented at the standard VPC route table. More specifically, to route traffic between two or more VPCs via a transit gateway, VPCs must have an attachment to a transit gateway route table as well as a route; therefore, to avoid routing traffic between VPCs, an attachment to the transit gateway route table should only be added where there is an intention to route traffic between the VPCs. As transit gateways are able to host multiple route tables, it is possible to group VPCs by attaching them to a common route table."}],"description": "Ensure routing tables for VPC peering are \"least access\"","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.10": {"name": "1.10","checks": {"iam_user_mfa_enabled_console_access": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://tools.ietf.org/html/rfc6238:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html:https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users","Description": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if an MFA device is enabled for all IAM users having a console password:**From Console:**1. Open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left pane, select `Users`3. If the `MFA` or `Password age` columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click `Close`. 4. 
Ensure that for each user where the `Password age` column shows a password age, the `MFA` column shows `Virtual`, `U2F Security Key`, or `Hardware`.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: ```aws iam generate-credential-report ``` ```aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8``` 2. The output of this command will produce a table similar to the following: ```user,password_enabled,mfa_active\nelise,false,false\nbrandon,true,true\nrakesh,false,false\nhelene,false,false\nparas,true,true\nanitha,false,false``` 3. For any row having `password_enabled` set to `true`, ensure `mfa_active` is also set to `true`.","ImpactStatement": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to an alternative method of MFA.","AssessmentStatus": "Automated","RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.","RemediationProcedure": "Perform the following to enable MFA:**From Console:**1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' 2. In the left pane, select `Users`. 3. In the `User Name` list, choose the name of the intended MFA user. 4. Choose the `Security Credentials` tab, and then choose `Manage MFA Device`. 5. In the `Manage MFA Device wizard`, choose `Virtual MFA` device, and then choose `Continue`. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes.6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). 7. Determine whether the MFA app supports QR codes, and then do one of the following: - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code.- In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. When you are finished, the virtual MFA device starts generating one-time passwords.8. In the `Manage MFA Device wizard`, in the `MFA Code 1 box`, type the `one-time password` that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second `one-time password` into the `MFA Code 2 box`.9. Click `Assign MFA`.","AdditionalInformation": "**Forced IAM User Self-Service Remediation** Amazon has published a pattern that forces users to self-service setup MFA before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. 
It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts."}],"description": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.11": {"name": "1.11","checks": {"iam_user_no_setup_initial_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/cli/latest/reference/iam/delete-access-key.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html","Description": "AWS console defaults to no check boxes selected when creating a new IAM user. When creating the IAM User credentials you have to determine what type of access they require. Programmatic access: The IAM user might need to make API calls, use the AWS CLI, or use the Tools for Windows PowerShell. In that case, create an access key (access key ID and a secret access key) for that user. AWS Management Console access: If the user needs to access the AWS Management Console, create a password for the user.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys were created upon user creation and are being used and rotated as prescribed:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM`4. Click on a User where the `Password age` and `Access key age` columns are not set to `None` 5. Click on `Security credentials` Tab 6. Compare the user `Creation time` to the Access Key `Created` date. 7. For any that match, the key was created during initial user setup.- Keys that were created at the same time as the user profile and do not have a last used date should be deleted. Refer to the remediation below.**From Command Line:**1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their access key utilization: ```aws iam generate-credential-report ``` ```aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,9,11,14,16 ``` 2. The output of this command will produce a table similar to the following: ``` user,password_enabled,access_key_1_active,access_key_1_last_used_date,access_key_2_active,access_key_2_last_used_date\nelise,false,true,2015-04-16T15:14:00+00:00,false,N/A\nbrandon,true,true,N/A,false,N/A\nrakesh,false,false,N/A,false,N/A\nhelene,false,true,2015-11-18T17:47:00+00:00,false,N/A\nparas,true,true,2016-08-28T12:04:00+00:00,true,2016-03-04T10:11:00+00:00\nanitha,true,true,2016-06-08T11:43:00+00:00,true,N/A``` 3. For any user having `password_enabled` set to `true` AND `access_key_last_used_date` set to `N/A` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Requiring the additional steps be taken by the user for programmatic access after their profile has been created will give a stronger indication of intent that access keys are [a] necessary for their work and [b] once the access key is established on an account that the keys may be in use somewhere in the organization.**Note**: Even if it is known the user will need access keys, require them to create the keys themselves or put in a support ticket to have them created as a separate step from user creation.","RemediationProcedure": "Perform the following to delete access keys that do not pass the audit:**From Console:**1. 
Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. As an Administrator - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used. 7. As an IAM User - Click on the X `(Delete)` for keys that were created at the same time as the user profile but have not been used.**From Command Line:** ``` aws iam delete-access-key --access-key-id  --user-name  ```","AdditionalInformation": "Credential report does not appear to contain \"Key Creation Date\""}],"description": "Do not setup access keys during initial user setup for all IAM users that have a console password","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.12": {"name": "1.12","checks": {"iam_user_accesskey_unused": null,"iam_user_console_access_unused": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused for 45 or more days be deactivated or removed.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if unused credentials exist:**From Console:**1. Login to the AWS Management Console 2. Click `Services`3. Click `IAM` 4. Click on `Users` 5. Click the `Settings` (gear) icon. 6. Select `Console last sign-in`, `Access key last used`, and `Access Key Id` 7. Click on `Close`8. Check and ensure that `Console last sign-in` is less than 45 days ago.**Note** - `Never` means the user has never logged in.9. Check and ensure that `Access key age` is less than 45 days and that `Access key last used` does not say `None`. If the user hasn't signed into the Console in the last 45 days or access keys are over 45 days old, refer to the remediation.**From Command Line:****Download Credential Report:**1. Run the following commands: ```aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 | grep -v '^' ```**Ensure unused credentials do not exist:**2. For each user having `password_enabled` set to `TRUE` , ensure `password_last_used_date` is less than `45` days ago.- When `password_enabled` is set to `TRUE` and `password_last_used` is set to `No_Information` , ensure `password_last_changed` is less than 45 days ago.3. 
For each user having `access_key_1_active` or `access_key_2_active` set to `TRUE`, ensure the corresponding `access_key_n_last_used_date` is less than `45` days ago.- When a user has `access_key_x_active` (where x is 1 or 2) set to `TRUE` and the corresponding `access_key_x_last_used_date` is set to `N/A`, ensure `access_key_x_last_rotated` is less than 45 days ago.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.","RemediationProcedure": "**From Console:**Perform the following to manage Unused Password (IAM user console access): 1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select the user whose `Console last sign-in` is greater than 45 days 7. Click `Security credentials` 8. In section `Sign-in credentials`, `Console password` click `Manage`9. Under Console Access select `Disable` 10. Click `Apply`Perform the following to deactivate Access Keys:1. Login to the AWS Management Console: 2. Click `Services`3. Click `IAM`4. Click on `Users`5. Click on `Security Credentials`6. Select any access keys that are over 45 days old and that have been used and - Click on `Make Inactive` 7. Select any access keys that are over 45 days old and that have not been used and - Click the X to `Delete`","AdditionalInformation": "The root account is excluded in the audit since the root account should not be used for day to day business and would likely be unused for more than 45 days."}],"description": "Ensure credentials unused for 45 days or greater are disabled","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.13": {"name": "1.13","checks": {"iam_user_two_active_access_key": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)","DefaultValue": null,"AuditProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. Under `Access Keys` section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the user's access configuration does not adhere to security best practices and the risk of accidental exposures increases. - Repeat steps no. 3 – 5 for each IAM user in your AWS account.**From Command Line:**1. Run `list-users` command to list all IAM users within your account: ``` aws iam list-users --query \"Users[*].UserName\" ``` The command output should return an array that contains all your IAM user names.2. 
Run `list-access-keys` command using the IAM user name list to return the current status of each access key associated with the selected IAM user: ``` aws iam list-access-keys --user-name  ``` The command output should expose the metadata `(\"Username\", \"AccessKeyId\", \"Status\", \"CreateDate\")` for each access key on that user account.3. Check the `Status` property value for each key returned to determine each key's current state. If the `Status` property value for more than one IAM access key is set to `Active`, the user's access configuration does not adhere to this recommendation; refer to the remediation below.- Repeat steps no. 2 and 3 for each IAM user in your AWS account.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Access keys are long-term credentials for an IAM user or the AWS account 'root' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.","RemediationProcedure": "**From Console:**1. Sign in to the AWS Management Console and navigate to IAM dashboard at `https://console.aws.amazon.com/iam/`. 2. In the left navigation panel, choose `Users`. 3. Click on the IAM user name that you want to examine. 4. On the IAM user configuration page, select `Security Credentials` tab. 5. In `Access Keys` section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. 6. In the same `Access Keys` section, identify your non-operational access keys (other than the chosen one) and deactivate them by clicking the `Make Inactive` link. 7. If you receive the `Change Key Status` confirmation box, click `Deactivate` to switch off the selected key. 8. Repeat steps no. 3 – 7 for each IAM user in your AWS account.**From Command Line:**1. Using the IAM user and access key information provided in the `Audit CLI`, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working.2. Run the `update-access-key` command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user. **Note** - the command does not return any output: ``` aws iam update-access-key --access-key-id  --status Inactive --user-name  ``` 3. To confirm that the selected access key pair has been successfully `deactivated`, run the `list-access-keys` audit command again for that IAM User: ``` aws iam list-access-keys --user-name  ``` - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) `Status` is set to `Inactive`, the key has been successfully deactivated and the IAM user access configuration now adheres to this recommendation.4. Repeat steps no. 1 – 3 for each IAM user in your AWS account.","AdditionalInformation": ""}],"description": "Ensure there is only one active access key available for any single IAM user","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.14": {"name": "1.14","checks": {"iam_rotate_access_key_90_days": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. 
Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html:https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html","Description": "Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if access keys are rotated as prescribed:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click the `settings` icon 4. Select `Console last sign-in` 5. Click `Close` 6. Ensure that `Access key age` is less than 90 days. **Note**: `None` in the `Access key age` column means the user has not used the access key.**From Command Line:**``` aws iam generate-credential-report aws iam get-credential-report --query 'Content' --output text | base64 -d ``` The `access_key_1_last_rotated` field in this file notes the date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.","RemediationProcedure": "Perform the following to rotate access keys:**From Console:**1. Go to Management Console (https://console.aws.amazon.com/iam) 2. Click on `Users` 3. Click on `Security Credentials`4. As an Administrator - Click on `Make Inactive` for keys that have not been rotated in `90` Days 5. As an IAM User - Click on `Make Inactive` or `Delete` for keys which have not been rotated or used in `90` Days 6. Click on `Create Access Key`7. Update programmatic call with new Access Key credentials**From Command Line:**1. While the first access key is still active, create a second access key, which is active by default. Run the following command: ``` aws iam create-access-key ```At this point, the user has two active access keys.2. Update all applications and tools to use the new access key. 3. Determine whether the first access key is still in use by using this command: ``` aws iam get-access-key-last-used ``` 4. One approach is to wait several days and then check the old access key for any use before proceeding. Even if Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. Instead, change the state of the first access key to Inactive using this command: ``` aws iam update-access-key ``` 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. 
If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to Step 2 and update this application to use the new key.6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: ``` aws iam delete-access-key ```","AdditionalInformation": ""}],"description": "Ensure access keys are rotated every 90 days or less","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.15": {"name": "1.15","checks": {"iam_policy_attached_only_to_group_or_roles": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html","Description": "IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. Only the third implementation is recommended.","DefaultValue": null,"AuditProcedure": "Perform the following to determine if an inline policy is set or a policy is directly attached to users:1. Run the following to get a list of IAM users: ```aws iam list-users --query 'Users[*].UserName' --output text``` 2. For each user returned, run the following command to determine if any policies are attached to them: ```aws iam list-attached-user-policies --user-name \naws iam list-user-policies --user-name ``` 3. If any policies are returned, the user has an inline policy or direct policy attachment.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Assigning IAM policies only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.","RemediationProcedure": "Perform the following to create an IAM group and assign a policy to it:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups` and then click `Create New Group` . 3. In the `Group Name` box, type the name of the group and then click `Next Step` . 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click `Next Step` . 5. Click `Create Group` Perform the following to add a user to a given group:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click `Groups`3. Select the group to add a user to 4. Click `Add Users To Group`5. Select the users to be added to the group 6. Click `Add Users` Perform the following to remove a direct association between a user and policy:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the left navigation pane, click on Users 3. 
For each user:- Select the user- Click on the `Permissions` tab- Expand `Permissions policies` - Click `X` for each policy; then click Detach or Remove (depending on policy type)","AdditionalInformation": ""}],"description": "Ensure IAM Users Receive Permissions Only Through Groups","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.16": {"name": "1.16","checks": {"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam","Description": "IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered standard security advice to grant _least privilege_ - that is, granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform _only_ those tasks, instead of allowing full administrative privileges.","DefaultValue": null,"AuditProcedure": "Perform the following to determine what policies are created:**From Command Line:**1. Run the following to get a list of IAM policies: ```aws iam list-policies --only-attached --output text ``` 2. For each policy returned, run the following command to determine if any policy allows full administrative privileges on the account: ```aws iam get-policy-version --policy-arn  --version-id  ``` 3. In the output, ensure the policy does not have any Statement block with `\"Effect\": \"Allow\"` and `Action` set to `\"*\"` and `Resource` set to `\"*\"`","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later. Providing full administrative privileges instead of restricting to the minimum set of permissions that the user requires exposes the resources to potentially unwanted actions. IAM policies that have a statement with \"Effect\": \"Allow\" with \"Action\": \"\\*\" over \"Resource\": \"\\*\" should be removed.","RemediationProcedure": "**From Console:**Perform the following to detach the policy that has full administrative privileges:1. Sign in to the AWS Management Console and open the IAM console at [https://console.aws.amazon.com/iam/](https://console.aws.amazon.com/iam/). 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. 3. Select the policy that needs to be deleted. 4. In the policy action menu, first select `Detach`5. Select all Users, Groups, Roles that have this policy attached 6. Click `Detach Policy`7. In the policy action menu, select `Detach` **From Command Line:**Perform the following to detach the policy that has full administrative privileges as found in the audit step:1. List all IAM users, groups, and roles that the specified managed policy is attached to: ```aws iam list-entities-for-policy --policy-arn  ``` 2. Detach the policy from all IAM Users: ```aws iam detach-user-policy --user-name  --policy-arn  ``` 3. 
Detach the policy from all IAM Groups: ```aws iam detach-group-policy --group-name  --policy-arn  ``` 4. Detach the policy from all IAM Roles: ```aws iam detach-role-policy --role-name  --policy-arn  ```","AdditionalInformation": ""}],"description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not attached","checks_status": {"fail": 0,"pass": 0,"total": 2,"manual": 0}},"1.17": {"name": "1.17","checks": {"iam_support_role_created": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html:https://aws.amazon.com/premiumsupport/pricing/:https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html:https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html:https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html","Description": "AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.","DefaultValue": null,"AuditProcedure": "**From Command Line:**1. List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the \"Arn\" element value: ``` aws iam list-policies --query \"Policies[?PolicyName == 'AWSSupportAccess']\" ``` 2. Check if the 'AWSSupportAccess' policy is attached to any role:``` aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess ```3. In the output, ensure `PolicyRoles` does not return empty. Example: 'PolicyRoles: [ ]' If it returns empty, refer to the remediation below.","ImpactStatement": "All AWS Support plans include an unlimited number of account and billing support cases, with no long-term contracts. Support billing calculations are performed on a per-account basis for all plans. Enterprise Support plan customers have the option to include multiple enabled accounts in an aggregated monthly billing calculation. Monthly charges for the Business and Enterprise support plans are based on each month's AWS usage charges, subject to a monthly minimum, billed in advance.","AssessmentStatus": "Automated","RationaleStatement": "By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.","RemediationProcedure": "**From Command Line:**1. Create an IAM role for managing incidents with AWS:- Create a trust relationship policy document that allows  to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: ```{\"Version\": \"2012-10-17\",\"Statement\": [{\"Effect\": \"Allow\",\"Principal\": {\"AWS\": \"\"},\"Action\": \"sts:AssumeRole\"}]} ``` 2. Create the IAM role using the above trust policy: ``` aws iam create-role --role-name  --assume-role-policy-document file:///tmp/TrustPolicy.json ``` 3. Attach the 'AWSSupportAccess' managed policy to the created IAM role: ``` aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name  ```","AdditionalInformation": "The AWSSupportAccess policy is a global AWS resource. 
It has the same ARN as `arn:aws:iam::aws:policy/AWSSupportAccess` for every account."}],"description": "Ensure a support role has been created to manage incidents with AWS Support","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.18": {"name": "1.18","checks": {"ec2_instance_profile_attached": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html","Description": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. \"AWS Access\" means accessing the APIs of AWS in order to access AWS resources or manage AWS account resources.","DefaultValue": null,"AuditProcedure": "Where an instance is associated with a Role:For instances that are known to perform AWS actions, ensure that they belong to an instance role that has the necessary permissions:1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) 2. Open the EC2 Dashboard and choose \"Instances\" 3. Click the EC2 instance that performs AWS actions; in the lower pane details, find \"IAM Role\" 4. If the Role is blank, the instance is not assigned to one. 5. If the Role is filled in, it does not mean the instance might not \\*also\\* have credentials encoded on it for some activities.Where an Instance Contains Embedded Credentials:- On the instance that is known to perform AWS actions, audit all scripts and environment variables to ensure that none of them contain AWS credentials.Where an Instance Application Contains Embedded Credentials:- Applications that run on an instance may also have credentials embedded. This is a bad practice, but even worse if the source code is stored in a public code repository such as GitHub. Whether an application contains credentials can be determined by eliminating all other sources of credentials: if the application can still access AWS resources, it likely contains embedded credentials. Another method is to examine all source code and configuration files of the application.","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account they give access to. In contrast, in order to leverage role permissions an attacker would need to gain and maintain access to a specific instance to use the privileges associated with it.Additionally, if credentials are encoded into compiled applications or other hard to change mechanisms, then they are even more unlikely to be properly rotated due to service disruption risks. As time goes on, credentials that cannot be rotated are more likely to be known by an increasing number of individuals who no longer work for the organization owning the credentials.","RemediationProcedure": "IAM roles can only be associated at the launch of an instance. To remediate an instance by adding it to a role, you must create a new instance.If the instance has no external dependencies on its current private IP, or its public addresses are Elastic IPs:1. In AWS IAM create a new role. Assign a permissions policy if the needed permissions are already known. 2. 
In the AWS console launch a new instance with identical settings to the existing instance, and ensure that the newly created role is selected. 3. Shut down both the existing instance and the new instance. 4. Detach disks from both instances. 5. Attach the existing instance disks to the new instance. 6. Boot the new instance and you should have the same machine, but with the associated role.**Note:** if your environment has dependencies on a dynamically assigned PRIVATE IP address you can create an AMI from the existing instance, destroy the old one and then when launching from the AMI, manually assign the previous private IP address.**Note:** if your environment has dependencies on a dynamically assigned PUBLIC IP address there is not a way to ensure the address is retained and assign an instance role. Dependencies on dynamically assigned public IP addresses are a bad practice and, if possible, you may wish to rebuild the instance with a new elastic IP address and make the investment to remediate affected systems while assigning the system to a role.","AdditionalInformation": ""}],"description": "Ensure IAM instance roles are used for AWS resource access from instances","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.19": {"name": "1.19","checks": {"iam_no_expired_server_certificates_stored": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html:https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html","Description": "To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates.Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.","DefaultValue": null,"AuditProcedure": "**From Console:**Getting the certificates' expiration information via the AWS Management Console is not currently supported.To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**Run the list-server-certificates command to list all the IAM-stored server certificates:``` aws iam list-server-certificates ```The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc):``` {\"ServerCertificateMetadataList\": [{\"ServerCertificateId\": \"EHDGFRW7EJFYTE88D\",\"ServerCertificateName\": \"MyServerCertificate\",\"Expiration\": \"2018-07-10T23:59:59Z\",\"Path\": \"/\",\"Arn\": \"arn:aws:iam::012345678910:server-certificate/MySSLCertificate\",\"UploadDate\": \"2018-06-10T11:56:08Z\"}] } ```Verify the `ServerCertificateName` and `Expiration` parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. 
If so, use the AWS API to remove them.If this command returns: ``` { \"ServerCertificateMetadataList\": [] } ``` This means that there are no expired certificates; it DOES NOT mean that no certificates exist.","ImpactStatement": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. One has to make configuration changes at the respective services to ensure there is no interruption in application functionality.","AssessmentStatus": "Automated","RationaleStatement": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. As a best practice, it is recommended to delete expired certificates.","RemediationProcedure": "**From Console:**Removing expired certificates via the AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI).**From Command Line:**To delete an expired certificate, run the following command, replacing  with the name of the certificate to delete:``` aws iam delete-server-certificate --server-certificate-name  ```When the preceding command is successful, it does not return any output.","AdditionalInformation": ""}],"description": "Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"1.20": {"name": "1.20","checks": {"accessanalyzer_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "1. Identity and Access Management","References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html:https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html:https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html","Description": "Enable IAM Access analyzer for IAM policies about all resources in each region.IAM Access Analyzer is a technology introduced at AWS re:Invent 2019. After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least-privilege access. Access Analyzer analyzes only policies that are applied to resources in the same AWS Region.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Open the IAM console at `https://console.aws.amazon.com/iam/` 2. Choose `Access analyzer` 3. Click 'Analyzers' 4. Ensure that at least one analyzer is present 5. Ensure that the `STATUS` is set to `Active` 6. Repeat these steps for each active region**From Command Line:**1. Run the following command: ``` aws accessanalyzer list-analyzers | grep status ``` 2. Ensure that for at least one Analyzer the `status` is set to `ACTIVE`3. 
Repeat the steps above for each active region.If an Access analyzer is not listed for each region or the status is not set to active, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 buckets, IAM roles, KMS (Key Management Service) keys, AWS Lambda functions, and Amazon SQS (Simple Queue Service) queues.","RemediationProcedure": "**From Console:**Perform the following to enable IAM Access analyzer for IAM policies:1. Open the IAM console at `https://console.aws.amazon.com/iam/.` 2. Choose `Access analyzer`. 3. Choose `Create analyzer`. 4. On the `Create analyzer` page, confirm that the `Region` displayed is the Region where you want to enable Access Analyzer. 5. Enter a name for the analyzer. `Optional as it will generate a name for you automatically`. 6. Add any tags that you want to apply to the analyzer. `Optional`.7. Choose `Create Analyzer`. 8. Repeat these steps for each active region**From Command Line:**Run the following command: ``` aws accessanalyzer create-analyzer --analyzer-name  --type  ``` Repeat the command above for each active region.**Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.","AdditionalInformation": ""}],"description": "Ensure that IAM Access analyzer is enabled for all regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"1.21": {"name": "1.21","checks": {"iam_check_saml_providers_sts": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "1. Identity and Access Management","References": "","Description": "In multi-account environments, IAM user centralization facilitates greater user control. User access beyond the initial account is then provided via role assumption. Centralization of users can be accomplished through federation with an external identity provider or through the use of AWS Organizations.","DefaultValue": null,"AuditProcedure": "For multi-account AWS environments with an external identity provider... 1. Determine the master account for identity federation or IAM user management 2. Login to that account through the AWS Management Console 3. Click `Services`4. Click `IAM`5. Click `Identity providers` 6. Verify the configurationThen, determine all accounts that should not have local users present. For each account...1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services`5. Click `IAM`6. Click `Users` 7. Confirm that no IAM users representing individuals are presentFor multi-account AWS environments implementing AWS Organizations without an external identity provider... 1. Determine all accounts that should not have local users present 2. Log into the AWS Management Console 3. Switch role into each identified account 4. Click `Services`5. Click `IAM`6. Click `Users` 7. 
Confirm that no IAM users representing individuals are present","ImpactStatement": "","AssessmentStatus": "Manual","RationaleStatement": "Centralizing IAM user management to a single identity store reduces complexity and thus the likelihood of access management errors.","RemediationProcedure": "The remediation procedure will vary based on the individual organization's implementation of identity federation and/or AWS Organizations, with the acceptance criterion that no non-service IAM users or non-root accounts are present outside the account providing centralized IAM user management.","AdditionalInformation": ""}],"description": "Ensure IAM users are managed centrally via identity federation or AWS Organizations for multi-account environments","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.10": {"name": "3.10","checks": {"cloudtrail_s3_dataevents_write_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to CloudTrail dashboard at `https://console.aws.amazon.com/cloudtrail/` 2. In the left panel, click `Trails` and then click on the CloudTrail Name that you want to examine. 3. Review `General details` 4. Confirm that `Multi-region trail` is set to `Yes` 5. Scroll down to `Data events` 6. Confirm that it reads: Data events: S3 Bucket Name: All current and future S3 buckets Read: Enabled Write: Enabled 7. Repeat steps 2 to 6 to verify Multi-region trail and Data events logging of S3 buckets in CloudTrail. If the CloudTrails do not have multi-region and data events configured for S3, refer to the remediation below.**From Command Line:**1. Run the `list-trails` command to list the names of all Amazon CloudTrail trails currently available in all AWS regions: ``` aws cloudtrail list-trails ``` 2. The command output will be a list of all the trail names to include. \"TrailARN\": \"arn:aws:cloudtrail:::trail/\", \"Name\": \"\", \"HomeRegion\": \"\" 3. Next run the `get-trail` command to determine Multi-region. ``` aws cloudtrail get-trail --name  --region  ``` 4. The command output should include: \"IsMultiRegionTrail\": true, 5. Next run the `get-event-selectors` command using the `Name` of the trail and the `region` returned in step 2 to determine if Data events logging feature is enabled within the selected CloudTrail trail for all S3 buckets: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 6. The command output should be an array that contains the configuration of the AWS resource (S3 bucket) defined for the Data events selector. \"Type\": \"AWS::S3::Object\",\"Values\": [\"arn:aws:s3\" 7. If the `get-event-selectors` command returns an empty array '[]', the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 8. Repeat steps 1 to 5 for auditing each CloudTrail to determine if Data events for S3 are covered. 
If Multi-region is not set to true and the Data events do not show S3 defined as shown, refer to the remediation procedure below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click the `Properties` tab to see the bucket configuration in detail. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the `Write` event checkbox, so that `object-level` logging for Write events is enabled. 6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets.**From Command Line:**1. To enable `object-level` data events logging for S3 buckets within your AWS account, run the `put-event-selectors` command using the name of the trail that you want to reconfigure as the identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"WriteOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be the `object-level` event trail configuration. 3. If you want to enable it for all buckets at once then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of write events. 5. Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for write events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"3.11": {"name": "3.11","checks": {"cloudtrail_s3_dataevents_read_enabled": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "3. Logging","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html","Description": "S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don't log data events and so it is recommended to enable Object-level logging for S3 buckets.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click the `Properties` tab to see the bucket configuration in detail. 4. If the current status for `Object-level` logging is set to `Disabled`, then object-level logging of read events for the selected s3 bucket is not set. 5. 
If the current status for `Object-level` logging is set to `Enabled`, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. 6. Repeat steps 2 to 5 to verify `object-level` logging for `read` events of your other S3 buckets.**From Command Line:** 1. Run the `describe-trails` command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: ``` aws cloudtrail describe-trails --region  --output table --query trailList[*].Name ``` 2. The command output will be a table of the requested trail names. 3. Run the `get-event-selectors` command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: ``` aws cloudtrail get-event-selectors --region  --trail-name  --query EventSelectors[*].DataResources[] ``` 4. The command output should be an array that contains the configuration of the AWS resource (S3 bucket) defined for the Data events selector. 5. If the `get-event-selectors` command returns an empty array, the Data events are not included in the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. 7. Change the AWS region by updating the `--region` command parameter and perform the audit process for other regions.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and navigate to S3 dashboard at `https://console.aws.amazon.com/s3/` 2. In the left navigation panel, click `buckets` and then click on the S3 Bucket Name that you want to examine. 3. Click the `Properties` tab to see the bucket configuration in detail. 4. Click on the `Object-level` logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link `https://console.aws.amazon.com/cloudtrail/` 5. Once the Cloudtrail is selected, check the Read event checkbox, so that `object-level` logging for `Read` events is enabled. 6. Repeat steps 2 to 5 to enable `object-level` logging of read events for other S3 buckets.**From Command Line:** 1. To enable `object-level` data events logging for S3 buckets within your AWS account, run the `put-event-selectors` command using the name of the trail that you want to reconfigure as the identifier: ``` aws cloudtrail put-event-selectors --region  --trail-name  --event-selectors '[{ \"ReadWriteType\": \"ReadOnly\", \"IncludeManagementEvents\":true, \"DataResources\": [{ \"Type\": \"AWS::S3::Object\", \"Values\": [\"arn:aws:s3:::/\"] }] }]' ``` 2. The command output will be the `object-level` event trail configuration. 3. If you want to enable it for all buckets at once then change the Values parameter to `[\"arn:aws:s3\"]` in the command given above. 4. Repeat step 1 for each s3 bucket to update `object-level` logging of read events. 5. 
Change the AWS region by updating the `--region` command parameter and perform the process for other regions.","AdditionalInformation": ""}],"description": "Ensure that Object-level logging for read events is enabled for S3 bucket","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.10": {"name": "4.10","checks": {"cloudwatch_log_metric_filter_security_group_changes": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Security Groups are a stateful packet filter that controls ingress and egress traffic within a VPC. It is recommended that a metric filter and alarm be established for detecting changes to Security Groups.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query \"MetricAlarms[?MetricName== '']\" ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to security group will help ensure that resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. 
Create a metric filter based on filter pattern provided which checks for security groups changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name \"\" --filter-name \"\" --metric-transformations metricName= \"\" ,metricNamespace=\"CISBenchmark\",metricValue=1 --filter-pattern \"{ ($.eventName = AuthorizeSecurityGroupIngress) || ($.eventName = AuthorizeSecurityGroupEgress) || ($.eventName = RevokeSecurityGroupIngress) || ($.eventName = RevokeSecurityGroupEgress) || ($.eventName = CreateSecurityGroup) || ($.eventName = DeleteSecurityGroup) }\" ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name \"\" ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn \"\" --protocol  --notification-endpoint \"\" ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name \"\" --metric-name \"\" --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace \"CISBenchmark\" --alarm-actions \"\" ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for security group changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.11": {"name": "4.11","checks": {"cloudwatch_changes_to_network_acls_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets within a VPC. It is recommended that a metric filter and alarm be established for changes made to NACLs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. 
Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to NACLs will help ensure that AWS resources and services are not unintentionally exposed.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for NACL changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateNetworkAcl) || ($.eventName = CreateNetworkAclEntry) || ($.eventName = DeleteNetworkAcl) || ($.eventName = DeleteNetworkAclEntry) || ($.eventName = ReplaceNetworkAclEntry) || ($.eventName = ReplaceNetworkAclAssociation) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL)","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.12": {"name": "4.12","checks": {"cloudwatch_changes_to_network_gateways_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Network gateways are required to send/receive traffic to a destination outside of a VPC. It is recommended that a metric filter and alarm be established for changes to network gateways.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``: ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }\" ``` 4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4. ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. 
Ensure there is at least one active subscriber to the SNS topic ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. ``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to network gateways will help ensure that all ingress/egress traffic traverses the VPC border via a controlled path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for network gateways changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateCustomerGateway) || ($.eventName = DeleteCustomerGateway) || ($.eventName = AttachInternetGateway) || ($.eventName = CreateInternetGateway) || ($.eventName = DeleteInternetGateway) || ($.eventName = DetachInternetGateway) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for changes to network gateways","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.13": {"name": "4.13","checks": {"cloudwatch_changes_to_network_route_tables_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. Routing tables are used to route network traffic between subnets and to network gateways. 
It is recommended that a metric filter and alarm be established for changes to route tables.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for route table changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateRoute) || ($.eventName = CreateRouteTable) || ($.eventName = ReplaceRoute) || ($.eventName = ReplaceRouteTableAssociation) || ($.eventName = DeleteRouteTable) || ($.eventName = DeleteRoute) || ($.eventName = DisassociateRouteTable) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. 
Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for route table changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.14": {"name": "4.14","checks": {"cloudwatch_changes_to_vpcs_alarm_configured": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html:https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/sns/latest/dg/SubscribeTopic.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is possible to have more than 1 VPC within an account, in addition it is also possible to create a peer connection between 2 VPCs enabling network traffic to route between VPCs. It is recommended that a metric filter and alarm be established for changes made to VPCs.","DefaultValue": null,"AuditProcedure": "Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured:1. Identify the log group name configured for use with active multi-region CloudTrail:- List all CloudTrails: `aws cloudtrail describe-trails`- Identify Multi region Cloudtrails: `Trails with \"IsMultiRegionTrail\" set to true`- From value associated with CloudWatchLogsLogGroupArn note ``Example: for CloudWatchLogsLogGroupArn that looks like `arn:aws:logs:::log-group:NewGroup:*`, `` would be `NewGroup`- Ensure Identified Multi region CloudTrail is active`aws cloudtrail get-trail-status --name `ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events`aws cloudtrail get-event-selectors --trail-name `Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to `true` and `ReadWriteType` set to `All`2. Get a list of all associated metric filters for this ``:``` aws logs describe-metric-filters --log-group-name \"\" ```3. 
Ensure the output from the above command contains the following:``` \"filterPattern\": \"{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }\" ```4. Note the `` value associated with the `filterPattern` found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4.``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ```6. Note the `AlarmActions` value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN.``` Example of valid \"SubscriptionArn\": \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring changes to VPC will help ensure VPC traffic flow is not getting impacted.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for VPC changes and the `` taken from audit step 1. ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) || ($.eventName = ModifyVpcAttribute) || ($.eventName = AcceptVpcPeeringConnection) || ($.eventName = CreateVpcPeeringConnection) || ($.eventName = DeleteVpcPeeringConnection) || ($.eventName = RejectVpcPeeringConnection) || ($.eventName = AttachClassicLinkVpc) || ($.eventName = DetachClassicLinkVpc) || ($.eventName = DisableVpcClassicLink) || ($.eventName = EnableVpcClassicLink) }' ```**Note**: You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify ``` aws sns create-topic --name  ```**Note**: you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2 ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ```**Note**: you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. 
Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2 ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": "Configuring log metric filter and alarm on Multi-region (global) CloudTrail - ensures that activities from all regions (used as well as unused) are monitored - ensures that activities on all supported global services are monitored - ensures that all management events across all regions are monitored"}],"description": "Ensure a log metric filter and alarm exist for VPC changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"4.15": {"name": "4.15","checks": {"cloudwatch_log_metric_filter_aws_organizations_changes": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "4. Monitoring","References": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html:https://docs.aws.amazon.com/organizations/latest/userguide/orgs_security_incident-response.html","Description": "Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms. It is recommended that a metric filter and alarm be established for AWS Organizations changes made in the master AWS Account.","DefaultValue": null,"AuditProcedure": "1. Perform the following to ensure that there is at least one active multi-region CloudTrail with prescribed metric filters and alarms configured: - Identify the log group name configured for use with active multi-region CloudTrail: - List all CloudTrails:``` aws cloudtrail describe-trails ``` - Identify Multi region Cloudtrails, Trails with `\"IsMultiRegionTrail\"` set to true - From value associated with CloudWatchLogsLogGroupArn note **Example:** for CloudWatchLogsLogGroupArn that looks like arn:aws:logs:::log-group:NewGroup:*,  would be NewGroup- Ensure Identified Multi region CloudTrail is active: ``` aws cloudtrail get-trail-status --name  ``` Ensure `IsLogging` is set to `TRUE`- Ensure identified Multi-region Cloudtrail captures all Management Events: ``` aws cloudtrail get-event-selectors --trail-name  ``` - Ensure there is at least one Event Selector for a Trail with `IncludeManagementEvents` set to true and `ReadWriteType` set to `All`.2. Get a list of all associated metric filters for this : ``` aws logs describe-metric-filters --log-group-name \"\" ``` 3. Ensure the output from the above command contains the following: ``` \"filterPattern\": \"{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }\" ``` 4. 
Note the `` value associated with the filterPattern found in step 3.5. Get a list of CloudWatch alarms and filter on the `` captured in step 4: ``` aws cloudwatch describe-alarms --query 'MetricAlarms[?MetricName== ``]' ``` 6. Note the AlarmActions value - this will provide the SNS topic ARN value.7. Ensure there is at least one active subscriber to the SNS topic: ``` aws sns list-subscriptions-by-topic --topic-arn ``` at least one subscription should have \"SubscriptionArn\" with valid aws ARN. Example of valid \"SubscriptionArn\":``` \"arn:aws:sns::::\" ```","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Monitoring AWS Organizations changes can help you prevent any unwanted, accidental or intentional modifications that may lead to unauthorized access or other security breaches. This monitoring technique helps you to ensure that any unexpected changes performed within your AWS Organizations can be investigated and any unwanted changes can be rolled back.","RemediationProcedure": "Perform the following to setup the metric filter, alarm, SNS topic, and subscription:1. Create a metric filter based on filter pattern provided which checks for AWS Organizations changes and the `` taken from audit step 1: ``` aws logs put-metric-filter --log-group-name  --filter-name `` --metric-transformations metricName= `` ,metricNamespace='CISBenchmark',metricValue=1 --filter-pattern '{ ($.eventSource = organizations.amazonaws.com) && (($.eventName = \"AcceptHandshake\") || ($.eventName = \"AttachPolicy\") || ($.eventName = \"CreateAccount\") || ($.eventName = \"CreateOrganizationalUnit\") || ($.eventName = \"CreatePolicy\") || ($.eventName = \"DeclineHandshake\") || ($.eventName = \"DeleteOrganization\") || ($.eventName = \"DeleteOrganizationalUnit\") || ($.eventName = \"DeletePolicy\") || ($.eventName = \"DetachPolicy\") || ($.eventName = \"DisablePolicyType\") || ($.eventName = \"EnablePolicyType\") || ($.eventName = \"InviteAccountToOrganization\") || ($.eventName = \"LeaveOrganization\") || ($.eventName = \"MoveAccount\") || ($.eventName = \"RemoveAccountFromOrganization\") || ($.eventName = \"UpdatePolicy\") || ($.eventName = \"UpdateOrganizationalUnit\")) }' ``` **Note:** You can choose your own metricName and metricNamespace strings. Using the same metricNamespace for all Foundations Benchmark metrics will group them together.2. Create an SNS topic that the alarm will notify: ``` aws sns create-topic --name  ``` **Note:** you can execute this command once and then re-use the same topic for all monitoring alarms.3. Create an SNS subscription to the topic created in step 2: ``` aws sns subscribe --topic-arn  --protocol  --notification-endpoint  ``` **Note:** you can execute this command once and then re-use the SNS subscription for all monitoring alarms.4. Create an alarm that is associated with the CloudWatch Logs Metric Filter created in step 1 and an SNS topic created in step 2: ``` aws cloudwatch put-metric-alarm --alarm-name `` --metric-name `` --statistic Sum --period 300 --threshold 1 --comparison-operator GreaterThanOrEqualToThreshold --evaluation-periods 1 --namespace 'CISBenchmark' --alarm-actions  ```","AdditionalInformation": ""}],"description": "Ensure a log metric filter and alarm exists for AWS Organizations changes","checks_status": {"fail": 0,"pass": 0,"total": 1,"manual": 0}},"2.1.1": {"name": "2.1.1","checks": {"s3_bucket_default_encryption": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "2.1. 
Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/default-bucket-encryption.html:https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-related-resources","Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select a Bucket. 3. Click on 'Properties'. 4. Verify that `Default Encryption` is enabled, and displays either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. Run command to list buckets ``` aws s3 ls ``` 2. For each bucket, run``` aws s3api get-bucket-encryption --bucket  ``` 3. Verify that either``` \"SSEAlgorithm\": \"AES256\" ```or``` \"SSEAlgorithm\": \"aws:kms\"```is displayed.","ImpactStatement": "Amazon S3 buckets with default bucket encryption using SSE-KMS cannot be used as destination buckets for Amazon S3 server access logging. Only SSE-S3 default encryption is supported for server access log destination buckets.","AssessmentStatus": "Automated","RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select a Bucket. 3. Click on 'Properties'. 4. Click edit on `Default Encryption`. 5. Select either `AES-256`, `AWS-KMS`, `SSE-KMS` or `SSE-S3`. 6. Click `Save` 7. Repeat for all the buckets in your AWS account lacking encryption.**From Command Line:**Run either``` aws s3api put-bucket-encryption --bucket  --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"AES256\"}}]}' ```or``` aws s3api put-bucket-encryption --bucket  --server-side-encryption-configuration '{\"Rules\": [{\"ApplyServerSideEncryptionByDefault\": {\"SSEAlgorithm\": \"aws:kms\",\"KMSMasterKeyID\": \"aws/s3\"}}]}' ```**Note:** the KMSMasterKeyID can be set to the master key of your choosing; aws/s3 is an AWS preconfigured default.","AdditionalInformation": "S3 bucket encryption only applies to objects as they are placed in the bucket. Enabling S3 bucket encryption does **not** encrypt objects previously stored within the bucket."}],"description": "Ensure all S3 buckets employ encryption-at-rest","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.2": {"name": "2.1.2","checks": {"s3_bucket_secure_transport_policy": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/:https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/:https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-policy.html","Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.","DefaultValue": null,"AuditProcedure": "To allow access to HTTPS you can use a condition that checks for the key `\"aws:SecureTransport: true\"`. 
This means that the request is sent through HTTPS but that HTTP can still be used. So to make sure you do not allow HTTP access confirm that there is a bucket policy that explicitly denies access for HTTP requests and that it contains the key \"aws:SecureTransport\": \"false\".**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions', then Click on `Bucket Policy`. 4. Ensure that a policy is listed that matches: ``` '{\"Sid\": ,\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}' ``` `` and `` will be specific to your account5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets``` aws s3 ls ``` 2. Using the list of buckets run this command on each of them: ``` aws s3api get-bucket-policy --bucket  | grep aws:SecureTransport ``` 3. Confirm that `aws:SecureTransport` is set to false `aws:SecureTransport:false` 4. Confirm that the policy line has Effect set to Deny 'Effect:Deny'","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ 2. Select the Check box next to the Bucket. 3. Click on 'Permissions'. 4. Click 'Bucket Policy' 5. Add this to the existing policy filling in the required information ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 6. Save 7. Repeat for all the buckets in your AWS account that contain sensitive data.**From Console** using AWS Policy Generator:1. Repeat steps 1-4 above. 2. Click on `Policy Generator` at the bottom of the Bucket Policy Editor 3. Select Policy Type `S3 Bucket Policy` 4. Add Statements - `Effect` = Deny - `Principal` = * - `AWS Service` = Amazon S3 - `Actions` = * - `Amazon Resource Name` =  5. Generate Policy 6. Copy the text and add it to the Bucket Policy.**From Command Line:**1. Export the bucket policy to a json file. ``` aws s3api get-bucket-policy --bucket  --query Policy --output text > policy.json ```2. Modify the policy.json file by adding in this statement: ``` {\"Sid\": \",\"Effect\": \"Deny\",\"Principal\": \"*\",\"Action\": \"s3:*\",\"Resource\": \"arn:aws:s3:::/*\",\"Condition\": {\"Bool\": {\"aws:SecureTransport\": \"false\"}}} ``` 3. Apply this modified policy back to the S3 bucket: ``` aws s3api put-bucket-policy --bucket  --policy file://policy.json ```","AdditionalInformation": ""}],"description": "Ensure S3 Bucket Policy is set to deny HTTP requests","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.3": {"name": "2.1.3","checks": {"s3_bucket_no_mfa_delete": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.1. 
Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete:https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html:https://aws.amazon.com/blogs/security/securing-access-to-aws-using-mfa-part-3/:https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html","Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.","DefaultValue": null,"AuditProcedure": "Perform the steps below to confirm MFA delete is configured on an S3 Bucket**From Console:**1. Login to the S3 console at `https://console.aws.amazon.com/s3/`2. Click the `Check` box next to the Bucket name you want to confirm3. In the window under `Properties`4. Confirm that Versioning is `Enabled`5. Confirm that MFA Delete is `Enabled`**From Command Line:**1. Run the `get-bucket-versioning` ``` aws s3api get-bucket-versioning --bucket my-bucket ```Output example: ```  EnabledEnabled ```If the Console or the CLI output does not show Versioning and MFA Delete `enabled` refer to the remediation below.","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Adding MFA delete to an S3 bucket, requires additional authentication when you change the version state of your bucket or you delete and object version adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.","RemediationProcedure": "Perform the steps below to enable MFA delete on an S3 bucket.Note: -You cannot enable MFA Delete using the AWS Management Console. You must use the AWS CLI or API. -You must use your 'root' account to enable MFA Delete on S3 buckets.**From Command line:**1. Run the s3api put-bucket-versioning command``` aws s3api put-bucket-versioning --profile my-root-profile --bucket Bucket_Name --versioning-configuration Status=Enabled,MFADelete=Enabled --mfa โ€œarn:aws:iam::aws_account_id:mfa/root-account-mfa-device passcodeโ€ ```","AdditionalInformation": ""}],"description": "Ensure MFA Delete is enabled on S3 buckets","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}},"2.1.4": {"name": "2.1.4","checks": {"macie_is_enabled": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 2","Section": "2.1. Simple Storage Service (S3)","References": "https://aws.amazon.com/macie/getting-started/:https://docs.aws.amazon.com/workspaces/latest/adminguide/data-protection.html:https://docs.aws.amazon.com/macie/latest/user/data-classification.html","Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.","DefaultValue": null,"AuditProcedure": "Perform the following steps to determine if Macie is running:**From Console:** 1. Login to the Macie console at https://console.aws.amazon.com/macie/ 2. In the left hand pane click on By job under findings. 3. Confirm that you have a Job setup for your S3 BucketsWhen you log into the Macie console if you aren't taken to the summary page and you don't have a job setup and running then refer to the remediation procedure below.If you are using a 3rd Party tool to manage and protect your s3 data you meet this recommendation.","ImpactStatement": "There is a cost associated with using Amazon Macie. 
There is also typically a cost associated with 3rd Party tools that perform similar processes and protection.","AssessmentStatus": "Manual","RationaleStatement": "Using a Cloud service or 3rd Party software to continuously monitor and automate the process of data discovery and classification for S3 buckets using machine learning and pattern matching is a strong defense in protecting that information.Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to discover and protect your sensitive data in AWS.","RemediationProcedure": "Perform the steps below to enable and configure Amazon Macie**From Console:**1. Log on to the Macie console at `https://console.aws.amazon.com/macie/`2. Click `Get started`.3. Click `Enable Macie`.Setup a repository for sensitive data discovery results1. In the Left pane, under Settings, click `Discovery results`.2. Make sure `Create bucket` is selected.3. Create a bucket, enter a name for the bucket. The name must be unique across all S3 buckets. In addition, the name must start with a lowercase letter or a number.4. Click on `Advanced`.5. Block all public access, make sure `Yes` is selected.6. KMS encryption, specify the AWS KMS key that you want to use to encrypt the results. The key must be a symmetric, customer master key (CMK) that's in the same Region as the S3 bucket.7. Click on `Save`Create a job to discover sensitive data1. In the left pane, click `S3 buckets`. Macie displays a list of all the S3 buckets for your account.2. Select the `check box` for each bucket that you want Macie to analyze as part of the job3. Click `Create job`.3. Click `Quick create`.4. For the Name and description step, enter a name and, optionally, a description of the job.5. Then click `Next`.6. For the Review and create step, click `Submit`.Review your findings1. In the left pane, click `Findings`.2. To view the details of a specific finding, choose any field other than the check box for the finding.If you are using a 3rd Party tool to manage and protect your s3 data, follow the Vendor documentation for implementing and configuring that tool.","AdditionalInformation": ""}],"description": "Ensure all data in Amazon S3 has been discovered, classified and secured when required.","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.1.5": {"name": "2.1.5","checks": {"s3_bucket_level_public_access_block": "PASS","s3_account_level_public_access_blocks": null},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.1. Simple Storage Service (S3)","References": "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html","Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.","DefaultValue": null,"AuditProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. 
Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Ensure that block public access settings are set appropriately for this bucket 5. Repeat for all the buckets in your AWS account.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Find the public access setting on that bucket ``` aws s3api get-public-access-block --bucket  ``` Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"BlockPublicAcls\": true,\"IgnorePublicAcls\": true,\"BlockPublicPolicy\": true,\"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.**If utilizing Block Public Access (account settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block public access (account settings)` 3. Ensure that block public access settings are set appropriately for your AWS account.**From Command Line:**To check Public access settings for this account status, run the following command, `aws s3control get-public-access-block --account-id  --region `Output if Block Public access is enabled:``` {\"PublicAccessBlockConfiguration\": {\"IgnorePublicAcls\": true, \"BlockPublicPolicy\": true, \"BlockPublicAcls\": true, \"RestrictPublicBuckets\": true} } ```If the output reads `false` for the separate configuration settings then proceed to the remediation.","ImpactStatement": "When you apply Block Public Access settings to an account, the settings apply to all AWS Regions globally. The settings might not take effect in all Regions immediately or simultaneously, but they eventually propagate to all Regions.","AssessmentStatus": "Automated","RationaleStatement": "Amazon S3 `Block public access (bucket settings)` prevents the accidental or malicious public exposure of data contained within the respective bucket(s). Amazon S3 `Block public access (account settings)` prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account.Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.","RemediationProcedure": "**If utilizing Block Public Access (bucket settings)****From Console:**1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Select the Check box next to the Bucket. 3. Click on 'Edit public access settings'. 4. Click 'Block all public access' 5. Repeat for all the buckets in your AWS account that contain sensitive data.**From Command Line:**1. List all of the S3 Buckets ``` aws s3 ls ``` 2. Set the Block Public Access to true on that bucket ``` aws s3api put-public-access-block --bucket  --public-access-block-configuration \"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true\" ```**If utilizing Block Public Access (account settings)****From Console:**If the output reads `true` for the separate configuration settings then it is set on the account.1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/2. Choose `Block Public Access (account settings)` 3. Choose `Edit` to change the block public access settings for all the buckets in your AWS account 4. 
Choose the settings you want to change, and then choose `Save`. For details about each setting, pause on the `i` icons. 5. When you're asked for confirmation, enter `confirm`. Then Click `Confirm` to save your changes.**From Command Line:**To set Block Public access settings for this account, run the following command: ``` aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id  ```","AdditionalInformation": ""}],"description": "Ensure that S3 Buckets are configured with 'Block public access (bucket settings)'","checks_status": {"fail": 0,"pass": 1,"total": 2,"manual": 0}},"2.2.1": {"name": "2.2.1","checks": {"ec2_ebs_volume_encryption": "PASS"},"status": "PASS","attributes": [{"Profile": "Level 1","Section": "2.2. Elastic Compute Cloud (EC2)","References": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html:https://aws.amazon.com/blogs/aws/new-opt-in-to-default-encryption-for-new-ebs-volumes/","Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Verify `Always encrypt new EBS volumes` displays `Enabled`. 4. Review every region in-use.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. Run``` aws --region  ec2 get-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Review every region in-use.**Note:** EBS volume encryption is configured per region.","ImpactStatement": "Losing access or removing the KMS key in use by the EBS volumes will result in no longer being able to access the volumes.","AssessmentStatus": "Automated","RationaleStatement": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.","RemediationProcedure": "**From Console:**1. Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/2. Under `Account attributes`, click `EBS encryption`. 3. Click `Manage`. 4. Click the `Enable` checkbox. 5. Click `Update EBS encryption` 6. Repeat for every region requiring the change.**Note:** EBS volume encryption is configured per region.**From Command Line:**1. Run``` aws --region  ec2 enable-ebs-encryption-by-default ``` 2. Verify that `\"EbsEncryptionByDefault\": true` is displayed. 3. Repeat every region requiring the change.**Note:** EBS volume encryption is configured per region.","AdditionalInformation": "Default EBS volume encryption only applies to newly created EBS volumes. Existing EBS volumes are **not** converted automatically."}],"description": "Ensure EBS Volume Encryption is Enabled in all Regions","checks_status": {"fail": 0,"pass": 1,"total": 1,"manual": 0}},"2.3.1": {"name": "2.3.1","checks": {"rds_instance_storage_encrypted": "FAIL"},"status": "FAIL","attributes": [{"Profile": "Level 1","Section": "2.3. 
Relational Database Service (RDS)","References": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html:https://aws.amazon.com/blogs/database/selecting-the-right-encryption-options-for-amazon-rds-and-amazon-aurora-database-engines/#:~:text=With%20RDS%2Dencrypted%20resources%2C%20data,transparent%20to%20your%20database%20engine.:https://aws.amazon.com/rds/features/security/","Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.","DefaultValue": null,"AuditProcedure": "**From Console:**1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/ 2. In the navigation pane, under RDS dashboard, click `Databases`. 3. Select the RDS Instance that you want to examine 4. Click `Instance Name` to see details, then click on `Configuration` tab. 5. Under Configuration Details section, In Storage pane search for the `Encryption Enabled` Status. 6. If the current status is set to `Disabled`, Encryption is not enabled for the selected RDS Instance database instance. 7. Repeat steps 3 to 7 to verify encryption status of other RDS Instance in same region. 8. Change region from the top of the navigation bar and repeat audit for other regions.**From Command Line:**1. Run `describe-db-instances` command to list all RDS Instance database names, available in the selected AWS region, Output will return each Instance database identifier-name.``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run again `describe-db-instances` command using the RDS Instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True` Or `False`. ``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ``` 3. If the StorageEncrypted parameter value is `False`, Encryption is not enabled for the selected RDS database instance. 4. Repeat steps 1 to 3 for auditing each RDS Instance and change Region to verify for other regions","ImpactStatement": "","AssessmentStatus": "Automated","RationaleStatement": "Databases are likely to hold sensitive and critical data, it is highly recommended to implement encryption in order to protect your data from unauthorized access or disclosure. With RDS encryption enabled, the data stored on the instance's underlying storage, the automated backups, read replicas, and snapshots, are all encrypted.","RemediationProcedure": "**From Console:**1. Login to the AWS Management Console and open the RDS dashboard at https://console.aws.amazon.com/rds/. 2. In the left navigation panel, click on `Databases` 3. Select the Database instance that needs to be encrypted. 4. Click on `Actions` button placed at the top right and select `Take Snapshot`. 5. On the Take Snapshot page, enter a database name of which you want to take a snapshot in the `Snapshot Name` field and click on `Take Snapshot`. 6. Select the newly created snapshot and click on the `Action` button placed at the top right and select `Copy snapshot` from the Action menu. 7. On the Make Copy of DB Snapshot page, perform the following:- In the New DB Snapshot Identifier field, Enter a name for the `new snapshot`. 
- Check `Copy Tags`, New snapshot must have the same tags as the source snapshot. - Select `Yes` from the `Enable Encryption` dropdown list to enable encryption, You can choose to use the AWS default encryption key or custom key from Master Key dropdown list.8. Click `Copy Snapshot` to create an encrypted copy of the selected instance snapshot. 9. Select the new Snapshot Encrypted Copy and click on the `Action` button placed at the top right and select `Restore Snapshot` button from the Action menu, This will restore the encrypted snapshot to a new database instance. 10. On the Restore DB Instance page, enter a unique name for the new database instance in the DB Instance Identifier field. 11. Review the instance configuration details and click `Restore DB Instance`. 12. As the new instance provisioning process is completed can update application configuration to refer to the endpoint of the new Encrypted database instance Once the database endpoint is changed at the application level, can remove the unencrypted instance.**From Command Line:**1. Run `describe-db-instances` command to list all RDS database names available in the selected AWS region, The command output should return the database instance identifier. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 2. Run `create-db-snapshot` command to create a snapshot for the selected database instance, The command output will return the `new snapshot` with name DB Snapshot Name. ``` aws rds create-db-snapshot --region  --db-snapshot-identifier  --db-instance-identifier  ``` 3. Now run `list-aliases` command to list the KMS keys aliases available in a specified region, The command output should return each `key alias currently available`. For our RDS encryption activation process, locate the ID of the AWS default KMS key. ``` aws kms list-aliases --region  ``` 4. Run `copy-db-snapshot` command using the default KMS key ID for RDS instances returned earlier to create an encrypted copy of the database instance snapshot, The command output will return the `encrypted instance snapshot configuration`. ``` aws rds copy-db-snapshot --region  --source-db-snapshot-identifier  --target-db-snapshot-identifier  --copy-tags --kms-key-id  ``` 5. Run `restore-db-instance-from-db-snapshot` command to restore the encrypted snapshot created at the previous step to a new database instance, If successful, the command output should return the new encrypted database instance configuration. ``` aws rds restore-db-instance-from-db-snapshot --region  --db-instance-identifier  --db-snapshot-identifier  ``` 6. Run `describe-db-instances` command to list all RDS database names, available in the selected AWS region, Output will return database instance identifier name Select encrypted database name that we just created DB-Name-Encrypted. ``` aws rds describe-db-instances --region  --query 'DBInstances[*].DBInstanceIdentifier' ``` 7. Run again `describe-db-instances` command using the RDS instance identifier returned earlier, to determine if the selected database instance is encrypted, The command output should return the encryption status `True`. 
``` aws rds describe-db-instances --region  --db-instance-identifier  --query 'DBInstances[*].StorageEncrypted' ```","AdditionalInformation": ""}],"description": "Ensure that encryption is enabled for RDS Instances","checks_status": {"fail": 1,"pass": 0,"total": 1,"manual": 0}}},"requirements_passed": 48,"requirements_failed": 10,"requirements_manual": 0,"total_requirements": 58,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}},{"model": "api.complianceoverview","pk": "fb07e872-c61f-4749-96d2-da2b68993ae5","fields": {"tenant": "12646005-9067-4d2a-a098-8bb378604362","inserted_at": "2024-11-15T13:14:10.043Z","compliance_id": "gxp_21_cfr_part_11_aws","framework": "GxP-21-CFR-Part-11","version": "","description": "GxP refers to the regulations and guidelines that are applicable to life sciences organizations that make food and medical products. Medical products that fall under this include medicines, medical devices, and medical software applications. The overall intent of GxP requirements is to ensure that food and medical products are safe for consumers. It's also to ensure the integrity of data that's used to make product-related safety decisions.","region": "eu-west-1","requirements": {"11.30": {"name": "11.30 Controls for open systems","checks": {"elb_ssl_listeners": "FAIL","kms_cmk_rotation_enabled": null,"ec2_ebs_volume_encryption": "PASS","ec2_ebs_default_encryption": "PASS","elbv2_insecure_ssl_ciphers": "PASS","s3_bucket_default_encryption": "PASS","efs_encryption_at_rest_enabled": "FAIL","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL","sns_topics_kms_encryption_at_rest_enabled": "FAIL","dynamodb_tables_kms_cmk_encryption_enabled": null,"cloudwatch_log_group_kms_encryption_enabled": "FAIL","apigateway_restapi_client_certificate_enabled": "FAIL","sagemaker_notebook_instance_encryption_enabled": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"opensearch_service_domains_node_to_node_encryption_enabled": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.30","Section": "11.30 Controls for open systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use open systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, as appropriate, the confidentiality of electronic records from the point of their creation to the point of their receipt. 
Such procedures and controls shall include those identified in 11.10, as appropriate, and additional measures such as document encryption and use of appropriate digital signature standards to ensure, as necessary under the circumstances, record authenticity, integrity, and confidentiality.","checks_status": {"fail": 9,"pass": 4,"total": 21,"manual": 0}},"11.200": {"name": "11.200 Electronic signature components and controls","checks": {"iam_root_mfa_enabled": null,"iam_no_root_access_key": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"iam_user_mfa_enabled_console_access": null,"iam_password_policy_minimum_length_14": null},"status": "PASS","attributes": [{"Type": null,"ItemId": "11.200","Section": "11.200 Electronic signature components and controls","Service": "aws","SubGroup": null,"SubSection": null}],"description": "(a) Electronic signatures that are not based upon biometrics shall: (1) Employ at least two distinct identification components such as an identification code and password. (i) When an individual executes a series of signings during a single, continuous period of controlled system access, the first signing shall be executed using all electronic signature components; subsequent signings shall be executed using at least one electronic signature component that is only executable by, and designed to be used only by, the individual. (ii) When an individual executes one or more signings not performed during a single, continuous period of controlled system access, each signing shall be executed using all of the electronic signature components. (2) Be used only by their genuine owners; and (3) Be administered and executed to ensure that attempted use of an individual's electronic signature by anyone other than its genuine owner requires collaboration of two or more individuals.","checks_status": {"fail": 0,"pass": 0,"total": 12,"manual": 0}},"11.10-a": {"name": "11.10(a)","checks": {"rds_instance_multi_az": "FAIL","elbv2_deletion_protection": "FAIL","ec2_instance_managed_by_ssm": "FAIL","rds_instance_backup_enabled": "PASS","s3_bucket_object_versioning": "FAIL","dynamodb_tables_pitr_enabled": null,"ssm_managed_compliant_patching": "FAIL","rds_instance_deletion_protection": "FAIL","redshift_cluster_automated_snapshot": null,"ec2_instance_older_than_specific_days": "FAIL","cloudtrail_log_file_validation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-a","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. 
Such procedures and controls shall include the following: (a) Validation of systems to ensure accuracy, reliability, consistent intended performance, and the ability to discern invalid or altered records.","checks_status": {"fail": 8,"pass": 1,"total": 13,"manual": 0}},"11.10-c": {"name": "11.10(c)","checks": {"s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","s3_bucket_object_versioning": "FAIL","s3_bucket_default_encryption": "PASS","rds_instance_storage_encrypted": "FAIL","redshift_cluster_audit_logging": null,"redshift_cluster_public_access": null,"cloudtrail_kms_encryption_enabled": "FAIL","s3_bucket_secure_transport_policy": "FAIL","s3_bucket_policy_public_write_access": "PASS","sagemaker_notebook_instance_encryption_enabled": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-c","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. Such procedures and controls shall include the following: (c) Protection of records to enable their accurate and ready retrieval throughout the records retention period.","checks_status": {"fail": 5,"pass": 3,"total": 14,"manual": 0}},"11.10-d": {"name": "11.10(d)","checks": {"iam_root_mfa_enabled": null,"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"ec2_instance_profile_attached": "PASS","iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"rds_instance_no_public_access": "PASS","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"iam_password_policy_minimum_length_14": null,"s3_account_level_public_access_blocks": null,"secretsmanager_automatic_rotation_enabled": "FAIL","awslambda_function_not_publicly_accessible": "PASS","ec2_securitygroup_default_restrict_traffic": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"iam_customer_attached_policy_no_administrative_privileges": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-d","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the 
authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. Such procedures and controls shall include the following: (d) Limiting system access to authorized individuals.","checks_status": {"fail": 4,"pass": 8,"total": 38,"manual": 0}},"11.10-e": {"name": "11.10(e)","checks": {"elb_logging_enabled": "FAIL","elbv2_logging_enabled": "FAIL","vpc_flow_logs_enabled": "FAIL","redshift_cluster_audit_logging": null,"cloudtrail_multi_region_enabled": "PASS","apigateway_restapi_logging_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","opensearch_service_domains_cloudwatch_logging_enabled": null,"cloudwatch_log_group_retention_policy_specific_days_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-d","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. Such procedures and controls shall include the following: (e) Use of secure, computer-generated, time-stamped audit trails to independently record the date and time of operator entries and actions that create, modify, or delete electronic records. Record changes shall not obscure previously recorded information. 
Such audit trail documentation shall be retained for a period at least as long as that required for the subject electronic records and shall be available for agency review and copying.","checks_status": {"fail": 7,"pass": 2,"total": 14,"manual": 0}},"11.10-g": {"name": "11.10(g)","checks": {"iam_root_mfa_enabled": null,"ec2_instance_public_ip": "FAIL","iam_no_root_access_key": null,"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"ec2_ebs_volume_encryption": "PASS","iam_user_accesskey_unused": null,"ec2_ebs_default_encryption": "PASS","iam_password_policy_number": null,"iam_password_policy_symbol": null,"ec2_instance_imdsv2_enabled": "PASS","rds_snapshots_public_access": "PASS","awslambda_function_url_public": null,"ec2_instance_profile_attached": "PASS","iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_root_hardware_mfa_enabled": null,"iam_rotate_access_key_90_days": null,"rds_instance_no_public_access": "PASS","efs_encryption_at_rest_enabled": "FAIL","iam_user_console_access_unused": null,"redshift_cluster_public_access": null,"iam_user_mfa_enabled_console_access": null,"s3_bucket_policy_public_write_access": "PASS","ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"iam_password_policy_minimum_length_14": null,"s3_account_level_public_access_blocks": null,"secretsmanager_automatic_rotation_enabled": "FAIL","awslambda_function_not_publicly_accessible": "PASS","dynamodb_tables_kms_cmk_encryption_enabled": null,"ec2_securitygroup_default_restrict_traffic": "FAIL","iam_policy_attached_only_to_group_or_roles": null,"iam_inline_policy_no_administrative_privileges": null,"iam_aws_attached_policy_no_administrative_privileges": null,"opensearch_service_domains_encryption_at_rest_enabled": null,"iam_customer_attached_policy_no_administrative_privileges": null,"opensearch_service_domains_node_to_node_encryption_enabled": null,"ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22": "PASS","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-g","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. 
Such procedures and controls shall include the following: (g) Use of authority checks to ensure that only authorized individuals can use the system, electronically sign a record, access the operation or computer system input or output device, alter a record, or perform the operation at hand.","checks_status": {"fail": 5,"pass": 10,"total": 44,"manual": 0}},"11.10-h": {"name": "11.10(h)","checks": {"ec2_instance_managed_by_ssm": "FAIL","ssm_managed_compliant_patching": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-h","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. Such procedures and controls shall include the following: (h) Use of device (e.g., terminal) checks to determine, as appropriate, the validity of the source of data input or operational instruction.","checks_status": {"fail": 2,"pass": 0,"total": 3,"manual": 0}},"11.10-k": {"name": "11.10(k)","checks": {"ec2_ebs_public_snapshot": "PASS","s3_bucket_public_access": null,"rds_snapshots_public_access": "PASS","rds_instance_no_public_access": "PASS","redshift_cluster_public_access": null,"cloudtrail_multi_region_enabled": "PASS","s3_bucket_policy_public_write_access": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL","cloudtrail_s3_dataevents_read_enabled": null,"ec2_networkacl_allow_ingress_any_port": "FAIL","emr_cluster_master_nodes_no_public_ip": null,"cloudtrail_s3_dataevents_write_enabled": null,"s3_bucket_server_access_logging_enabled": "FAIL","rds_instance_integration_cloudwatch_logs": "FAIL","ec2_securitygroup_default_restrict_traffic": "FAIL","sagemaker_notebook_instance_without_direct_internet_access_configured": null},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.10-k","Section": "11.10 Controls for closed systems","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use closed systems to create, modify, maintain, or transmit electronic records shall employ procedures and controls designed to ensure the authenticity, integrity, and, when appropriate, the confidentiality of electronic records, and to ensure that the signer cannot readily repudiate the signed record as not genuine. Such procedures and controls shall include the following: (k) Use of appropriate controls over systems documentation including: (1) Adequate controls over the distribution of, access to, and use of documentation for system operation and maintenance. 
(2) Revision and change control procedures to maintain an audit trail that documents time-sequenced development and modification of systems documentation.","checks_status": {"fail": 5,"pass": 5,"total": 17,"manual": 0}},"11.300-b": {"name": "11.300(b)","checks": {"iam_user_accesskey_unused": null,"iam_password_policy_number": null,"iam_password_policy_symbol": null,"iam_password_policy_lowercase": null,"iam_password_policy_uppercase": null,"iam_rotate_access_key_90_days": null,"iam_user_console_access_unused": null,"iam_password_policy_minimum_length_14": null,"secretsmanager_automatic_rotation_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.300-b","Section": "11.300 Controls for identification codes/passwords","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use electronic signatures based upon use of identification codes in combination with passwords shall employ controls to ensure their security and integrity. Such controls shall include: (b) Ensuring that identification code and password issuances are periodically checked, recalled, or revised (e.g., to cover such events as password aging).","checks_status": {"fail": 1,"pass": 0,"total": 10,"manual": 0}},"11.300-d": {"name": "11.300(d)","checks": {"securityhub_enabled": "PASS","guardduty_is_enabled": "PASS","cloudtrail_multi_region_enabled": "PASS","cloudtrail_cloudwatch_logging_enabled": "FAIL"},"status": "FAIL","attributes": [{"Type": null,"ItemId": "11.300-d","Section": "11.300 Controls for identification codes/passwords","Service": "aws","SubGroup": null,"SubSection": null}],"description": "Persons who use electronic signatures based upon use of identification codes in combination with passwords shall employ controls to ensure their security and integrity. Such controls shall include: (d) Use of transaction safeguards to prevent unauthorized use of passwords and/or identification codes, and to detect and report in an immediate and urgent manner any attempts at their unauthorized use to the system security unit, and, as appropriate, to organizational management.","checks_status": {"fail": 1,"pass": 3,"total": 4,"manual": 0}}},"requirements_passed": 1,"requirements_failed": 10,"requirements_manual": 0,"total_requirements": 11,"scan": "0191e280-9d2f-71c8-9b18-487a23ba185e"}}]
diff --git a/api/src/backend/api/middleware.py b/api/src/backend/api/middleware.py
new file mode 100644
index 0000000000..743eff5eb7
--- /dev/null
+++ b/api/src/backend/api/middleware.py
@@ -0,0 +1,60 @@
+import logging
+import time
+
+from config.custom_logging import BackendLogger
+
+
+def extract_auth_info(request) -> dict:
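+    """Return tenant and user IDs from ``request.auth`` (presumably the decoded JWT claims), or "N/A" defaults."""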
+    if getattr(request, "auth", None) is not None:
+        tenant_id = request.auth.get("tenant_id", "N/A")
+        user_id = request.auth.get("sub", "N/A")
+    else:
+        tenant_id, user_id = "N/A", "N/A"
+    return {"tenant_id": tenant_id, "user_id": user_id}
+
+
+class APILoggingMiddleware:
+    """
+    Middleware for logging API requests.
+
+    This middleware logs one entry per API request: the request metadata (method, path, query parameters), the authenticated user and tenant, the response status code, and the request duration in seconds.
+
+    Args:
+        get_response (Callable): A callable to get the response, typically the next middleware or view.
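+
+    Example:
+        A minimal registration sketch (assuming the app is importable as ``api``)::
+
+            MIDDLEWARE = [
+                # ...,
+                "api.middleware.APILoggingMiddleware",
+            ]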
+    """
+
+    def __init__(self, get_response):
+        self.get_response = get_response
+        self.logger = logging.getLogger(BackendLogger.API)
+
+    def __call__(self, request):
+        request_start_time = time.time()
+
+        response = self.get_response(request)
+        duration = time.time() - request_start_time
+        auth_info = extract_auth_info(request)
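+        # The log message is intentionally empty: the fields passed via "extra"
+        # carry the structured request metadata for the configured formatter.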
+        self.logger.info(
+            "",
+            extra={
+                "user_id": auth_info["user_id"],
+                "tenant_id": auth_info["tenant_id"],
+                "method": request.method,
+                "path": request.path,
+                "query_params": request.GET.dict(),
+                "status_code": response.status_code,
+                "duration": duration,
+            },
+        )
+
+        return response
diff --git a/api/src/backend/api/migrations/0001_initial.py b/api/src/backend/api/migrations/0001_initial.py
new file mode 100644
index 0000000000..d2a8a39d3b
--- /dev/null
+++ b/api/src/backend/api/migrations/0001_initial.py
@@ -0,0 +1,1504 @@
+import uuid
+from functools import partial
+
+import django.contrib.auth.models
+import django.contrib.postgres.indexes
+import django.contrib.postgres.search
+import django.core.validators
+import django.db.models.deletion
+import django.utils.timezone
+from django.conf import settings
+from django.db import migrations, models
+from psqlextra.backend.migrations.operations.add_default_partition import (
+    PostgresAddDefaultPartition,
+)
+from psqlextra.backend.migrations.operations.create_partitioned_model import (
+    PostgresCreatePartitionedModel,
+)
+from psqlextra.manager.manager import PostgresManager
+from psqlextra.models.partitioned import PostgresPartitionedModel
+from psqlextra.types import PostgresPartitioningMethod
+from uuid6 import uuid7
+
+import api.rls
+from api.db_utils import (
+    PostgresEnumMigration,
+    MemberRoleEnumField,
+    MemberRoleEnum,
+    ProviderEnum,
+    ProviderEnumField,
+    ProviderSecretTypeEnum,
+    ProviderSecretTypeEnumField,
+    ScanTriggerEnum,
+    StateEnumField,
+    StateEnum,
+    ScanTriggerEnumField,
+    InvitationStateEnum,
+    InvitationStateEnumField,
+    register_enum,
+    DB_PROWLER_USER,
+    DB_PROWLER_PASSWORD,
+    TASK_RUNNER_DB_TABLE,
+    POSTGRES_TENANT_VAR,
+    POSTGRES_USER_VAR,
+)
+from api.models import (
+    Provider,
+    Scan,
+    StateChoices,
+    Finding,
+    StatusChoices,
+    SeverityChoices,
+    Membership,
+    ProviderSecret,
+    Invitation,
+)
+
+DB_NAME = settings.DATABASES["default"]["NAME"]
+
+MemberRoleEnumMigration = PostgresEnumMigration(
+    enum_name="member_role",
+    enum_values=tuple(role[0] for role in Membership.RoleChoices.choices),
+)
+
+ProviderEnumMigration = PostgresEnumMigration(
+    enum_name="provider",
+    enum_values=tuple(provider[0] for provider in Provider.ProviderChoices.choices),
+)
+
+ScanTriggerEnumMigration = PostgresEnumMigration(
+    enum_name="scan_trigger",
+    enum_values=tuple(scan_trigger[0] for scan_trigger in Scan.TriggerChoices.choices),
+)
+
+StateEnumMigration = PostgresEnumMigration(
+    enum_name="state",
+    enum_values=tuple(state[0] for state in StateChoices.choices),
+)
+
+FindingDeltaEnumMigration = PostgresEnumMigration(
+    enum_name="finding_delta",
+    enum_values=tuple(
+        finding_delta[0] for finding_delta in Finding.DeltaChoices.choices
+    ),
+)
+
+StatusEnumMigration = PostgresEnumMigration(
+    enum_name="status",
+    enum_values=tuple(status[0] for status in StatusChoices.choices),
+)
+
+SeverityEnumMigration = PostgresEnumMigration(
+    enum_name="severity",
+    enum_values=tuple(severity[0] for severity in SeverityChoices),
+)
+
+ProviderSecretTypeEnumMigration = PostgresEnumMigration(
+    enum_name="provider_secret_type",
+    enum_values=tuple(
+        secret_type[0] for secret_type in ProviderSecret.TypeChoices.choices
+    ),
+)
+
+InvitationStateEnumMigration = PostgresEnumMigration(
+    enum_name="invitation_state",
+    enum_values=tuple(state[0] for state in Invitation.State.choices),
+)
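+
+# Each PostgresEnumMigration above pairs a native Postgres enum type with the
+# matching Django choices. A sketch of the DDL such a helper presumably wraps
+# (the real implementation lives in api.db_utils):
+#
+#   CREATE TYPE member_role AS ENUM ('owner', 'member');  -- create_enum_type
+#   DROP TYPE member_role;                                -- drop_enum_type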
+
+
+class Migration(migrations.Migration):
+    initial = True
+    # Required for our kind of `RunPython` operations
+    atomic = False
+
+    dependencies = [
+        ("django_celery_results", "0011_taskresult_periodic_task_name"),
+        ("auth", "0012_alter_user_first_name_max_length"),
+    ]
+
+    operations = [
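+        # Idempotently create the API database role: plain CREATE ROLE has no
+        # IF NOT EXISTS clause, so the DO block checks pg_roles first.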
+        migrations.RunSQL(
+            f"""
+            DO $$
+            BEGIN
+                IF NOT EXISTS (
+                    SELECT
+                    FROM   pg_catalog.pg_roles
+                    WHERE  rolname = '{DB_PROWLER_USER}') THEN
+                    CREATE ROLE {DB_PROWLER_USER} LOGIN PASSWORD '{DB_PROWLER_PASSWORD}';
+                END IF;
+            END
+            $$;
+            """
+        ),
+        migrations.RunSQL(
+            # Baseline permissions the API user needs: database connection and read access to the migrations table
+            f"""
+            GRANT CONNECT ON DATABASE "{DB_NAME}" TO {DB_PROWLER_USER};
+            GRANT SELECT ON django_migrations TO {DB_PROWLER_USER};
+            """
+        ),
+        migrations.CreateModel(
+            name="User",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                (
+                    "name",
+                    models.CharField(
+                        max_length=150,
+                        validators=[django.core.validators.MinLengthValidator(3)],
+                    ),
+                ),
+                ("password", models.CharField(max_length=128, verbose_name="password")),
+                (
+                    "last_login",
+                    models.DateTimeField(
+                        blank=True, null=True, verbose_name="last login"
+                    ),
+                ),
+                (
+                    "email",
+                    models.EmailField(
+                        error_messages={
+                            "unique": "Please check the email address and try again."
+                        },
+                        help_text="Case insensitive",
+                        max_length=254,
+                        unique=True,
+                    ),
+                ),
+                ("company_name", models.CharField(max_length=150, blank=True)),
+                ("is_active", models.BooleanField(default=True)),
+                ("date_joined", models.DateTimeField(auto_now_add=True)),
+            ],
+            options={
+                "db_table": "users",
+            },
+        ),
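+        # BaseSecurityConstraint (api.rls) presumably grants the listed SQL
+        # statements on the table to the API role rather than defining a data rule.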
+        migrations.AddConstraint(
+            model_name="user",
+            constraint=api.rls.BaseSecurityConstraint(
+                name="statements_on_user",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        # Create and register State type
+        migrations.RunPython(
+            StateEnumMigration.create_enum_type,
+            reverse_code=StateEnumMigration.drop_enum_type,
+        ),
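+        # register_enum presumably installs an adapter so Python values round-trip
+        # through the Postgres enum type created above.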
+        migrations.RunPython(partial(register_enum, enum_class=StateEnum)),
+        migrations.CreateModel(
+            name="Tenant",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                ("name", models.CharField(max_length=100)),
+            ],
+            options={
+                "db_table": "tenants",
+            },
+        ),
+        migrations.RunSQL(
+            # Needed for now since we don't have users yet
+            f"""
+            GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE tenants TO {DB_PROWLER_USER};
+            """
+        ),
+        # Create and register MemberRoleEnum type
+        migrations.RunPython(
+            MemberRoleEnumMigration.create_enum_type,
+            reverse_code=MemberRoleEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(partial(register_enum, enum_class=MemberRoleEnum)),
+        migrations.CreateModel(
+            name="Membership",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                (
+                    "role",
+                    MemberRoleEnumField(
+                        choices=[("owner", "Owner"), ("member", "Member")],
+                        default="member",
+                    ),
+                ),
+                (
+                    "date_joined",
+                    models.DateTimeField(auto_now_add=True, editable=False),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="memberships",
+                        related_query_name="membership",
+                        to="api.tenant",
+                    ),
+                ),
+                (
+                    "user",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="memberships",
+                        related_query_name="membership",
+                        to=settings.AUTH_USER_MODEL,
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "memberships",
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="membership",
+            constraint=api.rls.BaseSecurityConstraint(
+                name="statements_on_membership",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="membership",
+            constraint=models.UniqueConstraint(
+                fields=("user", "tenant"),
+                name="unique_resources_by_membership",
+            ),
+        ),
+        # Enable RLS on the tenants table, scoped via the user's memberships or the current tenant context
+        migrations.RunSQL(f"""
+        ALTER TABLE tenants ENABLE ROW LEVEL SECURITY;
+
+        -- Policy for SELECT
+        CREATE POLICY "{DB_PROWLER_USER}_tenants_select"
+        ON tenants
+        FOR SELECT
+        TO {DB_PROWLER_USER}
+        USING (
+            CASE
+                WHEN (current_setting('{POSTGRES_USER_VAR}', true) IS NOT NULL AND current_setting('{POSTGRES_USER_VAR}', true) <> '') THEN
+                    EXISTS (
+                        SELECT 1
+                        FROM memberships
+                        WHERE
+                            memberships.tenant_id = tenants.id
+                            AND memberships.user_id = current_setting('{POSTGRES_USER_VAR}', true)::uuid
+                    )
+                WHEN (current_setting('{POSTGRES_TENANT_VAR}', true) IS NOT NULL AND current_setting('{POSTGRES_TENANT_VAR}', true) <> '') THEN
+                    tenants.id = current_setting('{POSTGRES_TENANT_VAR}', true)::uuid
+                ELSE
+                    FALSE
+            END
+        );
+
+        -- Policy for UPDATE
+        CREATE POLICY "{DB_PROWLER_USER}_tenants_update"
+        ON tenants
+        FOR UPDATE
+        TO {DB_PROWLER_USER}
+        USING (
+            CASE
+                WHEN (current_setting('{POSTGRES_USER_VAR}', true) IS NOT NULL AND current_setting('{POSTGRES_USER_VAR}', true) <> '') THEN
+                    EXISTS (
+                        SELECT 1
+                        FROM memberships
+                        WHERE
+                            memberships.tenant_id = tenants.id
+                            AND memberships.user_id = current_setting('{POSTGRES_USER_VAR}', true)::uuid
+                    )
+                WHEN (current_setting('{POSTGRES_TENANT_VAR}', true) IS NOT NULL AND current_setting('{POSTGRES_TENANT_VAR}', true) <> '') THEN
+                    tenants.id = current_setting('{POSTGRES_TENANT_VAR}', true)::uuid
+                ELSE
+                    FALSE
+            END
+        );
+
+        -- Policy for DELETE
+        CREATE POLICY "{DB_PROWLER_USER}_tenants_delete"
+        ON tenants
+        FOR DELETE
+        TO {DB_PROWLER_USER}
+        USING (
+            CASE
+                WHEN (current_setting('{POSTGRES_USER_VAR}', true) IS NOT NULL AND current_setting('{POSTGRES_USER_VAR}', true) <> '') THEN
+                    EXISTS (
+                        SELECT 1
+                        FROM memberships
+                        WHERE
+                            memberships.tenant_id = tenants.id
+                            AND memberships.user_id = current_setting('{POSTGRES_USER_VAR}', true)::uuid
+                    )
+                WHEN (current_setting('{POSTGRES_TENANT_VAR}', true) IS NOT NULL AND current_setting('{POSTGRES_TENANT_VAR}', true) <> '') THEN
+                    tenants.id = current_setting('{POSTGRES_TENANT_VAR}', true)::uuid
+                ELSE
+                    FALSE
+            END
+        );
+
+        -- Policy for INSERT
+        CREATE POLICY "{DB_PROWLER_USER}_tenants_insert"
+        ON tenants
+        FOR INSERT
+        TO {DB_PROWLER_USER}
+        WITH CHECK (true);
+                """),
+        # Create and register ProviderEnum type
+        migrations.RunPython(
+            ProviderEnumMigration.create_enum_type,
+            reverse_code=ProviderEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(partial(register_enum, enum_class=ProviderEnum)),
+        migrations.CreateModel(
+            name="Provider",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                (
+                    "provider",
+                    ProviderEnumField(
+                        choices=[
+                            ("aws", "AWS"),
+                            ("azure", "Azure"),
+                            ("gcp", "GCP"),
+                            ("kubernetes", "Kubernetes"),
+                        ],
+                        default="aws",
+                    ),
+                ),
+                (
+                    "uid",
+                    models.CharField(
+                        max_length=63,
+                        validators=[django.core.validators.MinLengthValidator(3)],
+                        verbose_name="Unique identifier for the provider, set by the provider",
+                    ),
+                ),
+                (
+                    "alias",
+                    models.CharField(
+                        blank=True,
+                        null=True,
+                        max_length=100,
+                        validators=[django.core.validators.MinLengthValidator(3)],
+                    ),
+                ),
+                ("connected", models.BooleanField(blank=True, null=True)),
+                (
+                    "connection_last_checked_at",
+                    models.DateTimeField(blank=True, null=True),
+                ),
+                ("metadata", models.JSONField(blank=True, default=dict)),
+                ("scanner_args", models.JSONField(blank=True, default=dict)),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "abstract": False,
+                "db_table": "providers",
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="provider",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_provider",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="provider",
+            constraint=models.UniqueConstraint(
+                fields=("tenant_id", "provider", "uid"),
+                name="unique_provider_uids",
+            ),
+        ),
+        migrations.CreateModel(
+            name="ProviderGroup",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("name", models.CharField(max_length=255)),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+            ],
+            options={
+                "db_table": "provider_groups",
+            },
+        ),
+        migrations.CreateModel(
+            name="ProviderGroupMembership",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+            ],
+            options={
+                "db_table": "provider_group_memberships",
+            },
+        ),
+        migrations.AddField(
+            model_name="providergroup",
+            name="tenant",
+            field=models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE,
+                to="api.tenant",
+            ),
+        ),
+        migrations.AddField(
+            model_name="providergroup",
+            name="providers",
+            field=models.ManyToManyField(
+                related_name="provider_groups",
+                through="api.ProviderGroupMembership",
+                to="api.provider",
+            ),
+        ),
+        migrations.AddField(
+            model_name="providergroupmembership",
+            name="tenant",
+            field=models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+            ),
+        ),
+        migrations.AddField(
+            model_name="providergroupmembership",
+            name="provider",
+            field=models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE, to="api.provider"
+            ),
+        ),
+        migrations.AddField(
+            model_name="providergroupmembership",
+            name="provider_group",
+            field=models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE, to="api.providergroup"
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="providergroup",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_providergroup",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="providergroup",
+            constraint=models.UniqueConstraint(
+                fields=("tenant_id", "name"), name="unique_group_name_per_tenant"
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="providergroupmembership",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_providergroupmembership",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="providergroupmembership",
+            constraint=models.UniqueConstraint(
+                fields=("provider_id", "provider_group"),
+                name="unique_provider_group_membership",
+            ),
+        ),
+        migrations.CreateModel(
+            name="Task",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                (
+                    "task_runner_task",
+                    models.OneToOneField(
+                        blank=True,
+                        null=True,
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="task",
+                        related_query_name="task",
+                        to="django_celery_results.taskresult",
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "tasks",
+                "abstract": False,
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="task",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_task",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="task",
+            index=models.Index(
+                fields=["id", "task_runner_task"],
+                name="tasks_id_trt_id_idx",
+            ),
+        ),
+        migrations.RunSQL(
+            f"""
+        ALTER TABLE {TASK_RUNNER_DB_TABLE} ENABLE ROW LEVEL SECURITY;
+        CREATE POLICY "{DB_PROWLER_USER}_{TASK_RUNNER_DB_TABLE}_select"
+        ON {TASK_RUNNER_DB_TABLE}
+        FOR SELECT
+        TO {DB_PROWLER_USER}
+        USING (
+            task_id::uuid in (SELECT id FROM tasks WHERE tenant_id = (NULLIF(current_setting('{POSTGRES_TENANT_VAR}', true), ''))::uuid)
+        );
+        GRANT SELECT ON TABLE {TASK_RUNNER_DB_TABLE} TO {DB_PROWLER_USER};
+        """
+        ),
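+        # The django_celery_results table has no tenant column, so visibility
+        # is derived by joining task_id back to the RLS-protected tasks table;
+        # note that only SELECT is granted on it to the application role.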
+        # Create and register ScanTriggerEnum type
+        migrations.RunPython(
+            ScanTriggerEnumMigration.create_enum_type,
+            reverse_code=ScanTriggerEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(partial(register_enum, enum_class=ScanTriggerEnum)),
+        migrations.CreateModel(
+            name="Scan",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid7,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                (
+                    "name",
+                    models.CharField(
+                        blank=True,
+                        max_length=100,
+                        null=True,
+                        validators=[django.core.validators.MinLengthValidator(3)],
+                    ),
+                ),
+                (
+                    "trigger",
+                    ScanTriggerEnumField(
+                        choices=[("scheduled", "Scheduled"), ("manual", "Manual")]
+                    ),
+                ),
+                (
+                    "state",
+                    StateEnumField(
+                        choices=[
+                            ("available", "Available"),
+                            ("scheduled", "Scheduled"),
+                            ("executing", "Executing"),
+                            ("completed", "Completed"),
+                            ("failed", "Failed"),
+                            ("cancelled", "Cancelled"),
+                        ],
+                        default="available",
+                    ),
+                ),
+                ("unique_resource_count", models.IntegerField(default=0)),
+                ("progress", models.IntegerField(default=0)),
+                ("scanner_args", models.JSONField(default=dict)),
+                ("duration", models.IntegerField(blank=True, null=True)),
+                (
+                    "scheduled_at",
+                    models.DateTimeField(null=True, blank=True),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                ("started_at", models.DateTimeField(null=True, blank=True)),
+                ("completed_at", models.DateTimeField(null=True, blank=True)),
+                (
+                    "provider",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="scans",
+                        related_query_name="scan",
+                        to="api.provider",
+                    ),
+                ),
+                (
+                    "task",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="scans",
+                        related_query_name="scan",
+                        to="api.task",
+                        null=True,
+                        blank=True,
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "scans",
+                "abstract": False,
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="scan",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_scan",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="scan",
+            index=models.Index(
+                fields=["provider", "state", "trigger", "scheduled_at"],
+                name="scans_prov_state_trig_sche_idx",
+            ),
+        ),
+        # Resources
+        migrations.RunSQL(
+            sql="""
+          CREATE EXTENSION IF NOT EXISTS pg_trgm;
+          """,
+            reverse_sql="""
+          DROP EXTENSION IF EXISTS pg_trgm;
+          """,
+        ),
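+        # pg_trgm provides trigram operators and index support, presumably to
+        # allow fuzzy matching on resource fields alongside the tsvector search.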
+        migrations.CreateModel(
+            name="Resource",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                (
+                    "uid",
+                    models.TextField(
+                        verbose_name="Unique identifier for the resource, set by the provider"
+                    ),
+                ),
+                (
+                    "name",
+                    models.TextField(
+                        verbose_name="Name of the resource, as set in the provider"
+                    ),
+                ),
+                (
+                    "region",
+                    models.TextField(
+                        verbose_name="Location of the resource, as set by the provider"
+                    ),
+                ),
+                (
+                    "service",
+                    models.TextField(
+                        verbose_name="Service of the resource, as set by the provider"
+                    ),
+                ),
+                (
+                    "type",
+                    models.TextField(
+                        verbose_name="Type of the resource, as set by the provider"
+                    ),
+                ),
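+                # Persisted weighted search vector, equivalent to:
+                #   uid (A) || name (B) || region (C) || service+type (D)
+                # all under the 'simple' text search configuration.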
+                (
+                    "text_search",
+                    models.GeneratedField(
+                        db_persist=True,
+                        expression=django.contrib.postgres.search.CombinedSearchVector(
+                            django.contrib.postgres.search.CombinedSearchVector(
+                                django.contrib.postgres.search.CombinedSearchVector(
+                                    django.contrib.postgres.search.SearchVector(
+                                        "uid", config="simple", weight="A"
+                                    ),
+                                    "||",
+                                    django.contrib.postgres.search.SearchVector(
+                                        "name", config="simple", weight="B"
+                                    ),
+                                    django.contrib.postgres.search.SearchConfig(
+                                        "simple"
+                                    ),
+                                ),
+                                "||",
+                                django.contrib.postgres.search.SearchVector(
+                                    "region", config="simple", weight="C"
+                                ),
+                                django.contrib.postgres.search.SearchConfig("simple"),
+                            ),
+                            "||",
+                            django.contrib.postgres.search.SearchVector(
+                                "service", "type", config="simple", weight="D"
+                            ),
+                            django.contrib.postgres.search.SearchConfig("simple"),
+                        ),
+                        null=True,
+                        output_field=django.contrib.postgres.search.SearchVectorField(),
+                    ),
+                ),
+                (
+                    "provider",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="resources",
+                        related_query_name="resource",
+                        to="api.provider",
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "resources",
+                "abstract": False,
+            },
+        ),
+        migrations.AddIndex(
+            model_name="resource",
+            index=models.Index(
+                fields=["uid", "region", "service", "name"],
+                name="resource_uid_reg_serv_name_idx",
+            ),
+        ),
+        migrations.CreateModel(
+            name="ResourceTag",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                ("key", models.TextField()),
+                ("value", models.TextField()),
+                (
+                    "text_search",
+                    models.GeneratedField(
+                        db_persist=True,
+                        expression=django.contrib.postgres.search.CombinedSearchVector(
+                            django.contrib.postgres.search.SearchVector(
+                                "key", config="simple", weight="A"
+                            ),
+                            "||",
+                            django.contrib.postgres.search.SearchVector(
+                                "value", config="simple", weight="B"
+                            ),
+                            django.contrib.postgres.search.SearchConfig("simple"),
+                        ),
+                        null=True,
+                        output_field=django.contrib.postgres.search.SearchVectorField(),
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "resource_tags",
+                "abstract": False,
+            },
+        ),
+        migrations.CreateModel(
+            name="ResourceTagMapping",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                (
+                    "resource",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        to="api.resource",
+                    ),
+                ),
+                (
+                    "tag",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        to="api.resourcetag",
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        to="api.tenant",
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "resource_tag_mappings",
+                "abstract": False,
+            },
+        ),
+        migrations.AddField(
+            model_name="resource",
+            name="tags",
+            field=models.ManyToManyField(
+                through="api.ResourceTagMapping",
+                to="api.resourcetag",
+                verbose_name="Tags associated with the resource, by provider",
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="resourcetag",
+            index=django.contrib.postgres.indexes.GinIndex(
+                fields=["text_search"], name="gin_resource_tags_search_idx"
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="resource",
+            index=django.contrib.postgres.indexes.GinIndex(
+                fields=["text_search"], name="gin_resources_search_idx"
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resourcetag",
+            constraint=models.UniqueConstraint(
+                fields=("tenant_id", "key", "value"),
+                name="unique_resource_tags_by_tenant_key_value",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resourcetag",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_resourcetag",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resourcetagmapping",
+            constraint=models.UniqueConstraint(
+                fields=("tenant_id", "resource_id", "tag_id"),
+                name="unique_resource_tag_mappings_by_tenant",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resourcetagmapping",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_resourcetagmapping",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resource",
+            constraint=models.UniqueConstraint(
+                fields=("tenant_id", "provider_id", "uid"),
+                name="unique_resources_by_provider",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resource",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_resource",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        # Create and register FindingDeltaEnum, StatusEnum and SeverityEnum types
+        migrations.RunPython(
+            FindingDeltaEnumMigration.create_enum_type,
+            reverse_code=FindingDeltaEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(
+            StatusEnumMigration.create_enum_type,
+            reverse_code=StatusEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(
+            SeverityEnumMigration.create_enum_type,
+            reverse_code=SeverityEnumMigration.drop_enum_type,
+        ),
+        PostgresCreatePartitionedModel(
+            name="Finding",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid7,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                ("uid", models.CharField(max_length=300)),
+                (
+                    "delta",
+                    api.db_utils.FindingDeltaEnumField(
+                        choices=[("new", "New"), ("changed", "Changed")],
+                        blank=True,
+                        null=True,
+                    ),
+                ),
+                (
+                    "status",
+                    api.db_utils.StatusEnumField(
+                        choices=[
+                            ("FAIL", "Fail"),
+                            ("PASS", "Pass"),
+                            ("MANUAL", "Manual"),
+                            ("MUTED", "Muted"),
+                        ]
+                    ),
+                ),
+                ("status_extended", models.TextField(blank=True, null=True)),
+                (
+                    "severity",
+                    api.db_utils.SeverityEnumField(
+                        choices=[
+                            ("critical", "Critical"),
+                            ("high", "High"),
+                            ("medium", "Medium"),
+                            ("low", "Low"),
+                            ("informational", "Informational"),
+                        ]
+                    ),
+                ),
+                (
+                    "impact",
+                    api.db_utils.SeverityEnumField(
+                        choices=[
+                            ("critical", "Critical"),
+                            ("high", "High"),
+                            ("medium", "Medium"),
+                            ("low", "Low"),
+                            ("informational", "Informational"),
+                        ]
+                    ),
+                ),
+                ("impact_extended", models.TextField(blank=True, null=True)),
+                ("raw_result", models.JSONField(default=dict)),
+                ("check_id", models.CharField(max_length=100, null=False)),
+                ("check_metadata", models.JSONField(default=dict, null=False)),
+                ("tags", models.JSONField(default=dict, blank=True, null=True)),
+                (
+                    "scan",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="findings",
+                        to="api.scan",
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "findings",
+                "base_manager_name": "objects",
+            },
+            partitioning_options={
+                "method": PostgresPartitioningMethod["RANGE"],
+                "key": ["id"],
+            },
+            bases=(PostgresPartitionedModel,),
+            managers=[
+                ("objects", PostgresManager()),
+            ],
+        ),
+        migrations.RunSQL(
+            sql="""
+              ALTER TABLE findings
+                ADD COLUMN text_search tsvector
+                GENERATED ALWAYS AS (
+                  setweight(to_tsvector('english', coalesce(impact_extended, '')), 'A') ||
+                  setweight(to_tsvector('english', coalesce(status_extended, '')), 'B') ||
+                  setweight(jsonb_to_tsvector('simple', check_metadata, '["string", "numeric"]'), 'D') ||
+                  setweight(jsonb_to_tsvector('simple', tags, '["string", "numeric"]'), 'D')
+                ) STORED;
+            """,
+            reverse_sql="""
+              ALTER TABLE findings
+                DROP COLUMN text_search;
+              """,
+            state_operations=[
+                migrations.AddField(
+                    model_name="finding",
+                    name="text_search",
+                    field=models.GeneratedField(
+                        db_persist=True,
+                        expression=django.contrib.postgres.search.SearchVector(
+                            "impact_extended",
+                            "status_extended",
+                            config="simple",
+                            weight="A",
+                        ),
+                        null=True,
+                        output_field=django.contrib.postgres.search.SearchVectorField(),
+                    ),
+                ),
+            ],
+        ),
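+        # Note: state_operations only records an approximate GeneratedField in
+        # Django's in-memory model state; the real column is defined by the SQL
+        # above, and queries read the stored tsvector either way.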
+        migrations.AddIndex(
+            model_name="finding",
+            index=models.Index(
+                fields=["uid"],
+                name="findings_uid_idx",
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="finding",
+            index=models.Index(
+                fields=["scan_id", "impact", "severity", "status", "check_id", "delta"],
+                name="findings_filter_idx",
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="finding",
+            index=django.contrib.postgres.indexes.GinIndex(
+                fields=["text_search"], name="gin_findings_search_idx"
+            ),
+        ),
+        PostgresAddDefaultPartition(
+            model_name="Finding",
+            name="default",
+        ),
+        # NOTE: the RLS policy needs to be explicitly set on the partitions
+        migrations.AddConstraint(
+            model_name="finding",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_finding",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="finding",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_finding_default",
+                partition_name="default",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        PostgresCreatePartitionedModel(
+            name="ResourceFindingMapping",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                (
+                    "finding",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.finding"
+                    ),
+                ),
+                (
+                    "resource",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.resource"
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "resource_finding_mappings",
+                "abstract": False,
+                "base_manager_name": "objects",
+            },
+            partitioning_options={
+                "method": PostgresPartitioningMethod["RANGE"],
+                "key": ["finding_id"],
+            },
+            bases=(PostgresPartitionedModel,),
+            managers=[
+                ("objects", PostgresManager()),
+            ],
+        ),
+        migrations.AddField(
+            model_name="finding",
+            name="resources",
+            field=models.ManyToManyField(
+                related_name="findings",
+                through="api.ResourceFindingMapping",
+                to="api.resource",
+                verbose_name="Resources associated with the finding",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resourcefindingmapping",
+            constraint=models.UniqueConstraint(
+                fields=("tenant_id", "resource_id", "finding_id"),
+                name="unique_resource_finding_mappings_by_tenant",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="resourcefindingmapping",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_resourcefindingmapping",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        PostgresAddDefaultPartition(
+            model_name="resourcefindingmapping",
+            name="default",
+        ),
+        migrations.AddConstraint(
+            model_name="resourcefindingmapping",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_resource_finding_mappings_default",
+                partition_name="default",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.AlterModelOptions(
+            name="finding",
+            options={},
+        ),
+        migrations.RunPython(
+            ProviderSecretTypeEnumMigration.create_enum_type,
+            reverse_code=ProviderSecretTypeEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(partial(register_enum, enum_class=ProviderSecretTypeEnum)),
+        migrations.CreateModel(
+            name="ProviderSecret",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                (
+                    "name",
+                    models.CharField(
+                        blank=True,
+                        max_length=100,
+                        null=True,
+                        validators=[django.core.validators.MinLengthValidator(3)],
+                    ),
+                ),
+                (
+                    "secret_type",
+                    ProviderSecretTypeEnumField(
+                        choices=ProviderSecret.TypeChoices.choices
+                    ),
+                ),
+                ("_secret", models.BinaryField(db_column="secret")),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+                (
+                    "provider",
+                    models.OneToOneField(
+                        on_delete=django.db.models.deletion.CASCADE,
+                        related_name="secret",
+                        related_query_name="secret",
+                        to="api.provider",
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "provider_secrets",
+                "abstract": False,
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="providersecret",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_providersecret",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.RunPython(
+            InvitationStateEnumMigration.create_enum_type,
+            reverse_code=InvitationStateEnumMigration.drop_enum_type,
+        ),
+        migrations.RunPython(partial(register_enum, enum_class=InvitationStateEnum)),
+        migrations.CreateModel(
+            name="Invitation",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("updated_at", models.DateTimeField(auto_now=True)),
+                ("email", models.EmailField(max_length=254)),
+                (
+                    "state",
+                    InvitationStateEnumField(
+                        choices=[
+                            ("pending", "Invitation is pending"),
+                            ("accepted", "Invitation was accepted by a user"),
+                            ("expired", "Invitation expired after the configured time"),
+                            ("revoked", "Invitation was revoked by a user"),
+                        ],
+                        default="pending",
+                    ),
+                ),
+                (
+                    "token",
+                    models.CharField(
+                        unique=True,
+                        default=api.db_utils.generate_random_token,
+                        editable=False,
+                        max_length=14,
+                        validators=[django.core.validators.MinLengthValidator(14)],
+                    ),
+                ),
+                (
+                    "expires_at",
+                    models.DateTimeField(default=api.db_utils.one_week_from_now),
+                ),
+                (
+                    "inviter",
+                    models.ForeignKey(
+                        null=True,
+                        on_delete=django.db.models.deletion.SET_NULL,
+                        related_name="invitations",
+                        related_query_name="invitation",
+                        to=settings.AUTH_USER_MODEL,
+                    ),
+                ),
+                (
+                    "tenant",
+                    models.ForeignKey(
+                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+                    ),
+                ),
+            ],
+            options={
+                "db_table": "invitations",
+                "abstract": False,
+            },
+        ),
+        migrations.AddConstraint(
+            model_name="invitation",
+            constraint=models.UniqueConstraint(
+                fields=("tenant", "token", "email"),
+                name="unique_tenant_token_email_by_invitation",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="invitation",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_invitation",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        migrations.CreateModel(
+            name="ComplianceOverview",
+            fields=[
+                (
+                    "id",
+                    models.UUIDField(
+                        default=uuid.uuid4,
+                        editable=False,
+                        primary_key=True,
+                        serialize=False,
+                    ),
+                ),
+                ("inserted_at", models.DateTimeField(auto_now_add=True)),
+                ("compliance_id", models.CharField(max_length=100)),
+                ("framework", models.CharField(max_length=100)),
+                ("version", models.CharField(blank=True, max_length=50)),
+                ("description", models.TextField(blank=True)),
+                ("region", models.CharField(blank=True, max_length=50)),
+                ("requirements", models.JSONField(default=dict)),
+                ("requirements_passed", models.IntegerField(default=0)),
+                ("requirements_failed", models.IntegerField(default=0)),
+                ("requirements_manual", models.IntegerField(default=0)),
+                ("total_requirements", models.IntegerField(default=0)),
+            ],
+            options={
+                "db_table": "compliance_overviews",
+                "abstract": False,
+            },
+        ),
+        migrations.AddField(
+            model_name="complianceoverview",
+            name="scan",
+            field=models.ForeignKey(
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="compliance_overviews",
+                related_query_name="compliance_overview",
+                to="api.scan",
+            ),
+        ),
+        migrations.AddField(
+            model_name="complianceoverview",
+            name="tenant",
+            field=models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="complianceoverview",
+            constraint=models.UniqueConstraint(
+                fields=("tenant", "scan", "compliance_id", "region"),
+                name="unique_tenant_scan_region_compliance_by_compliance_overview",
+            ),
+        ),
+        migrations.AddConstraint(
+            model_name="complianceoverview",
+            constraint=api.rls.RowLevelSecurityConstraint(
+                "tenant_id",
+                name="rls_on_complianceoverview",
+                statements=["SELECT", "INSERT", "DELETE"],
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="complianceoverview",
+            index=models.Index(fields=["compliance_id"], name="comp_ov_cp_id_idx"),
+        ),
+        migrations.AddIndex(
+            model_name="complianceoverview",
+            index=models.Index(
+                fields=["requirements_failed"], name="comp_ov_req_fail_idx"
+            ),
+        ),
+        migrations.AddIndex(
+            model_name="complianceoverview",
+            index=models.Index(
+                fields=["compliance_id", "requirements_failed"],
+                name="comp_ov_cp_id_req_fail_idx",
+            ),
+        ),
+    ]
diff --git a/api/src/backend/api/migrations/0002_token_migrations.py b/api/src/backend/api/migrations/0002_token_migrations.py
new file mode 100644
index 0000000000..754403c62f
--- /dev/null
+++ b/api/src/backend/api/migrations/0002_token_migrations.py
@@ -0,0 +1,23 @@
+from django.conf import settings
+from django.db import migrations
+
+from api.db_utils import DB_PROWLER_USER
+
+DB_NAME = settings.DATABASES["default"]["NAME"]
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("api", "0001_initial"),
+        ("token_blacklist", "0012_alter_outstandingtoken_user"),
+    ]
+
+    operations = [
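+        # These tables belong to third-party/contrib apps and carry no tenant
+        # column, so plain grants are used here instead of RLS policies.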
+        migrations.RunSQL(
+            f"""
+            GRANT SELECT, INSERT, UPDATE, DELETE ON token_blacklist_blacklistedtoken TO {DB_PROWLER_USER};
+            GRANT SELECT, INSERT, UPDATE, DELETE ON token_blacklist_outstandingtoken TO {DB_PROWLER_USER};
+            GRANT SELECT, DELETE ON django_admin_log TO {DB_PROWLER_USER};
+            """
+        ),
+    ]
diff --git a/api/src/backend/api/migrations/__init__.py b/api/src/backend/api/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/api/models.py b/api/src/backend/api/models.py
new file mode 100644
index 0000000000..f498988f88
--- /dev/null
+++ b/api/src/backend/api/models.py
@@ -0,0 +1,858 @@
+import json
+import re
+from uuid import uuid4, UUID
+
+from cryptography.fernet import Fernet
+from django.conf import settings
+from django.contrib.auth.models import AbstractBaseUser
+from django.contrib.postgres.indexes import GinIndex
+from django.contrib.postgres.search import SearchVector, SearchVectorField
+from django.core.validators import MinLengthValidator
+from django.db import models
+from django.utils.translation import gettext_lazy as _
+from django_celery_results.models import TaskResult
+from prowler.lib.check.models import Severity
+from psqlextra.models import PostgresPartitionedModel
+from psqlextra.types import PostgresPartitioningMethod
+from uuid6 import uuid7
+
+from api.db_utils import (
+    MemberRoleEnumField,
+    enum_to_choices,
+    ProviderEnumField,
+    StateEnumField,
+    ScanTriggerEnumField,
+    FindingDeltaEnumField,
+    SeverityEnumField,
+    StatusEnumField,
+    CustomUserManager,
+    ProviderSecretTypeEnumField,
+    InvitationStateEnumField,
+    one_week_from_now,
+    generate_random_token,
+)
+from api.exceptions import ModelValidationError
+from api.rls import (
+    BaseSecurityConstraint,
+    RowLevelSecurityConstraint,
+    RowLevelSecurityProtectedModel,
+    Tenant,
+)
+
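+# Symmetric key, presumably used to encrypt/decrypt provider secrets at rest
+# (the ProviderSecret._secret binary column below).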
+fernet = Fernet(settings.SECRETS_ENCRYPTION_KEY.encode())
+
+# Convert Prowler Severity enum to Django TextChoices
+SeverityChoices = enum_to_choices(Severity)
+
+
+class StatusChoices(models.TextChoices):
+    """
+    Finding statuses, based on those of the Prowler CLI.
+
+    Adds one extra state, MUTED, which does not exist in the CLI.
+    """
+
+    FAIL = "FAIL", _("Fail")
+    PASS = "PASS", _("Pass")
+    MANUAL = "MANUAL", _("Manual")
+    MUTED = "MUTED", _("Muted")
+
+
+class StateChoices(models.TextChoices):
+    AVAILABLE = "available", _("Available")
+    SCHEDULED = "scheduled", _("Scheduled")
+    EXECUTING = "executing", _("Executing")
+    COMPLETED = "completed", _("Completed")
+    FAILED = "failed", _("Failed")
+    CANCELLED = "cancelled", _("Cancelled")
+
+
+class User(AbstractBaseUser):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    name = models.CharField(max_length=150, validators=[MinLengthValidator(3)])
+    email = models.EmailField(
+        max_length=254,
+        unique=True,
+        help_text="Case insensitive",
+        error_messages={"unique": "Please check the email address and try again."},
+    )
+    company_name = models.CharField(max_length=150, blank=True)
+    is_active = models.BooleanField(default=True)
+    date_joined = models.DateTimeField(auto_now_add=True, editable=False)
+
+    USERNAME_FIELD = "email"
+    REQUIRED_FIELDS = ["name"]
+
+    objects = CustomUserManager()
+
+    def is_member_of_tenant(self, tenant_id):
+        return self.memberships.filter(tenant_id=tenant_id).exists()
+
+    def save(self, *args, **kwargs):
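+        # Normalize the address so the unique email constraint behaves
+        # case-insensitively, matching the field's "Case insensitive" help text.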
+        if self.email:
+            self.email = self.email.strip().lower()
+        super().save(*args, **kwargs)
+
+    class Meta:
+        db_table = "users"
+
+        constraints = [
+            BaseSecurityConstraint(
+                name="statements_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            )
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "users"
+
+
+class Membership(models.Model):
+    class RoleChoices(models.TextChoices):
+        OWNER = "owner", _("Owner")
+        MEMBER = "member", _("Member")
+
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    user = models.ForeignKey(
+        User,
+        on_delete=models.CASCADE,
+        related_name="memberships",
+        related_query_name="membership",
+    )
+    tenant = models.ForeignKey(
+        Tenant,
+        on_delete=models.CASCADE,
+        related_name="memberships",
+        related_query_name="membership",
+    )
+    role = MemberRoleEnumField(choices=RoleChoices.choices, default=RoleChoices.MEMBER)
+    date_joined = models.DateTimeField(auto_now_add=True, editable=False)
+
+    class Meta:
+        db_table = "memberships"
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("user", "tenant"),
+                name="unique_resources_by_membership",
+            ),
+            BaseSecurityConstraint(
+                name="statements_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "memberships"
+
+
+class Provider(RowLevelSecurityProtectedModel):
+    class ProviderChoices(models.TextChoices):
+        AWS = "aws", _("AWS")
+        AZURE = "azure", _("Azure")
+        GCP = "gcp", _("GCP")
+        KUBERNETES = "kubernetes", _("Kubernetes")
+
+    @staticmethod
+    def validate_aws_uid(value):
+        if not re.match(r"^\d{12}$", value):
+            raise ModelValidationError(
+                detail="AWS provider ID must be exactly 12 digits.",
+                code="aws-uid",
+                pointer="/data/attributes/uid",
+            )
+
+    @staticmethod
+    def validate_azure_uid(value):
+        try:
+            val = UUID(value, version=4)
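+            # UUID() accepts non-canonical forms (uppercase, braces, URN);
+            # the round-trip comparison rejects anything non-canonical.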
+            if str(val) != value:
+                raise ValueError
+        except ValueError:
+            raise ModelValidationError(
+                detail="Azure provider ID must be a valid UUID.",
+                code="azure-uid",
+                pointer="/data/attributes/uid",
+            )
+
+    @staticmethod
+    def validate_gcp_uid(value):
+        if not re.match(r"^[a-z][a-z0-9-]{5,29}$", value):
+            raise ModelValidationError(
+                detail="GCP provider ID must be 6 to 30 characters, start with a letter, and contain only lowercase "
+                "letters, numbers, and hyphens.",
+                code="gcp-uid",
+                pointer="/data/attributes/uid",
+            )
+
+    @staticmethod
+    def validate_kubernetes_uid(value):
+        if not re.match(r"^[a-z0-9]([-a-z0-9]{1,61}[a-z0-9])?$", value):
+            raise ModelValidationError(
+                detail="K8s provider ID must be up to 63 characters, start and end with a lowercase letter or number, "
+                "and contain only lowercase alphanumeric characters and hyphens.",
+                code="kubernetes-uid",
+                pointer="/data/attributes/uid",
+            )
+
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+    provider = ProviderEnumField(
+        choices=ProviderChoices.choices, default=ProviderChoices.AWS
+    )
+    uid = models.CharField(
+        "Unique identifier for the provider, set by the provider",
+        max_length=63,
+        blank=False,
+        validators=[MinLengthValidator(3)],
+    )
+    alias = models.CharField(
+        blank=True, null=True, max_length=100, validators=[MinLengthValidator(3)]
+    )
+    connected = models.BooleanField(null=True, blank=True)
+    connection_last_checked_at = models.DateTimeField(null=True, blank=True)
+    metadata = models.JSONField(default=dict, blank=True)
+    scanner_args = models.JSONField(default=dict, blank=True)
+
+    def clean(self):
+        super().clean()
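+        # Dispatch to the matching validate_<provider>_uid static method above.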
+        getattr(self, f"validate_{self.provider}_uid")(self.uid)
+
+    def save(self, *args, **kwargs):
+        self.full_clean()
+        super().save(*args, **kwargs)
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "providers"
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant_id", "provider", "uid"),
+                name="unique_provider_uids",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "providers"
+
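+# Validation sketch (illustrative; assumes an existing Tenant row). `save()`
+# calls `full_clean()`, and `clean()` dispatches to the matching
+# `validate_<provider>_uid` static method, so a malformed UID is rejected
+# before any database write:
+#
+#     provider = Provider(tenant_id=tenant.id, provider="aws", uid="12345")
+#     provider.save()  # raises ModelValidationError with code "aws-uid"
+#
+#     Provider.objects.create(tenant_id=tenant.id, provider="aws", uid="123456789012")  # OK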
+
+class ProviderGroup(RowLevelSecurityProtectedModel):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    name = models.CharField(max_length=255)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+    providers = models.ManyToManyField(
+        Provider, through="ProviderGroupMembership", related_name="provider_groups"
+    )
+
+    class Meta:
+        db_table = "provider_groups"
+        constraints = [
+            models.UniqueConstraint(
+                fields=["tenant_id", "name"],
+                name="unique_group_name_per_tenant",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "provider-groups"
+
+
+class ProviderGroupMembership(RowLevelSecurityProtectedModel):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    provider = models.ForeignKey(
+        Provider,
+        on_delete=models.CASCADE,
+    )
+    provider_group = models.ForeignKey(
+        ProviderGroup,
+        on_delete=models.CASCADE,
+    )
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+
+    class Meta:
+        db_table = "provider_group_memberships"
+        constraints = [
+            models.UniqueConstraint(
+                fields=["provider_id", "provider_group"],
+                name="unique_provider_group_membership",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "provider-group-memberships"
+
+
+class Task(RowLevelSecurityProtectedModel):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    task_runner_task = models.OneToOneField(
+        TaskResult,
+        on_delete=models.CASCADE,
+        related_name="task",
+        related_query_name="task",
+        null=True,
+        blank=True,
+    )
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "tasks"
+
+        constraints = [
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+        indexes = [
+            models.Index(
+                fields=["id", "task_runner_task"],
+                name="tasks_id_trt_id_idx",
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "tasks"
+
+
+class Scan(RowLevelSecurityProtectedModel):
+    class TriggerChoices(models.TextChoices):
+        SCHEDULED = "scheduled", _("Scheduled")
+        MANUAL = "manual", _("Manual")
+
+    id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
+    name = models.CharField(
+        blank=True, null=True, max_length=100, validators=[MinLengthValidator(3)]
+    )
+    provider = models.ForeignKey(
+        Provider,
+        on_delete=models.CASCADE,
+        related_name="scans",
+        related_query_name="scan",
+    )
+    task = models.ForeignKey(
+        Task,
+        on_delete=models.CASCADE,
+        related_name="scans",
+        related_query_name="scan",
+        null=True,
+        blank=True,
+    )
+    trigger = ScanTriggerEnumField(
+        choices=TriggerChoices.choices,
+    )
+    state = StateEnumField(choices=StateChoices.choices, default=StateChoices.AVAILABLE)
+    unique_resource_count = models.IntegerField(default=0)
+    progress = models.IntegerField(default=0)
+    scanner_args = models.JSONField(default=dict)
+    duration = models.IntegerField(null=True, blank=True)
+    scheduled_at = models.DateTimeField(null=True, blank=True)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+    started_at = models.DateTimeField(null=True, blank=True)
+    completed_at = models.DateTimeField(null=True, blank=True)
+    # TODO: mutelist foreign key
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "scans"
+
+        constraints = [
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+        indexes = [
+            models.Index(
+                fields=["provider", "state", "trigger", "scheduled_at"],
+                name="scans_prov_state_trig_sche_idx",
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "scans"
+
+
+class ResourceTag(RowLevelSecurityProtectedModel):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+
+    key = models.TextField(blank=False)
+    value = models.TextField(blank=False)
+
+    text_search = models.GeneratedField(
+        expression=SearchVector("key", weight="A", config="simple")
+        + SearchVector("value", weight="B", config="simple"),
+        output_field=SearchVectorField(),
+        db_persist=True,
+        null=True,
+        editable=False,
+    )
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "resource_tags"
+
+        indexes = [
+            GinIndex(fields=["text_search"], name="gin_resource_tags_search_idx"),
+        ]
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant_id", "key", "value"),
+                name="unique_resource_tags_by_tenant_key_value",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+
+class Resource(RowLevelSecurityProtectedModel):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+
+    provider = models.ForeignKey(
+        Provider,
+        on_delete=models.CASCADE,
+        related_name="resources",
+        related_query_name="resource",
+    )
+
+    uid = models.TextField(
+        "Unique identifier for the resource, set by the provider", blank=False
+    )
+    name = models.TextField("Name of the resource, as set in the provider", blank=False)
+    region = models.TextField(
+        "Location of the resource, as set by the provider", blank=False
+    )
+    service = models.TextField(
+        "Service of the resource, as set by the provider", blank=False
+    )
+    type = models.TextField("Type of the resource, as set by the provider", blank=False)
+
+    text_search = models.GeneratedField(
+        expression=SearchVector("uid", weight="A", config="simple")
+        + SearchVector("name", weight="B", config="simple")
+        + SearchVector("region", weight="C", config="simple")
+        + SearchVector("service", "type", weight="D", config="simple"),
+        output_field=SearchVectorField(),
+        db_persist=True,
+        null=True,
+        editable=False,
+    )
+
+    tags = models.ManyToManyField(
+        ResourceTag,
+        verbose_name="Tags associated with the resource, by provider",
+        through="ResourceTagMapping",
+    )
+
+    def get_tags(self) -> dict:
+        return {tag.key: tag.value for tag in self.tags.all()}
+
+    def clear_tags(self):
+        self.tags.clear()
+        self.save()
+
+    def upsert_or_delete_tags(self, tags: list[ResourceTag] | None):
+        if tags is None:
+            self.clear_tags()
+            return
+
+        # Add new relationships with the tenant_id field
+        for tag in tags:
+            ResourceTagMapping.objects.update_or_create(
+                tag=tag, resource=self, tenant_id=self.tenant_id
+            )
+
+        # Save the instance
+        self.save()
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "resources"
+
+        indexes = [
+            models.Index(
+                fields=["uid", "region", "service", "name"],
+                name="resource_uid_reg_serv_name_idx",
+            ),
+            GinIndex(fields=["text_search"], name="gin_resources_search_idx"),
+        ]
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant_id", "provider_id", "uid"),
+                name="unique_resources_by_provider",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "resources"
+
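+# Tag-handling sketch (illustrative; assumes `resource` is a saved Resource).
+# `upsert_or_delete_tags` writes the through-model rows directly so that the
+# `tenant_id` column required by the row-level security policies is always set:
+#
+#     tag, _ = ResourceTag.objects.get_or_create(
+#         tenant_id=resource.tenant_id, key="env", value="prod"
+#     )
+#     resource.upsert_or_delete_tags([tag])
+#     resource.get_tags()  # {"env": "prod"}
+#     resource.upsert_or_delete_tags(None)  # removes all tag mappings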
+
+class ResourceTagMapping(RowLevelSecurityProtectedModel):
+    # NOTE that we don't really need a primary key here,
+    #      but everything is easier with django if we do
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    resource = models.ForeignKey(Resource, on_delete=models.CASCADE)
+    tag = models.ForeignKey(ResourceTag, on_delete=models.CASCADE)
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "resource_tag_mappings"
+
+        # django will automatically create indexes for:
+        #   - resource_id
+        #   - tag_id
+        #   - tenant_id
+        #   - id
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant_id", "resource_id", "tag_id"),
+                name="unique_resource_tag_mappings_by_tenant",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+
+class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
+    """
+    Defines the Finding model.
+
+    Findings are stored in a partitioned table, with partitions created from the UUIDv7 `id` field.
+
+    Note: when creating migrations for this model, use `python manage.py pgmakemigrations` instead of `makemigrations`.
+    """
+
+    class PartitioningMeta:
+        method = PostgresPartitioningMethod.RANGE
+        key = ["id"]
+
+    class DeltaChoices(models.TextChoices):
+        NEW = "new", _("New")
+        CHANGED = "changed", _("Changed")
+
+    id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+
+    uid = models.CharField(max_length=300)
+    delta = FindingDeltaEnumField(
+        choices=DeltaChoices.choices,
+        blank=True,
+        null=True,
+    )
+
+    status = StatusEnumField(choices=StatusChoices.choices)
+    status_extended = models.TextField(blank=True, null=True)
+
+    severity = SeverityEnumField(choices=SeverityChoices.choices)
+
+    impact = SeverityEnumField(choices=SeverityChoices.choices)
+    impact_extended = models.TextField(blank=True, null=True)
+
+    raw_result = models.JSONField(default=dict)
+    tags = models.JSONField(default=dict, null=True, blank=True)
+    check_id = models.CharField(max_length=100, blank=False, null=False)
+    check_metadata = models.JSONField(default=dict, null=False)
+
+    # Relationships
+    scan = models.ForeignKey(to=Scan, related_name="findings", on_delete=models.CASCADE)
+
+    # many-to-many Resources. Relationship is defined on Resource
+    resources = models.ManyToManyField(
+        Resource,
+        verbose_name="Resources associated with the finding",
+        through="ResourceFindingMapping",
+        related_name="findings",
+    )
+
+    # TODO: Add resource search
+    text_search = models.GeneratedField(
+        expression=SearchVector(
+            "impact_extended", "status_extended", weight="A", config="simple"
+        ),
+        output_field=SearchVectorField(),
+        db_persist=True,
+        null=True,
+        editable=False,
+    )
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "findings"
+
+        constraints = [
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "UPDATE", "INSERT", "DELETE"],
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s_default",
+                partition_name="default",
+                statements=["SELECT", "UPDATE", "INSERT", "DELETE"],
+            ),
+        ]
+
+        indexes = [
+            models.Index(fields=["uid"], name="findings_uid_idx"),
+            models.Index(
+                fields=[
+                    "scan_id",
+                    "impact",
+                    "severity",
+                    "status",
+                    "check_id",
+                    "delta",
+                ],
+                name="findings_filter_idx",
+            ),
+            GinIndex(fields=["text_search"], name="gin_findings_search_idx"),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "findings"
+
+    def add_resources(self, resources: list[Resource] | None):
+        if resources is None:
+            return
+
+        # Add new relationships with the tenant_id field
+        for resource in resources:
+            ResourceFindingMapping.objects.update_or_create(
+                resource=resource, finding=self, tenant_id=self.tenant_id
+            )
+
+        # Save the instance
+        self.save()
+
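+# Partition-pruning sketch (illustrative; `datetime_to_uuid7` lives in
+# `api.uuid_utils` and is what the partitioning strategy in `api/partitions.py`
+# uses to compute range bounds). Because the primary key is a UUIDv7, its
+# leading bits encode the creation time, so a time-derived `id` range lets
+# PostgreSQL skip partitions outside the window:
+#
+#     start = datetime_to_uuid7(datetime(2024, 1, 1, tzinfo=timezone.utc))
+#     end = datetime_to_uuid7(datetime(2024, 2, 1, tzinfo=timezone.utc))
+#     Finding.objects.filter(id__gte=start, id__lt=end)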
+
+class ResourceFindingMapping(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
+    """
+    Defines the ResourceFindingMapping model.
+
+    ResourceFindingMapping is used to map a Finding to a Resource.
+
+    It follows the same partitioning strategy as the Finding model.
+    """
+
+    # NOTE that we don't really need a primary key here,
+    #      but everything is easier with django if we do
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    resource = models.ForeignKey(Resource, on_delete=models.CASCADE)
+    finding = models.ForeignKey(Finding, on_delete=models.CASCADE)
+
+    class PartitioningMeta:
+        method = PostgresPartitioningMethod.RANGE
+        key = ["finding_id"]
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "resource_finding_mappings"
+        base_manager_name = "objects"
+        abstract = False
+
+        # django will automatically create indexes for:
+        #   - resource_id
+        #   - finding_id
+        #   - tenant_id
+        #   - id
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant_id", "resource_id", "finding_id"),
+                name="unique_resource_finding_mappings_by_tenant",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+            RowLevelSecurityConstraint(
+                "tenant_id",
+                name=f"rls_on_{db_table}_default",
+                partition_name="default",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+
+class ProviderSecret(RowLevelSecurityProtectedModel):
+    class TypeChoices(models.TextChoices):
+        STATIC = "static", _("Key-value pairs")
+        ROLE = "role", _("Role assumption")
+
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+    name = models.CharField(
+        blank=True, null=True, max_length=100, validators=[MinLengthValidator(3)]
+    )
+    secret_type = ProviderSecretTypeEnumField(choices=TypeChoices.choices)
+    _secret = models.BinaryField(db_column="secret")
+    provider = models.OneToOneField(
+        Provider,
+        on_delete=models.CASCADE,
+        related_name="secret",
+        related_query_name="secret",
+    )
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "provider_secrets"
+
+        constraints = [
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "provider-secrets"
+
+    @property
+    def secret(self):
+        if isinstance(self._secret, memoryview):
+            encrypted_bytes = self._secret.tobytes()
+        elif isinstance(self._secret, str):
+            encrypted_bytes = self._secret.encode()
+        else:
+            encrypted_bytes = self._secret
+        decrypted_data = fernet.decrypt(encrypted_bytes)
+        return json.loads(decrypted_data.decode())
+
+    @secret.setter
+    def secret(self, value):
+        encrypted_data = fernet.encrypt(json.dumps(value).encode())
+        self._secret = encrypted_data
+
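+# Round-trip sketch (illustrative; `fernet` is the module-level Fernet helper
+# built from the configured encryption key, imported above this excerpt).
+# Values are JSON-serialized and encrypted on assignment and decrypted on
+# access, so only ciphertext ever reaches the `secret` column:
+#
+#     ps = ProviderSecret(tenant_id=tenant.id, provider=provider,
+#                         secret_type=ProviderSecret.TypeChoices.STATIC)
+#     ps.secret = {"aws_access_key_id": "AKIA...", "aws_secret_access_key": "..."}
+#     ps.save()
+#     ps.secret  # {"aws_access_key_id": "AKIA...", ...}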
+
+class Invitation(RowLevelSecurityProtectedModel):
+    class State(models.TextChoices):
+        PENDING = "pending", _("Invitation is pending")
+        ACCEPTED = "accepted", _("Invitation was accepted by a user")
+        EXPIRED = "expired", _("Invitation expired after the configured time")
+        REVOKED = "revoked", _("Invitation was revoked by a user")
+
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+    email = models.EmailField(max_length=254, blank=False, null=False)
+    state = InvitationStateEnumField(choices=State.choices, default=State.PENDING)
+    token = models.CharField(
+        max_length=14,
+        unique=True,
+        default=generate_random_token,
+        editable=False,
+        blank=False,
+        null=False,
+        validators=[MinLengthValidator(14)],
+    )
+    expires_at = models.DateTimeField(default=one_week_from_now)
+    inviter = models.ForeignKey(
+        User,
+        on_delete=models.SET_NULL,
+        related_name="invitations",
+        related_query_name="invitation",
+        null=True,
+    )
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "invitations"
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant", "token", "email"),
+                name="unique_tenant_token_email_by_invitation",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "invitations"
+
+
+class ComplianceOverview(RowLevelSecurityProtectedModel):
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    compliance_id = models.CharField(max_length=100, blank=False, null=False)
+    framework = models.CharField(max_length=100, blank=False, null=False)
+    version = models.CharField(max_length=50, blank=True)
+    description = models.TextField(blank=True)
+    region = models.CharField(max_length=50, blank=True)
+    requirements = models.JSONField(default=dict)
+    requirements_passed = models.IntegerField(default=0)
+    requirements_failed = models.IntegerField(default=0)
+    requirements_manual = models.IntegerField(default=0)
+    total_requirements = models.IntegerField(default=0)
+
+    scan = models.ForeignKey(
+        Scan,
+        on_delete=models.CASCADE,
+        related_name="compliance_overviews",
+        related_query_name="compliance_overview",
+        null=True,
+    )
+
+    class Meta(RowLevelSecurityProtectedModel.Meta):
+        db_table = "compliance_overviews"
+
+        constraints = [
+            models.UniqueConstraint(
+                fields=("tenant", "scan", "compliance_id", "region"),
+                name="unique_tenant_scan_region_compliance_by_compliance_overview",
+            ),
+            RowLevelSecurityConstraint(
+                field="tenant_id",
+                name="rls_on_%(class)s",
+                statements=["SELECT", "INSERT", "DELETE"],
+            ),
+        ]
+        indexes = [
+            models.Index(fields=["compliance_id"], name="comp_ov_cp_id_idx"),
+            models.Index(fields=["requirements_failed"], name="comp_ov_req_fail_idx"),
+            models.Index(
+                fields=["compliance_id", "requirements_failed"],
+                name="comp_ov_cp_id_req_fail_idx",
+            ),
+        ]
+
+    class JSONAPIMeta:
+        resource_name = "compliance-overviews"
diff --git a/api/src/backend/api/pagination.py b/api/src/backend/api/pagination.py
new file mode 100644
index 0000000000..8f37c9ba78
--- /dev/null
+++ b/api/src/backend/api/pagination.py
@@ -0,0 +1,6 @@
+from rest_framework_json_api.pagination import JsonApiPageNumberPagination
+
+
+class ComplianceOverviewPagination(JsonApiPageNumberPagination):
+    page_size = 50
+    max_page_size = 100
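+
+
+# Usage sketch (illustrative): clients page through results with the standard
+# JSON:API query parameters, e.g.
+# GET /api/v1/compliance-overviews?page[number]=2&page[size]=100
+# Requested `page[size]` values above `max_page_size` are capped at 100.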
diff --git a/api/src/backend/api/partitions.py b/api/src/backend/api/partitions.py
new file mode 100644
index 0000000000..92390ffdec
--- /dev/null
+++ b/api/src/backend/api/partitions.py
@@ -0,0 +1,203 @@
+from datetime import datetime, timezone
+from typing import Generator, Optional
+
+from dateutil.relativedelta import relativedelta
+from django.conf import settings
+from psqlextra.partitioning import (
+    PostgresPartitioningManager,
+    PostgresRangePartition,
+    PostgresRangePartitioningStrategy,
+    PostgresTimePartitionSize,
+    PostgresPartitioningError,
+)
+from psqlextra.partitioning.config import PostgresPartitioningConfig
+from uuid6 import UUID
+
+from api.models import Finding, ResourceFindingMapping
+from api.rls import RowLevelSecurityConstraint
+from api.uuid_utils import datetime_to_uuid7
+
+
+class PostgresUUIDv7RangePartition(PostgresRangePartition):
+    def __init__(
+        self,
+        from_values: UUID,
+        to_values: UUID,
+        size: PostgresTimePartitionSize,
+        name_format: Optional[str] = None,
+        **kwargs,
+    ) -> None:
+        self.from_values = from_values
+        self.to_values = to_values
+        self.size = size
+        self.name_format = name_format
+
+        self.rls_statements = kwargs.get("rls_statements")
+
+        start_timestamp_ms = self.from_values.time
+
+        self.start_datetime = datetime.fromtimestamp(
+            start_timestamp_ms / 1000, timezone.utc
+        )
+
+    def name(self) -> str:
+        if not self.name_format:
+            raise PostgresPartitioningError("No name_format set for this partition")
+
+        return self.start_datetime.strftime(self.name_format).lower()
+
+    def deconstruct(self) -> dict:
+        return {
+            **super().deconstruct(),
+            "size_unit": self.size.unit.value,
+            "size_value": self.size.value,
+        }
+
+    def create(
+        self,
+        model,
+        schema_editor,
+        comment,
+    ) -> None:
+        super().create(model, schema_editor, comment)
+
+        # if this model has RLS statements, add them to the partition
+        if isinstance(self.rls_statements, list):
+            schema_editor.add_constraint(
+                model,
+                constraint=RowLevelSecurityConstraint(
+                    "tenant_id",
+                    name=f"rls_on_{self.name()}",
+                    partition_name=self.name(),
+                    statements=self.rls_statements,
+                ),
+            )
+
+
+class PostgresUUIDv7PartitioningStrategy(PostgresRangePartitioningStrategy):
+    def __init__(
+        self,
+        size: PostgresTimePartitionSize,
+        count: int,
+        start_date: Optional[datetime] = None,
+        max_age: Optional[relativedelta] = None,
+        name_format: Optional[str] = None,
+        **kwargs,
+    ) -> None:
+        self.start_date = (
+            start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
+            if start_date
+            else None
+        )
+        self.size = size
+        self.count = count
+        self.max_age = max_age
+        self.name_format = name_format
+
+        self.rls_statements = kwargs.get("rls_statements")
+
+    def to_create(self) -> Generator[PostgresUUIDv7RangePartition, None, None]:
+        current_datetime = (
+            self.start_date if self.start_date else self.get_start_datetime()
+        )
+
+        for _ in range(self.count):
+            end_datetime = (
+                current_datetime + self.size.as_delta() - relativedelta(microseconds=1)
+            )
+            start_uuid7 = datetime_to_uuid7(current_datetime)
+            end_uuid7 = datetime_to_uuid7(end_datetime)
+
+            yield PostgresUUIDv7RangePartition(
+                from_values=start_uuid7,
+                to_values=end_uuid7,
+                size=self.size,
+                name_format=self.name_format,
+                rls_statements=self.rls_statements,
+            )
+
+            current_datetime += self.size.as_delta()
+
+    def to_delete(self) -> Generator[PostgresUUIDv7RangePartition, None, None]:
+        if not self.max_age:
+            return
+
+        current_datetime = self.get_start_datetime() - self.max_age
+
+        while True:
+            end_datetime = current_datetime + self.size.as_delta()
+            start_uuid7 = datetime_to_uuid7(current_datetime)
+            end_uuid7 = datetime_to_uuid7(end_datetime)
+
+            # dropping the partition table also drops its indexes and RLS policies
+            yield PostgresUUIDv7RangePartition(
+                from_values=start_uuid7,
+                to_values=end_uuid7,
+                size=self.size,
+                name_format=self.name_format,
+            )
+
+            current_datetime -= self.size.as_delta()
+
+    def get_start_datetime(self) -> datetime:
+        """
+        Gets the start of the current month in UTC timezone.
+
+        This function returns a `datetime` object set to the first day of the current
+        month, at midnight (00:00:00), in UTC.
+
+        Returns:
+            datetime: A `datetime` object representing the start of the current month in UTC.
+        """
+        return datetime.now(timezone.utc).replace(
+            day=1, hour=0, minute=0, second=0, microsecond=0
+        )
+
+
+def relative_months_or_none(value):
+    if value is None:
+        return None
+    return relativedelta(months=value)
+
+
+#
+# To manage the partitions, run `python manage.py pgpartition --using admin`
+#
+# For more info on the partitioning manager, see https://github.com/SectorLabs/django-postgres-extra
+manager = PostgresPartitioningManager(
+    [
+        PostgresPartitioningConfig(
+            model=Finding,
+            strategy=PostgresUUIDv7PartitioningStrategy(
+                start_date=datetime.now(timezone.utc),
+                size=PostgresTimePartitionSize(
+                    months=settings.FINDINGS_TABLE_PARTITION_MONTHS
+                ),
+                count=settings.FINDINGS_TABLE_PARTITION_COUNT,
+                max_age=relative_months_or_none(
+                    settings.FINDINGS_TABLE_PARTITION_MAX_AGE_MONTHS
+                ),
+                name_format="%Y_%b",
+                rls_statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
+            ),
+        ),
+        # ResourceFindingMapping should always follow the Finding partitioning
+        PostgresPartitioningConfig(
+            model=ResourceFindingMapping,
+            strategy=PostgresUUIDv7PartitioningStrategy(
+                start_date=datetime.now(timezone.utc),
+                size=PostgresTimePartitionSize(
+                    months=settings.FINDINGS_TABLE_PARTITION_MONTHS
+                ),
+                count=settings.FINDINGS_TABLE_PARTITION_COUNT,
+                max_age=relative_months_or_none(
+                    settings.FINDINGS_TABLE_PARTITION_MAX_AGE_MONTHS
+                ),
+                name_format="%Y_%b",
+                rls_statements=["SELECT"],
+            ),
+        ),
+    ]
+)
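+
+# Naming sketch (illustrative): with `name_format="%Y_%b"`, a partition whose
+# range starts on 2024-01-01 UTC is named `2024_jan` (the strftime output,
+# lowercased), its backing table becomes `findings_2024_jan`, and the `create`
+# hook above attaches a `RowLevelSecurityConstraint` named `rls_on_2024_jan`
+# directly to that partition table.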
diff --git a/api/src/backend/api/renderers.py b/api/src/backend/api/renderers.py
new file mode 100644
index 0000000000..ccca52c26f
--- /dev/null
+++ b/api/src/backend/api/renderers.py
@@ -0,0 +1,23 @@
+from contextlib import nullcontext
+
+from rest_framework_json_api.renderers import JSONRenderer
+
+from api.db_utils import tenant_transaction
+
+
+class APIJSONRenderer(JSONRenderer):
+    """JSONRenderer override to apply tenant RLS when there are included resources in the request."""
+
+    def render(self, data, accepted_media_type=None, renderer_context=None):
+        request = renderer_context.get("request") if renderer_context else None
+        tenant_id = getattr(request, "tenant_id", None) if request else None
+        include_param_present = "include" in request.query_params if request else False
+
+        # Use tenant_transaction if needed for included resources, otherwise do nothing
+        context_manager = (
+            tenant_transaction(tenant_id)
+            if tenant_id and include_param_present
+            else nullcontext()
+        )
+        with context_manager:
+            return super().render(data, accepted_media_type, renderer_context)
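+
+
+# Why the wrapper matters (illustrative): with `?include=resources`, the
+# JSON:API renderer serializes the included queryset lazily at render time,
+# after the view's own tenant transaction may already have ended. Re-entering
+# `tenant_transaction(tenant_id)` here re-sets the PostgreSQL tenant variable
+# so the row-level security policies still resolve the caller's tenant.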
diff --git a/api/src/backend/api/rls.py b/api/src/backend/api/rls.py
new file mode 100644
index 0000000000..c7b15a82dd
--- /dev/null
+++ b/api/src/backend/api/rls.py
@@ -0,0 +1,188 @@
+from typing import Any
+from uuid import uuid4
+
+from django.core.exceptions import ValidationError
+from django.db import DEFAULT_DB_ALIAS
+from django.db import models
+from django.db.backends.ddl_references import Statement, Table
+
+from api.db_utils import DB_USER, POSTGRES_TENANT_VAR
+
+
+class Tenant(models.Model):
+    """
+    The Tenant is the basic grouping in the system. It is used to separate data between customers.
+    """
+
+    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
+
+    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
+    updated_at = models.DateTimeField(auto_now=True, editable=False)
+    name = models.CharField(max_length=100)
+
+    class Meta:
+        db_table = "tenants"
+
+    class JSONAPIMeta:
+        resource_name = "tenants"
+
+
+class RowLevelSecurityConstraint(models.BaseConstraint):
+    """
+    Model constraint to enforce row-level security on a tenant-based model and to
+    grant least-privilege access to the API database user.
+
+    The constraint can be applied to a partitioned table by specifying the `partition_name` keyword argument.
+    """
+
+    rls_sql_query = """
+        ALTER TABLE %(table_name)s ENABLE ROW LEVEL SECURITY;
+        ALTER TABLE %(table_name)s FORCE ROW LEVEL SECURITY;
+    """
+
+    policy_sql_query = """
+        CREATE POLICY %(db_user)s_%(table_name)s_{statement}
+        ON %(table_name)s
+        FOR {statement}
+        TO %(db_user)s
+        {clause} (
+            CASE
+                WHEN current_setting('%(tenant_setting)s', True) IS NULL THEN FALSE
+                ELSE %(field_column)s = current_setting('%(tenant_setting)s')::uuid
+            END
+        );
+    """
+
+    grant_sql_query = """
+        GRANT {statement} ON %(table_name)s TO %(db_user)s;
+    """
+
+    drop_sql_query = """
+        ALTER TABLE %(table_name)s NO FORCE ROW LEVEL SECURITY;
+        ALTER TABLE %(table_name)s DISABLE ROW LEVEL SECURITY;
+        REVOKE ALL ON TABLE %(table_name)s FROM %(db_user)s;
+    """
+
+    drop_policy_sql_query = """
+        DROP POLICY IF EXISTS %(db_user)s_%(table_name)s_{statement} on %(table_name)s;
+    """
+
+    def __init__(
+        self, field: str, name: str, statements: list | None = None, **kwargs
+    ) -> None:
+        super().__init__(name=name)
+        self.target_field: str = field
+        self.statements = statements or ["SELECT"]
+        self.partition_name = kwargs.get("partition_name")
+
+    def create_sql(self, model: Any, schema_editor: Any) -> Any:
+        field_column = schema_editor.quote_name(self.target_field)
+
+        policy_queries = ""
+        grant_queries = ""
+        for statement in self.statements:
+            clause = f"{'WITH CHECK' if statement == 'INSERT' else 'USING'}"
+            policy_queries = f"{policy_queries}{self.policy_sql_query.format(statement=statement, clause=clause)}"
+            grant_queries = (
+                f"{grant_queries}{self.grant_sql_query.format(statement=statement)}"
+            )
+
+        full_create_sql_query = self.rls_sql_query + policy_queries + grant_queries
+
+        table_name = model._meta.db_table
+        if self.partition_name:
+            table_name = f"{table_name}_{self.partition_name}"
+
+        return Statement(
+            full_create_sql_query,
+            table_name=table_name,
+            field_column=field_column,
+            db_user=DB_USER,
+            tenant_setting=POSTGRES_TENANT_VAR,
+            partition_name=self.partition_name,
+        )
+
+    def remove_sql(self, model: Any, schema_editor: Any) -> Any:
+        field_column = schema_editor.quote_name(self.target_field)
+        full_drop_sql_query = self.drop_sql_query + "".join(
+            self.drop_policy_sql_query.format(statement=statement)
+            for statement in self.statements
+        )
+        table_name = model._meta.db_table
+        if self.partition_name:
+            table_name = f"{table_name}_{self.partition_name}"
+        return Statement(
+            full_drop_sql_query,
+            # pass the raw table name (as create_sql does) so the policy name in
+            # DROP POLICY is not corrupted by identifier quoting
+            table_name=table_name,
+            field_column=field_column,
+            db_user=DB_USER,
+            partition_name=self.partition_name,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if isinstance(other, RowLevelSecurityConstraint):
+            return self.name == other.name and self.target_field == other.target_field
+        return super().__eq__(other)
+
+    def deconstruct(self) -> tuple[str, tuple, dict]:
+        path, _, kwargs = super().deconstruct()
+        kwargs["statements"] = self.statements
+        if self.partition_name:
+            kwargs["partition_name"] = self.partition_name
+        return (path, (self.target_field,), kwargs)
+
+    def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS):  # noqa: F841
+        if not hasattr(instance, "tenant_id"):
+            raise ValidationError(f"{model.__name__} does not have a tenant_id field.")
+
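+# Rendered SQL sketch (illustrative; assumes DB_USER and POSTGRES_TENANT_VAR
+# from `api.db_utils` resolve to the hypothetical values `prowler_user` and
+# `api.tenant_id`). For a SELECT statement on the `findings` table,
+# `create_sql` emits roughly:
+#
+#     CREATE POLICY prowler_user_findings_SELECT
+#     ON findings FOR SELECT TO prowler_user
+#     USING (
+#         CASE
+#             WHEN current_setting('api.tenant_id', True) IS NULL THEN FALSE
+#             ELSE "tenant_id" = current_setting('api.tenant_id')::uuid
+#         END
+#     );
+#     GRANT SELECT ON findings TO prowler_user;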
+
+class BaseSecurityConstraint(models.BaseConstraint):
+    """Model constraint to grant the least privileges to the API database user."""
+
+    grant_sql_query = """
+        GRANT {statement} ON %(table_name)s TO %(db_user)s;
+    """
+
+    drop_sql_query = """
+        REVOKE ALL ON TABLE %(table_name)s FROM %(db_user)s;
+    """
+
+    def __init__(self, name: str, statements: list | None = None) -> None:
+        super().__init__(name=name)
+        self.statements = statements or ["SELECT"]
+
+    def create_sql(self, model: Any, schema_editor: Any) -> Any:
+        grant_queries = ""
+        for statement in self.statements:
+            grant_queries = (
+                f"{grant_queries}{self.grant_sql_query.format(statement=statement)}"
+            )
+
+        return Statement(
+            grant_queries,
+            table_name=model._meta.db_table,
+            db_user=DB_USER,
+        )
+
+    def remove_sql(self, model: Any, schema_editor: Any) -> Any:
+        return Statement(
+            self.drop_sql_query,
+            table_name=Table(model._meta.db_table, schema_editor.quote_name),
+            db_user=DB_USER,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if isinstance(other, BaseSecurityConstraint):
+            return self.name == other.name
+        return super().__eq__(other)
+
+    def deconstruct(self) -> tuple[str, tuple, dict]:
+        path, args, kwargs = super().deconstruct()
+        kwargs["statements"] = self.statements
+        return path, args, kwargs
+
+
+class RowLevelSecurityProtectedModel(models.Model):
+    tenant = models.ForeignKey("Tenant", on_delete=models.CASCADE)
+
+    class Meta:
+        abstract = True
diff --git a/api/src/backend/api/signals.py b/api/src/backend/api/signals.py
new file mode 100644
index 0000000000..44c2e0b4fd
--- /dev/null
+++ b/api/src/backend/api/signals.py
@@ -0,0 +1,35 @@
+from celery import states
+from celery.signals import before_task_publish
+from django.db.models.signals import post_delete
+from django.dispatch import receiver
+from django_celery_beat.models import PeriodicTask
+from django_celery_results.backends.database import DatabaseBackend
+
+from api.models import Provider
+from config.celery import celery_app
+
+
+def create_task_result_on_publish(sender=None, headers=None, **kwargs):  # noqa: F841
+    """Celery signal to store TaskResult entries when tasks reach the broker."""
+    db_result_backend = DatabaseBackend(celery_app)
+    # Expose the message headers as attributes on a throwaway object, which is
+    # the shape the result backend expects for a task request
+    request = type("request", (object,), headers)
+
+    db_result_backend.store_result(
+        headers["id"],
+        None,
+        states.PENDING,
+        traceback=None,
+        request=request,
+    )
+
+
+before_task_publish.connect(
+    create_task_result_on_publish, dispatch_uid="create_task_result_on_publish"
+)
+
+
+@receiver(post_delete, sender=Provider)
+def delete_provider_scan_task(sender, instance, **kwargs):  # noqa: F841
+    # Delete the associated periodic task when the provider is deleted
+    task_name = f"scan-perform-scheduled-{instance.id}"
+    PeriodicTask.objects.filter(name=task_name).delete()
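+
+
+# Effect sketch (illustrative): because a PENDING TaskResult row exists from
+# the moment the message is published, an API client that triggers a task can
+# query the tasks endpoint immediately, before any worker has picked the task
+# up, instead of finding no result at all.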
diff --git a/api/src/backend/api/specs/v1.yaml b/api/src/backend/api/specs/v1.yaml
new file mode 100644
index 0000000000..755ab8c8df
--- /dev/null
+++ b/api/src/backend/api/specs/v1.yaml
@@ -0,0 +1,6951 @@
+openapi: 3.0.3
+info:
+  title: Prowler API
+  version: 1.0.0
+  description: |-
+    Prowler API specification.
+
+    This file is auto-generated.
+paths:
+  /api/v1/compliance-overviews:
+    get:
+      operationId: compliance_overviews_list
+      description: Retrieve an overview of all the compliance frameworks in a given
+        scan. If no region filters are provided, the region with the most failed
+        requirements is returned by default.
+      summary: List compliance overviews for a scan
+      parameters:
+      - in: query
+        name: fields[compliance-overviews]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - compliance_id
+            - framework
+            - version
+            - requirements_status
+            - region
+            - provider_type
+            - scan
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[compliance_id]
+        schema:
+          type: string
+      - in: query
+        name: filter[compliance_id__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[framework]
+        schema:
+          type: string
+      - in: query
+        name: filter[framework__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[framework__iexact]
+        schema:
+          type: string
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[provider_type]
+        schema:
+          type: string
+          enum:
+          - aws
+          - azure
+          - gcp
+          - kubernetes
+        description: |-
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+      - in: query
+        name: filter[provider_type__in]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - aws
+            - azure
+            - gcp
+            - kubernetes
+        description: |-
+          Multiple values may be separated by commas.
+
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+        explode: false
+        style: form
+      - in: query
+        name: filter[region]
+        schema:
+          type: string
+      - in: query
+        name: filter[region__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[region__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[scan_id]
+        schema:
+          type: string
+          format: uuid
+        description: Related scan ID.
+        required: true
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[version]
+        schema:
+          type: string
+      - in: query
+        name: filter[version__icontains]
+        schema:
+          type: string
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - -inserted_at
+            - compliance_id
+            - -compliance_id
+            - framework
+            - -framework
+            - region
+            - -region
+        explode: false
+      tags:
+      - Compliance Overview
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedComplianceOverviewList'
+          description: ''
+  /api/v1/compliance-overviews/{id}:
+    get:
+      operationId: compliance_overviews_retrieve
+      description: Fetch detailed information about a specific compliance overview
+        by its ID, including detailed requirement information and check statuses.
+      summary: Retrieve data from a specific compliance overview
+      parameters:
+      - in: query
+        name: fields[compliance-overviews]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - compliance_id
+            - framework
+            - version
+            - requirements_status
+            - region
+            - provider_type
+            - scan
+            - url
+            - description
+            - requirements
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this compliance overview.
+        required: true
+      tags:
+      - Compliance Overview
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ComplianceOverviewFullResponse'
+          description: ''
+  /api/v1/findings:
+    get:
+      operationId: findings_list
+      description: Retrieve a list of all findings with options for filtering by various
+        criteria.
+      summary: List all findings
+      parameters:
+      - in: query
+        name: fields[findings]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - uid
+            - delta
+            - status
+            - status_extended
+            - severity
+            - check_id
+            - check_metadata
+            - raw_result
+            - inserted_at
+            - updated_at
+            - url
+            - scan
+            - resources
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[check_id]
+        schema:
+          type: string
+      - in: query
+        name: filter[check_id__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[check_id__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[delta]
+        schema:
+          type: string
+          nullable: true
+          enum:
+          - changed
+          - new
+        description: |-
+          * `new` - New
+          * `changed` - Changed
+      - in: query
+        name: filter[delta__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[id]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[id__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[impact]
+        schema:
+          type: string
+          enum:
+          - critical
+          - high
+          - informational
+          - low
+          - medium
+        description: |-
+          * `critical` - Critical
+          * `high` - High
+          * `medium` - Medium
+          * `low` - Low
+          * `informational` - Informational
+      - in: query
+        name: filter[impact__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[provider]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[provider__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_alias]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_alias__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_alias__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_type]
+        schema:
+          type: string
+          enum:
+          - aws
+          - azure
+          - gcp
+          - kubernetes
+        description: |-
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+      - in: query
+        name: filter[provider_type__in]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - aws
+            - azure
+            - gcp
+            - kubernetes
+        description: |-
+          Multiple values may be separated by commas.
+
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_uid__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_uid__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[region]
+        schema:
+          type: string
+      - in: query
+        name: filter[region__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[region__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[resource_name]
+        schema:
+          type: string
+      - in: query
+        name: filter[resource_name__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[resource_name__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[resource_type]
+        schema:
+          type: string
+      - in: query
+        name: filter[resource_type__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[resource_type__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[resource_uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[resource_uid__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[resource_uid__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[resources]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[scan]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[scan__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[service]
+        schema:
+          type: string
+      - in: query
+        name: filter[service__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[service__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[severity]
+        schema:
+          type: string
+          enum:
+          - critical
+          - high
+          - informational
+          - low
+          - medium
+        description: |-
+          * `critical` - Critical
+          * `high` - High
+          * `medium` - Medium
+          * `low` - Low
+          * `informational` - Informational
+      - in: query
+        name: filter[severity__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[status]
+        schema:
+          type: string
+          enum:
+          - FAIL
+          - MANUAL
+          - MUTED
+          - PASS
+        description: |-
+          * `FAIL` - Fail
+          * `PASS` - Pass
+          * `MANUAL` - Manual
+          * `MUTED` - Muted
+      - in: query
+        name: filter[status__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[uid__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[updated_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: include
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - scan
+            - resources
+        description: Use the include query parameter to customize which related
+          resources are returned.
+        explode: false
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - id
+            - -id
+            - status
+            - -status
+            - severity
+            - -severity
+            - check_id
+            - -check_id
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+        explode: false
+      tags:
+      - Finding
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedFindingList'
+          description: ''
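+  # Illustrative usage (a sketch, not part of the generated schema; the token is
+  # a placeholder): list the newest critical and high failed findings.
+  #   GET /api/v1/findings?filter[severity__in]=critical,high&filter[status]=FAIL&sort=-inserted_at
+  #   Accept: application/vnd.api+json
+  #   Authorization: Bearer <access-token>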
+  /api/v1/findings/{id}:
+    get:
+      operationId: findings_retrieve
+      description: Fetch detailed information about a specific finding by its ID.
+      summary: Retrieve data from a specific finding
+      parameters:
+      - in: query
+        name: fields[findings]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - uid
+            - delta
+            - status
+            - status_extended
+            - severity
+            - check_id
+            - check_metadata
+            - raw_result
+            - inserted_at
+            - updated_at
+            - url
+            - scan
+            - resources
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this finding.
+        required: true
+      - in: query
+        name: include
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - scan
+            - resources
+        description: Use the include query parameter to customize which related
+          resources are returned.
+        explode: false
+      tags:
+      - Finding
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/FindingResponse'
+          description: ''
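+  # Illustrative usage (a sketch; the UUID is a placeholder): fetch one finding
+  # with sparse fields plus its related scan and resources.
+  #   GET /api/v1/findings/<finding-uuid>?fields[findings]=status,severity,check_id&include=scan,resources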
+  /api/v1/invitations/accept:
+    post:
+      operationId: invitations_accept_create
+      description: Accept an invitation to an existing tenant. The invitation must
+        not be expired, and the invited email must match the user's email.
+      summary: Accept an invitation
+      tags:
+      - Invitation
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/InvitationAcceptRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/InvitationAcceptRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/InvitationAcceptRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OpenApiResponseResponse'
+          description: ''
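+  # Illustrative request (a sketch; the attribute names are defined by the
+  # InvitationAcceptRequest schema in the components section and are elided here):
+  #   POST /api/v1/invitations/accept
+  #   Content-Type: application/vnd.api+json
+  #   { "data": { "type": "...", "attributes": { ... } } }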
+  /api/v1/overviews/providers:
+    get:
+      operationId: overviews_providers_retrieve
+      description: Fetch aggregated summaries of the latest findings and resources
+        for each provider. This includes counts of passed, failed, and manual findings,
+        as well as the total number of resources managed by each provider.
+      summary: List aggregated overview data for providers
+      parameters:
+      - in: query
+        name: fields[provider-overviews]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - id
+            - findings
+            - resources
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      tags:
+      - Overview
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OverviewProviderResponse'
+          description: ''
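+  # Illustrative usage (a sketch): request only the findings and resources counts.
+  #   GET /api/v1/overviews/providers?fields[provider-overviews]=findings,resources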
+  /api/v1/provider_groups:
+    get:
+      operationId: provider_groups_list
+      description: Retrieve a list of all provider groups with options for filtering
+        by various criteria.
+      summary: List all provider groups
+      parameters:
+      - in: query
+        name: fields[provider-groups]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - inserted_at
+            - updated_at
+            - providers
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[id]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[id__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[updated_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - id
+            - -id
+            - name
+            - -name
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+            - providers
+            - -providers
+            - url
+            - -url
+        explode: false
+      tags:
+      - Provider Group
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedProviderGroupList'
+          description: ''
+    post:
+      operationId: provider_groups_create
+      description: Add a new provider group to the system by providing the required
+        provider group details.
+      summary: Create a new provider group
+      tags:
+      - Provider Group
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/ProviderGroupRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/ProviderGroupRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/ProviderGroupRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderGroupResponse'
+          description: ''
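+  # Illustrative request (a sketch; "name" is assumed from the
+  # fields[provider-groups] enum above, and the full payload is defined by the
+  # ProviderGroupRequest schema):
+  #   POST /api/v1/provider_groups
+  #   Content-Type: application/vnd.api+json
+  #   { "data": { "type": "provider-groups", "attributes": { "name": "production" } } }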
+  /api/v1/provider_groups/{id}:
+    get:
+      operationId: provider_groups_retrieve
+      description: Fetch detailed information about a specific provider group by
+        its ID.
+      summary: Retrieve data from a provider group
+      parameters:
+      - in: query
+        name: fields[provider-groups]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - inserted_at
+            - updated_at
+            - providers
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider group.
+        required: true
+      tags:
+      - Provider Group
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderGroupResponse'
+          description: ''
+    patch:
+      operationId: provider_groups_partial_update
+      description: Update certain fields of an existing provider group's information
+        without affecting other fields.
+      summary: Partially update a provider group
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider group.
+        required: true
+      tags:
+      - Provider Group
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderGroupUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderGroupUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderGroupUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/SerializerMetaclassResponse'
+          description: ''
+    delete:
+      operationId: provider_groups_destroy
+      description: Remove a provider group from the system by its ID.
+      summary: Delete a provider group
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider group.
+        required: true
+      tags:
+      - Provider Group
+      security:
+      - jwtAuth: []
+      responses:
+        '204':
+          description: No response body
+  /api/v1/provider_groups/{id}/providers:
+    put:
+      operationId: provider_groups_providers_update
+      description: Add one or more providers to an existing provider group.
+      summary: Add providers to a provider group
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider group.
+        required: true
+      tags:
+      - Provider Group
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/ProviderGroupMembershipUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/ProviderGroupMembershipUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/ProviderGroupMembershipUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OpenApiResponseResponse'
+          description: ''
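+  # Illustrative request (a sketch; the exact payload is defined by the
+  # ProviderGroupMembershipUpdateRequest schema, so the identifier-list form
+  # shown here is an assumption):
+  #   PUT /api/v1/provider_groups/<group-uuid>/providers
+  #   Content-Type: application/vnd.api+json
+  #   { "data": [ { "type": "providers", "id": "<provider-uuid>" } ] }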
+  /api/v1/providers:
+    get:
+      operationId: providers_list
+      description: Retrieve a list of all providers with options for filtering by
+        various criteria.
+      summary: List all providers
+      parameters:
+      - in: query
+        name: fields[providers]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - provider
+            - uid
+            - alias
+            - connection
+            - secret
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[alias]
+        schema:
+          type: string
+      - in: query
+        name: filter[alias__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[alias__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[connected]
+        schema:
+          type: boolean
+      - in: query
+        name: filter[id]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[id__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[provider]
+        schema:
+          type: string
+          enum:
+          - aws
+          - azure
+          - gcp
+          - kubernetes
+        description: |-
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+      - in: query
+        name: filter[provider__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[uid__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[uid__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[updated_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - provider
+            - -provider
+            - uid
+            - -uid
+            - alias
+            - -alias
+            - connected
+            - -connected
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+        explode: false
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedProviderList'
+          description: ''
+    post:
+      operationId: providers_create
+      description: Add a new provider to the system by providing the required provider
+        details.
+      summary: Create a new provider
+      tags:
+      - Provider
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/ProviderCreateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/ProviderCreateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/ProviderCreateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderCreateResponse'
+          description: ''
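+  # Illustrative request (a sketch; "provider", "uid" and "alias" are assumed
+  # from the fields[providers] enum above, and the authoritative payload is the
+  # ProviderCreateRequest schema):
+  #   POST /api/v1/providers
+  #   Content-Type: application/vnd.api+json
+  #   { "data": { "type": "providers", "attributes": { "provider": "aws", "uid": "...", "alias": "..." } } }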
+  /api/v1/providers/{id}:
+    get:
+      operationId: providers_retrieve
+      description: Fetch detailed information about a specific provider by its ID.
+      summary: Retrieve data from a provider
+      parameters:
+      - in: query
+        name: fields[providers]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - provider
+            - uid
+            - alias
+            - connection
+            - secret
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider.
+        required: true
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderResponse'
+          description: ''
+    patch:
+      operationId: providers_partial_update
+      description: Update certain fields of an existing provider's information without
+        affecting other fields.
+      summary: Partially update a provider
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider.
+        required: true
+      tags:
+      - Provider
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/SerializerMetaclassResponse'
+          description: ''
+    delete:
+      operationId: providers_destroy
+      description: Remove a provider from the system by its ID.
+      summary: Delete a provider
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider.
+        required: true
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '202':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OpenApiResponseResponse'
+          description: ''
+  /api/v1/providers/{id}/connection:
+    post:
+      operationId: providers_connection_create
+      description: Try to verify the connection; for instance, that the role and
+        credentials are set correctly.
+      summary: Check connection
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this provider.
+        required: true
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '202':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OpenApiResponseResponse'
+          description: ''
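+  # Illustrative usage (a sketch; the UUID is a placeholder). The check runs
+  # asynchronously, which is why the endpoint answers with 202:
+  #   POST /api/v1/providers/<provider-uuid>/connection
+  #   Authorization: Bearer <access-token>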
+  /api/v1/providers/secrets:
+    get:
+      operationId: providers_secrets_list
+      description: Retrieve a list of all secrets with options for filtering by various
+        criteria.
+      summary: List all secrets
+      parameters:
+      - in: query
+        name: fields[provider-secrets]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - name
+            - secret_type
+            - provider
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider]
+        schema:
+          type: string
+          format: uuid
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - -name
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+        explode: false
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedProviderSecretList'
+          description: ''
+    post:
+      operationId: providers_secrets_create
+      description: Add a new secret to the system by providing the required secret
+        details.
+      summary: Create a new secret
+      tags:
+      - Provider
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/ProviderSecretCreateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/ProviderSecretCreateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/ProviderSecretCreateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderSecretCreateResponse'
+          description: ''
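+  # Illustrative request (a sketch; the attribute names, including the secret
+  # material itself, are defined by the ProviderSecretCreateRequest schema and
+  # are elided here):
+  #   POST /api/v1/providers/secrets
+  #   Content-Type: application/vnd.api+json
+  #   { "data": { "type": "provider-secrets", "attributes": { ... } } }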
+  /api/v1/providers/secrets/{id}:
+    get:
+      operationId: providers_secrets_retrieve
+      description: Fetch detailed information about a specific secret by its ID.
+      summary: Retrieve data from a secret
+      parameters:
+      - in: query
+        name: fields[provider-secrets]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - name
+            - secret_type
+            - provider
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderSecretResponse'
+          description: ''
+    patch:
+      operationId: providers_secrets_partial_update
+      description: Update certain fields of an existing secret's information without
+        affecting other fields.
+      summary: Partially update a secret
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Provider
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderSecretUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderSecretUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedProviderSecretUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ProviderSecretUpdateResponse'
+          description: ''
+    delete:
+      operationId: providers_secrets_destroy
+      description: Remove a secret from the system by its ID.
+      summary: Delete a secret
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Provider
+      security:
+      - jwtAuth: []
+      responses:
+        '204':
+          description: No response body
+  /api/v1/resources:
+    get:
+      operationId: resources_list
+      description: Retrieve a list of all resources with options for filtering by
+        various criteria. Resources are objects that are discovered by Prowler. They
+        can be anything from a single host to a whole VPC.
+      summary: List all resources
+      parameters:
+      - in: query
+        name: fields[resources]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - uid
+            - name
+            - region
+            - service
+            - tags
+            - provider
+            - findings
+            - url
+            - type
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[provider__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_alias]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_alias__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_alias__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_type]
+        schema:
+          type: string
+          enum:
+          - aws
+          - azure
+          - gcp
+          - kubernetes
+        description: |-
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+      - in: query
+        name: filter[provider_type__in]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - aws
+            - azure
+            - gcp
+            - kubernetes
+        description: |-
+          Multiple values may be separated by commas.
+
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_uid__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_uid__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[region]
+        schema:
+          type: string
+      - in: query
+        name: filter[region__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[region__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[service]
+        schema:
+          type: string
+      - in: query
+        name: filter[service__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[service__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[tag]
+        schema:
+          type: string
+      - in: query
+        name: filter[tag_key]
+        schema:
+          type: string
+      - in: query
+        name: filter[tag_value]
+        schema:
+          type: string
+      - in: query
+        name: filter[tags]
+        schema:
+          type: string
+      - in: query
+        name: filter[type]
+        schema:
+          type: string
+      - in: query
+        name: filter[type__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[type__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[uid__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[updated_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: include
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - findings
+            - provider
+        description: Use the include query parameter to customize which related
+          resources are returned.
+        explode: false
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - provider_uid
+            - -provider_uid
+            - uid
+            - -uid
+            - name
+            - -name
+            - region
+            - -region
+            - service
+            - -service
+            - type
+            - -type
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+        explode: false
+      tags:
+      - Resource
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedResourceList'
+          description: ''
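+  # Illustrative usage (a sketch; the region and tag values are placeholders):
+  # list AWS resources in one region that carry a given tag.
+  #   GET /api/v1/resources?filter[provider_type]=aws&filter[region]=us-east-1&filter[tag_key]=env&filter[tag_value]=prod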
+  /api/v1/resources/{id}:
+    get:
+      operationId: resources_retrieve
+      description: Fetch detailed information about a specific resource by its ID.
+        A Resource is an object that is discovered by Prowler. It can be anything
+        from a single host to a whole VPC.
+      summary: Retrieve data for a resource
+      parameters:
+      - in: query
+        name: fields[resources]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - uid
+            - name
+            - region
+            - service
+            - tags
+            - provider
+            - findings
+            - url
+            - type
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this resource.
+        required: true
+      - in: query
+        name: include
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - findings
+            - provider
+        description: Use the include query parameter to customize which related
+          resources are returned.
+        explode: false
+      tags:
+      - Resource
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ResourceResponse'
+          description: ''
+  /api/v1/scans:
+    get:
+      operationId: scans_list
+      description: Retrieve a list of all scans with options for filtering by various
+        criteria.
+      summary: List all scans
+      parameters:
+      - in: query
+        name: fields[scans]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - trigger
+            - state
+            - unique_resource_count
+            - progress
+            - duration
+            - provider
+            - task
+            - started_at
+            - completed_at
+            - scheduled_at
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[completed_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider]
+        schema:
+          type: string
+          format: uuid
+      - in: query
+        name: filter[provider__in]
+        schema:
+          type: array
+          items:
+            type: string
+            format: uuid
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_alias]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_alias__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_alias__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_type]
+        schema:
+          type: string
+          enum:
+          - aws
+          - azure
+          - gcp
+          - kubernetes
+        description: |-
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+      - in: query
+        name: filter[provider_type__in]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - aws
+            - azure
+            - gcp
+            - kubernetes
+        description: |-
+          Multiple values may be separated by commas.
+
+          * `aws` - AWS
+          * `azure` - Azure
+          * `gcp` - GCP
+          * `kubernetes` - Kubernetes
+        explode: false
+        style: form
+      - in: query
+        name: filter[provider_uid]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_uid__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[provider_uid__in]
+        schema:
+          type: array
+          items:
+            type: string
+        description: Multiple values may be separated by commas.
+        explode: false
+        style: form
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[started_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[started_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[started_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[trigger]
+        schema:
+          type: string
+          enum:
+          - manual
+          - scheduled
+        description: |-
+          * `scheduled` - Scheduled
+          * `manual` - Manual
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - -name
+            - trigger
+            - -trigger
+            - attempted_at
+            - -attempted_at
+            - scheduled_at
+            - -scheduled_at
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+        explode: false
+      tags:
+      - Scan
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedScanList'
+          description: ''
+    post:
+      operationId: scans_create
+      description: Trigger a manual scan by providing the required scan details. If
+        `scanner_args` are not provided, the system will automatically use the default
+        settings from the associated provider. If you do provide `scanner_args`, these
+        settings will be merged with the provider's defaults. This means that your
+        provided settings will override the defaults only where they conflict, while
+        the rest of the default settings will remain intact.
+      summary: Trigger a manual scan
+      tags:
+      - Scan
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/ScanCreateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/ScanCreateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/ScanCreateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '202':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OpenApiResponseResponse'
+          description: ''
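+  # Illustrative request (a sketch; the exact payload is defined by the
+  # ScanCreateRequest schema). Any scanner_args supplied here are merged over
+  # the provider's defaults, as described above:
+  #   POST /api/v1/scans
+  #   Content-Type: application/vnd.api+json
+  #   { "data": { "type": "scans", "attributes": { "scanner_args": { ... } } } }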
+  /api/v1/scans/{id}:
+    get:
+      operationId: scans_retrieve
+      description: Fetch detailed information about a specific scan by its ID.
+      summary: Retrieve data from a specific scan
+      parameters:
+      - in: query
+        name: fields[scans]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - trigger
+            - state
+            - unique_resource_count
+            - progress
+            - duration
+            - provider
+            - task
+            - started_at
+            - completed_at
+            - scheduled_at
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this scan.
+        required: true
+      tags:
+      - Scan
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ScanResponse'
+          description: ''
+    patch:
+      operationId: scans_partial_update
+      description: Update certain fields of an existing scan without affecting other
+        fields.
+      summary: Partially update a scan
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this scan.
+        required: true
+      tags:
+      - Scan
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedScanUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedScanUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedScanUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/ScanUpdateResponse'
+          description: ''
+  /api/v1/tasks:
+    get:
+      operationId: tasks_list
+      description: Retrieve a list of all tasks with options for filtering by name,
+        state, and other criteria.
+      summary: List all tasks
+      parameters:
+      - in: query
+        name: fields[tasks]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - completed_at
+            - name
+            - state
+            - result
+            - task_args
+            - metadata
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__icontains]
+        schema:
+          type: string
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[state]
+        schema:
+          type: string
+          title: Task State
+          enum:
+          - available
+          - cancelled
+          - completed
+          - executing
+          - failed
+          - scheduled
+        description: |-
+          Current state of the task being run
+
+          * `available` - Available
+          * `scheduled` - Scheduled
+          * `executing` - Executing
+          * `completed` - Completed
+          * `failed` - Failed
+          * `cancelled` - Cancelled
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - -inserted_at
+            - completed_at
+            - -completed_at
+            - name
+            - -name
+            - state
+            - -state
+        explode: false
+      tags:
+      - Task
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedTaskList'
+          description: ''
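+  # Illustrative usage (a sketch): list failed tasks, newest first.
+  #   GET /api/v1/tasks?filter[state]=failed&sort=-inserted_at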
+  /api/v1/tasks/{id}:
+    get:
+      operationId: tasks_retrieve
+      description: Fetch detailed information about a specific task by its ID.
+      summary: Retrieve data from a specific task
+      parameters:
+      - in: query
+        name: fields[tasks]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - completed_at
+            - name
+            - state
+            - result
+            - task_args
+            - metadata
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this task.
+        required: true
+      tags:
+      - Task
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/TaskResponse'
+          description: ''
+    delete:
+      operationId: tasks_destroy
+      description: Try to revoke a task using its ID. Only tasks that are not yet
+        in progress can be revoked.
+      summary: Revoke a task
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this task.
+        required: true
+      tags:
+      - Task
+      security:
+      - jwtAuth: []
+      responses:
+        '202':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/OpenApiResponseResponse'
+          description: ''
+  /api/v1/tenants:
+    get:
+      operationId: tenants_list
+      description: Retrieve a list of all tenants with options for filtering by various
+        criteria.
+      summary: List all tenants
+      parameters:
+      - in: query
+        name: fields[tenants]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - memberships
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__icontains]
+        schema:
+          type: string
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[updated_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - -name
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+        explode: false
+      tags:
+      - Tenant
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedTenantList'
+          description: ''
+    post:
+      operationId: tenants_create
+      description: Add a new tenant to the system by providing the required tenant
+        details.
+      summary: Create a new tenant
+      tags:
+      - Tenant
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/TenantRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/TenantRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/TenantRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/TenantResponse'
+          description: ''
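+  # Illustrative request (a sketch; "name" is assumed from the fields[tenants]
+  # enum above, and the full payload is defined by the TenantRequest schema):
+  #   POST /api/v1/tenants
+  #   Content-Type: application/vnd.api+json
+  #   { "data": { "type": "tenants", "attributes": { "name": "..." } } }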
+  /api/v1/tenants/{id}:
+    get:
+      operationId: tenants_retrieve
+      description: Fetch detailed information about a specific tenant by its ID.
+      summary: Retrieve data from a tenant
+      parameters:
+      - in: query
+        name: fields[tenants]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - memberships
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this tenant.
+        required: true
+      tags:
+      - Tenant
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/TenantResponse'
+          description: ''
+    patch:
+      operationId: tenants_partial_update
+      description: Update certain fields of an existing tenant's information without
+        affecting other fields.
+      summary: Partially update a tenant
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this tenant.
+        required: true
+      tags:
+      - Tenant
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedTenantRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedTenantRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedTenantRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/TenantResponse'
+          description: ''
+    delete:
+      operationId: tenants_destroy
+      description: Remove a tenant from the system by its ID.
+      summary: Delete a tenant
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this tenant.
+        required: true
+      tags:
+      - Tenant
+      security:
+      - jwtAuth: []
+      responses:
+        '204':
+          description: No response body
+  /api/v1/tenants/{tenant_pk}/memberships:
+    get:
+      operationId: tenants_memberships_list
+      description: List the membership details of users in a tenant you are a part
+        of.
+      summary: List tenant memberships
+      parameters:
+      - in: query
+        name: fields[memberships]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - user
+            - tenant
+            - role
+            - date_joined
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - id
+            - -id
+            - user
+            - -user
+            - tenant
+            - -tenant
+            - role
+            - -role
+            - date_joined
+            - -date_joined
+        explode: false
+      - in: path
+        name: tenant_pk
+        schema:
+          type: string
+          format: uuid
+        description: Tenant ID
+        required: true
+      tags:
+      - Tenant
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedMembershipList'
+          description: ''
+  /api/v1/tenants/{tenant_pk}/memberships/{id}:
+    delete:
+      operationId: tenants_memberships_destroy
+      description: Delete the membership details of users in a tenant. You need to
+        be one of the owners to delete a membership that is not yours. If you are
+        the last owner of a tenant, you cannot delete your own membership.
+      summary: Delete tenant memberships
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this membership.
+        required: true
+      - in: path
+        name: tenant_pk
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Tenant
+      security:
+      - jwtAuth: []
+      responses:
+        '204':
+          description: No response body
+  /api/v1/tenants/invitations:
+    get:
+      operationId: tenants_invitations_list
+      description: Retrieve a list of all tenant invitations with options for filtering
+        by various criteria.
+      summary: List all invitations
+      parameters:
+      - in: query
+        name: fields[invitations]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - email
+            - state
+            - token
+            - expires_at
+            - inviter
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[email]
+        schema:
+          type: string
+      - in: query
+        name: filter[email__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[expires_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[expires_at__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[expires_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[expires_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[inserted_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inserted_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[inviter]
+        schema:
+          type: string
+          format: uuid
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[state]
+        schema:
+          type: string
+          enum:
+          - accepted
+          - expired
+          - pending
+          - revoked
+        description: |-
+          * `pending` - Invitation is pending
+          * `accepted` - Invitation was accepted by a user
+          * `expired` - Invitation expired after the configured time
+          * `revoked` - Invitation was revoked by a user
+      - in: query
+        name: filter[state__in]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - accepted
+            - expired
+            - pending
+            - revoked
+        description: |-
+          Multiple values may be separated by commas.
+
+          * `pending` - Invitation is pending
+          * `accepted` - Invitation was accepted by a user
+          * `expired` - Invitation expired after the configured time
+          * `revoked` - Invitation was revoked by a user
+        explode: false
+        style: form
+      - in: query
+        name: filter[updated_at]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[updated_at__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[updated_at__lte]
+        schema:
+          type: string
+          format: date-time
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - -inserted_at
+            - updated_at
+            - -updated_at
+            - expires_at
+            - -expires_at
+            - state
+            - -state
+            - inviter
+            - -inviter
+        explode: false
+      tags:
+      - Invitation
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedInvitationList'
+          description: ''
+    post:
+      operationId: tenants_invitations_create
+      description: Add a new tenant invitation to the system by providing the required
+        invitation details. The invited user will have to accept the invitation or
+        create an account using the given code.
+      summary: Invite a user to a tenant
+      tags:
+      - Invitation
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/InvitationCreateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/InvitationCreateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/InvitationCreateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/InvitationCreateResponse'
+          description: ''
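+  # Illustrative request (not part of the generated schema): filtering
+  # invitations by state. Because filter[state__in] is declared with
+  # explode: false and style: form, multiple values are comma-separated.
+  #
+  #   GET /api/v1/tenants/invitations?filter[state__in]=pending,expired&sort=-inserted_at
+  #   Accept: application/vnd.api+json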
+  /api/v1/tenants/invitations/{id}:
+    get:
+      operationId: tenants_invitations_retrieve
+      description: Fetch detailed information about a specific invitation by its ID.
+      summary: Retrieve data from a tenant invitation
+      parameters:
+      - in: query
+        name: fields[invitations]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - inserted_at
+            - updated_at
+            - email
+            - state
+            - token
+            - expires_at
+            - inviter
+            - url
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Invitation
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/InvitationResponse'
+          description: ''
+    patch:
+      operationId: tenants_invitations_partial_update
+      description: Update certain fields of an existing tenant invitation's information
+        without affecting other fields.
+      summary: Partially update a tenant invitation
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Invitation
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedInvitationUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedInvitationUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedInvitationUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/InvitationUpdateResponse'
+          description: ''
+    delete:
+      operationId: tenants_invitations_destroy
+      description: Revoke a tenant invitation from the system by its ID.
+      summary: Revoke a tenant invitation
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - Invitation
+      security:
+      - jwtAuth: []
+      responses:
+        '204':
+          description: No response body
+  /api/v1/tokens:
+    post:
+      operationId: tokens_create
+      description: Obtain a token by providing valid credentials and an optional tenant
+        ID.
+      summary: Obtain a token
+      tags:
+      - Token
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/TokenRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/TokenRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/TokenRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      - {}
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/TokenResponse'
+          description: ''
+  /api/v1/tokens/refresh:
+    post:
+      operationId: tokens_refresh_create
+      description: Refresh an access token by providing a valid refresh token. Former
+        refresh tokens are invalidated when a new one is issued.
+      summary: Refresh a token
+      tags:
+      - Token
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/TokenRefreshRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/TokenRefreshRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/TokenRefreshRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      - {}
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/TokenRefreshResponse'
+          description: ''
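+  # Illustrative requests (not part of the generated schema): obtaining and then
+  # refreshing a token. The exact member names come from the TokenRequest and
+  # TokenRefreshRequest schemas referenced above, which are not reproduced here,
+  # so the bodies below are assumed placeholders only.
+  #
+  #   POST /api/v1/tokens
+  #   Content-Type: application/vnd.api+json
+  #   {"data": {"type": "tokens", "attributes": {"email": "<email>", "password": "<password>"}}}
+  #
+  #   POST /api/v1/tokens/refresh
+  #   Content-Type: application/vnd.api+json
+  #   {"data": {"type": "tokens-refresh", "attributes": {"refresh": "<refresh_token>"}}}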
+  /api/v1/users:
+    get:
+      operationId: users_list
+      description: Retrieve a list of all users with options for filtering by various
+        criteria.
+      summary: List all users
+      parameters:
+      - in: query
+        name: fields[users]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - email
+            - company_name
+            - date_joined
+            - memberships
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[company_name]
+        schema:
+          type: string
+      - in: query
+        name: filter[company_name__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[date_joined]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[date_joined__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[date_joined__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[date_joined__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[email]
+        schema:
+          type: string
+      - in: query
+        name: filter[email__icontains]
+        schema:
+          type: string
+      - in: query
+        name: filter[is_active]
+        schema:
+          type: boolean
+      - in: query
+        name: filter[name]
+        schema:
+          type: string
+      - in: query
+        name: filter[name__icontains]
+        schema:
+          type: string
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - -name
+            - email
+            - -email
+            - company_name
+            - -company_name
+            - date_joined
+            - -date_joined
+            - is_active
+            - -is_active
+        explode: false
+      tags:
+      - User
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedUserList'
+          description: ''
+    post:
+      operationId: users_create
+      description: Create a new user account by providing the necessary registration
+        details.
+      summary: Register a new user
+      parameters:
+      - in: query
+        name: invitation_token
+        schema:
+          type: string
+          example: F3NMFPNDZHR4Z9
+        description: Optional invitation code for joining an existing tenant.
+      tags:
+      - User
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/UserCreateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/UserCreateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/UserCreateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      - {}
+      responses:
+        '201':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/UserCreateResponse'
+          description: ''
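+  # Illustrative request (not part of the generated schema): registering a user
+  # with the optional invitation_token documented above (the token value is the
+  # example given in the parameter description). Attribute names follow
+  # UserCreateRequest, which is not reproduced here, so treat them as assumed.
+  #
+  #   POST /api/v1/users?invitation_token=F3NMFPNDZHR4Z9
+  #   Content-Type: application/vnd.api+json
+  #   {"data": {"type": "users", "attributes": {"name": "<name>", "email": "<email>", "password": "<password>"}}}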
+  /api/v1/users/{id}:
+    get:
+      operationId: users_retrieve
+      description: Fetch detailed information about an authenticated user.
+      summary: Retrieve a user's information
+      parameters:
+      - in: query
+        name: fields[users]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - email
+            - company_name
+            - date_joined
+            - memberships
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this user.
+        required: true
+      tags:
+      - User
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/UserResponse'
+          description: ''
+    patch:
+      operationId: users_partial_update
+      description: Partially update information about a user.
+      summary: Update user information
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this user.
+        required: true
+      tags:
+      - User
+      requestBody:
+        content:
+          application/vnd.api+json:
+            schema:
+              $ref: '#/components/schemas/PatchedUserUpdateRequest'
+          application/x-www-form-urlencoded:
+            schema:
+              $ref: '#/components/schemas/PatchedUserUpdateRequest'
+          multipart/form-data:
+            schema:
+              $ref: '#/components/schemas/PatchedUserUpdateRequest'
+        required: true
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/UserUpdateResponse'
+          description: ''
+    delete:
+      operationId: users_destroy
+      description: Remove a user account from the system.
+      summary: Delete a user account
+      parameters:
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this user.
+        required: true
+      tags:
+      - User
+      security:
+      - jwtAuth: []
+      responses:
+        '204':
+          description: No response body
+  /api/v1/users/{user_pk}/memberships:
+    get:
+      operationId: users_memberships_list
+      description: Retrieve a list of all user memberships with options for filtering
+        by various criteria.
+      summary: List user memberships
+      parameters:
+      - in: query
+        name: fields[memberships]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - user
+            - tenant
+            - role
+            - date_joined
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: query
+        name: filter[date_joined]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[date_joined__date]
+        schema:
+          type: string
+          format: date
+      - in: query
+        name: filter[date_joined__gte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[date_joined__lte]
+        schema:
+          type: string
+          format: date-time
+      - in: query
+        name: filter[role]
+        schema:
+          type: string
+          enum:
+          - member
+          - owner
+        description: |-
+          * `owner` - Owner
+          * `member` - Member
+      - name: filter[search]
+        required: false
+        in: query
+        description: A search term.
+        schema:
+          type: string
+      - in: query
+        name: filter[tenant]
+        schema:
+          type: string
+          format: uuid
+      - name: page[number]
+        required: false
+        in: query
+        description: A page number within the paginated result set.
+        schema:
+          type: integer
+      - name: page[size]
+        required: false
+        in: query
+        description: Number of results to return per page.
+        schema:
+          type: integer
+      - name: sort
+        required: false
+        in: query
+        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - tenant
+            - -tenant
+            - role
+            - -role
+            - date_joined
+            - -date_joined
+        explode: false
+      - in: path
+        name: user_pk
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - User
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/PaginatedMembershipList'
+          description: ''
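+  # Illustrative request (not part of the generated schema): listing a user's
+  # owner memberships, newest first. <user_pk> is a placeholder UUID.
+  #
+  #   GET /api/v1/users/<user_pk>/memberships?filter[role]=owner&sort=-date_joined
+  #   Accept: application/vnd.api+json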
+  /api/v1/users/{user_pk}/memberships/{id}:
+    get:
+      operationId: users_memberships_retrieve
+      description: Fetch detailed information about a specific user membership by
+        its ID.
+      summary: Retrieve membership data from the user
+      parameters:
+      - in: query
+        name: fields[memberships]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - user
+            - tenant
+            - role
+            - date_joined
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this membership.
+        required: true
+      - in: path
+        name: user_pk
+        schema:
+          type: string
+          format: uuid
+        required: true
+      tags:
+      - User
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/MembershipResponse'
+          description: ''
+  /api/v1/users/me:
+    get:
+      operationId: users_me_retrieve
+      description: Fetch detailed information about the authenticated user.
+      summary: Retrieve the current user's information
+      parameters:
+      - in: query
+        name: fields[users]
+        schema:
+          type: array
+          items:
+            type: string
+            enum:
+            - name
+            - email
+            - company_name
+            - date_joined
+            - memberships
+        description: Return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      tags:
+      - User
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          content:
+            application/vnd.api+json:
+              schema:
+                $ref: '#/components/schemas/UserResponse'
+          description: ''
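+  # Illustrative request (not part of the generated schema): requesting only a
+  # subset of the current user's fields via a sparse fieldset.
+  #
+  #   GET /api/v1/users/me?fields[users]=name,email
+  #   Authorization: Bearer <access_token>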
+components:
+  schemas:
+    ComplianceOverview:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type7f7Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            compliance_id:
+              type: string
+              maxLength: 100
+            framework:
+              type: string
+              maxLength: 100
+            version:
+              type: string
+              maxLength: 50
+            requirements_status:
+              type: object
+              properties:
+                passed:
+                  type: integer
+                failed:
+                  type: integer
+                manual:
+                  type: integer
+                total:
+                  type: integer
+              readOnly: true
+            region:
+              type: string
+              maxLength: 50
+            provider_type:
+              type: string
+              nullable: true
+              readOnly: true
+          required:
+          - compliance_id
+          - framework
+        relationships:
+          type: object
+          properties:
+            scan:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - scans
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              nullable: true
+    ComplianceOverviewFull:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type7f7Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            compliance_id:
+              type: string
+              maxLength: 100
+            framework:
+              type: string
+              maxLength: 100
+            version:
+              type: string
+              maxLength: 50
+            requirements_status:
+              type: object
+              properties:
+                passed:
+                  type: integer
+                failed:
+                  type: integer
+                manual:
+                  type: integer
+                total:
+                  type: integer
+              readOnly: true
+            region:
+              type: string
+              maxLength: 50
+            provider_type:
+              type: string
+              nullable: true
+              readOnly: true
+            description:
+              type: string
+            requirements:
+              type: object
+              properties:
+                requirement_id:
+                  type: object
+                  properties:
+                    name:
+                      type: string
+                    checks:
+                      type: object
+                      properties:
+                        check_name:
+                          type: object
+                          properties:
+                            status:
+                              type: string
+                              nullable: true
+                              enum:
+                              - PASS
+                              - FAIL
+                              - null
+                      description: Each key in the 'checks' object is a check name,
+                        with values as 'PASS', 'FAIL', or null.
+                    status:
+                      type: string
+                      enum:
+                      - PASS
+                      - FAIL
+                      - MANUAL
+                    attributes:
+                      type: array
+                      items:
+                        type: object
+                    description:
+                      type: string
+                    checks_status:
+                      type: object
+                      properties:
+                        total:
+                          type: integer
+                        pass:
+                          type: integer
+                        fail:
+                          type: integer
+                        manual:
+                          type: integer
+              readOnly: true
+          required:
+          - compliance_id
+          - framework
+        relationships:
+          type: object
+          properties:
+            scan:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - scans
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              nullable: true
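+    # Illustrative shape (not part of the schema): inside "requirements", each
+    # key is a requirement ID, and inside "checks" each key is a check name, e.g.
+    #   {"1.1": {"name": "...", "status": "PASS",
+    #            "checks": {"<check_name>": "PASS"}}}
+    # where "<check_name>" stands for a real check identifier.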
+    ComplianceOverviewFullResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ComplianceOverviewFull'
+      required:
+      - data
+    Finding:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/FindingTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            uid:
+              type: string
+              maxLength: 300
+            delta:
+              enum:
+              - new
+              - changed
+              - null
+              type: string
+              description: |-
+                * `new` - New
+                * `changed` - Changed
+              nullable: true
+            status:
+              enum:
+              - FAIL
+              - PASS
+              - MANUAL
+              - MUTED
+              type: string
+              description: |-
+                * `FAIL` - Fail
+                * `PASS` - Pass
+                * `MANUAL` - Manual
+                * `MUTED` - Muted
+            status_extended:
+              type: string
+              nullable: true
+            severity:
+              enum:
+              - critical
+              - high
+              - medium
+              - low
+              - informational
+              type: string
+              description: |-
+                * `critical` - Critical
+                * `high` - High
+                * `medium` - Medium
+                * `low` - Low
+                * `informational` - Informational
+            check_id:
+              type: string
+              maxLength: 100
+            check_metadata: {}
+            raw_result: {}
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+          required:
+          - uid
+          - status
+          - severity
+          - check_id
+        relationships:
+          type: object
+          properties:
+            scan:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - scans
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+            resources:
+              type: object
+              properties:
+                data:
+                  type: array
+                  items:
+                    type: object
+                    properties:
+                      id:
+                        type: string
+                        format: uuid
+                        title: Resource Identifier
+                        description: The identifier of the related object.
+                      type:
+                        type: string
+                        enum:
+                        - resources
+                        title: Resource Type Name
+                        description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                          member is used to describe resource objects that share common
+                          attributes and relationships.
+                    required:
+                    - id
+                    - type
+              required:
+              - data
+              description: A related resource object from type resources
+              title: resources
+              readOnly: true
+          required:
+          - scan
+    FindingResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Finding'
+      required:
+      - data
+    FindingTypeEnum:
+      type: string
+      enum:
+      - findings
+    Invitation:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TypeD4dEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+            email:
+              type: string
+              format: email
+              maxLength: 254
+            state:
+              enum:
+              - pending
+              - accepted
+              - expired
+              - revoked
+              type: string
+              description: |-
+                * `pending` - Invitation is pending
+                * `accepted` - Invitation was accepted by a user
+                * `expired` - Invitation expired after the configured time
+                * `revoked` - Invitation was revoked by a user
+            token:
+              type: string
+              readOnly: true
+            expires_at:
+              type: string
+              format: date-time
+          required:
+          - email
+        relationships:
+          type: object
+          properties:
+            inviter:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - users
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              nullable: true
+    InvitationAcceptRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - invitations
+            attributes:
+              type: object
+              properties:
+                invitation_token:
+                  type: string
+                  writeOnly: true
+                  minLength: 1
+              required:
+              - invitation_token
+      required:
+      - data
+    InvitationCreate:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TypeD4dEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            email:
+              type: string
+              format: email
+              maxLength: 254
+            expires_at:
+              type: string
+              format: date-time
+              description: UTC. Default 7 days. If this attribute is provided, it
+                must be at least 24 hours in the future.
+            state:
+              enum:
+              - pending
+              - accepted
+              - expired
+              - revoked
+              type: string
+              description: |-
+                * `pending` - Invitation is pending
+                * `accepted` - Invitation was accepted by a user
+                * `expired` - Invitation expired after the configured time
+                * `revoked` - Invitation was revoked by a user
+              readOnly: true
+            token:
+              type: string
+              readOnly: true
+          required:
+          - email
+        relationships:
+          type: object
+          properties:
+            inviter:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - users
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              readOnly: true
+              nullable: true
+    InvitationCreateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - invitations
+            attributes:
+              type: object
+              properties:
+                email:
+                  type: string
+                  format: email
+                  minLength: 1
+                  maxLength: 254
+                expires_at:
+                  type: string
+                  format: date-time
+                  description: UTC. Default 7 days. If this attribute is provided,
+                    it must be at least 24 hours in the future.
+                state:
+                  enum:
+                  - pending
+                  - accepted
+                  - expired
+                  - revoked
+                  type: string
+                  description: |-
+                    * `pending` - Invitation is pending
+                    * `accepted` - Invitation was accepted by a user
+                    * `expired` - Invitation expired after the configured time
+                    * `revoked` - Invitation was revoked by a user
+                  readOnly: true
+                token:
+                  type: string
+                  readOnly: true
+                  minLength: 1
+              required:
+              - email
+            relationships:
+              type: object
+              properties:
+                inviter:
+                  type: object
+                  properties:
+                    data:
+                      type: object
+                      properties:
+                        id:
+                          type: string
+                          format: uuid
+                        type:
+                          type: string
+                          enum:
+                          - users
+                          title: Resource Type Name
+                          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                            member is used to describe resource objects that share
+                            common attributes and relationships.
+                      required:
+                      - id
+                      - type
+                  required:
+                  - data
+                  description: The identifier of the related object.
+                  title: Resource Identifier
+                  readOnly: true
+                  nullable: true
+      required:
+      - data
+    InvitationCreateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/InvitationCreate'
+      required:
+      - data
+    InvitationResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Invitation'
+      required:
+      - data
+    InvitationUpdate:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TypeD4dEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            email:
+              type: string
+              format: email
+              maxLength: 254
+            expires_at:
+              type: string
+              format: date-time
+            state:
+              enum:
+              - pending
+              - accepted
+              - expired
+              - revoked
+              type: string
+              description: |-
+                * `pending` - Invitation is pending
+                * `accepted` - Invitation was accepted by a user
+                * `expired` - Invitation expired after the configured time
+                * `revoked` - Invitation was revoked by a user
+              readOnly: true
+            token:
+              type: string
+              readOnly: true
+    InvitationUpdateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/InvitationUpdate'
+      required:
+      - data
+    Membership:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/MembershipTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            role:
+              enum:
+              - owner
+              - member
+              type: string
+              description: |-
+                * `owner` - Owner
+                * `member` - Member
+            date_joined:
+              type: string
+              format: date-time
+              readOnly: true
+          required:
+          - role
+        relationships:
+          type: object
+          properties:
+            user:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - users
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              readOnly: true
+            tenant:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - tenants
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              readOnly: true
+    MembershipResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Membership'
+      required:
+      - data
+    MembershipTypeEnum:
+      type: string
+      enum:
+      - memberships
+    OpenApiResponseResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Membership'
+      required:
+      - data
+    OverviewProvider:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/OverviewProviderTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id: {}
+        attributes:
+          type: object
+          properties:
+            id:
+              type: string
+            findings:
+              type: object
+              properties:
+                pass:
+                  type: integer
+                fail:
+                  type: integer
+                manual:
+                  type: integer
+                total:
+                  type: integer
+              readOnly: true
+            resources:
+              type: object
+              properties:
+                total:
+                  type: integer
+              readOnly: true
+          required:
+          - id
+    OverviewProviderResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/OverviewProvider'
+      required:
+      - data
+    OverviewProviderTypeEnum:
+      type: string
+      enum:
+      - provider-overviews
+    PaginatedComplianceOverviewList:
+      type: object
+      required:
+      - count
+      - results
+      properties:
+        count:
+          type: integer
+          example: 123
+        next:
+          type: string
+          nullable: true
+          format: uri
+          example: http://api.example.org/accounts/?page[number]=4
+        previous:
+          type: string
+          nullable: true
+          format: uri
+          example: http://api.example.org/accounts/?page[number]=2
+        results:
+          type: array
+          items:
+            $ref: '#/components/schemas/ComplianceOverview'
+    PaginatedFindingList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Finding'
+      required:
+      - data
+    PaginatedInvitationList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Invitation'
+      required:
+      - data
+    PaginatedMembershipList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Membership'
+      required:
+      - data
+    PaginatedProviderGroupList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/ProviderGroup'
+      required:
+      - data
+    PaginatedProviderList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Provider'
+      required:
+      - data
+    PaginatedProviderSecretList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/ProviderSecret'
+      required:
+      - data
+    PaginatedResourceList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Resource'
+      required:
+      - data
+    PaginatedScanList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Scan'
+      required:
+      - data
+    PaginatedTaskList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Task'
+      required:
+      - data
+    PaginatedTenantList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/Tenant'
+      required:
+      - data
+    PaginatedUserList:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: '#/components/schemas/User'
+      required:
+      - data
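+    # Illustrative example only: each of the JSON:API list schemas above
+    # (PaginatedFindingList through PaginatedUserList) wraps its items in a
+    # top-level "data" array, e.g. with placeholder values:
+    #   { "data": [ { "type": "users", "id": "<uuid>", "attributes": { "...": "..." } } ] }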
+    PatchedInvitationUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - invitations
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                email:
+                  type: string
+                  format: email
+                  minLength: 1
+                  maxLength: 254
+                expires_at:
+                  type: string
+                  format: date-time
+                state:
+                  enum:
+                  - pending
+                  - accepted
+                  - expired
+                  - revoked
+                  type: string
+                  description: |-
+                    * `pending` - Invitation is pending
+                    * `accepted` - Invitation was accepted by a user
+                    * `expired` - Invitation expired after the configured time
+                    * `revoked` - Invitation was revoked by a user
+                  readOnly: true
+                token:
+                  type: string
+                  readOnly: true
+                  minLength: 1
+      required:
+      - data
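+    # Illustrative example only: a minimal PATCH body for this schema. "state"
+    # and "token" are readOnly, so only writable attributes are sent (the UUID
+    # and email below are placeholders):
+    #   { "data": { "type": "invitations",
+    #               "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+    #               "attributes": { "email": "new.member@example.com",
+    #                               "expires_at": "2025-01-31T00:00:00Z" } } }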
+    PatchedProviderGroupUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - provider-groups
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  minLength: 1
+                  maxLength: 255
+              required:
+              - name
+      required:
+      - data
+    PatchedProviderSecretUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - provider-secrets
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                inserted_at:
+                  type: string
+                  format: date-time
+                  readOnly: true
+                updated_at:
+                  type: string
+                  format: date-time
+                  readOnly: true
+                name:
+                  type: string
+                  nullable: true
+                  maxLength: 100
+                  minLength: 3
+                secret_type:
+                  enum:
+                  - static
+                  - role
+                  type: string
+                  description: |-
+                    * `static` - Key-value pairs
+                    * `role` - Role assumption
+                  readOnly: true
+                secret:
+                  oneOf:
+                  - type: object
+                    title: AWS Static Credentials
+                    properties:
+                      aws_access_key_id:
+                        type: string
+                        description: The AWS access key ID. Required for environments
+                          where no IAM role is being assumed and direct AWS access
+                          is needed.
+                      aws_secret_access_key:
+                        type: string
+                        description: The AWS secret access key. Must accompany 'aws_access_key_id'
+                          to authorize access to AWS resources.
+                      aws_session_token:
+                        type: string
+                        description: The session token associated with temporary credentials.
+                          Only needed for session-based or temporary AWS access.
+                    required:
+                    - aws_access_key_id
+                    - aws_secret_access_key
+                  - type: object
+                    title: AWS Assume Role
+                    properties:
+                      role_arn:
+                        type: string
+                        description: The Amazon Resource Name (ARN) of the role to
+                          assume. Required for AWS role assumption.
+                      aws_access_key_id:
+                        type: string
+                        description: The AWS access key ID. Only required if the environment
+                          lacks pre-configured AWS credentials.
+                      aws_secret_access_key:
+                        type: string
+                        description: The AWS secret access key. Required if 'aws_access_key_id'
+                          is provided or if no AWS credentials are pre-configured.
+                      aws_session_token:
+                        type: string
+                        description: The session token for temporary credentials,
+                          if applicable.
+                      session_duration:
+                        type: integer
+                        minimum: 900
+                        maximum: 43200
+                        default: 3600
+                        description: The duration (in seconds) for the role session.
+                      external_id:
+                        type: string
+                        description: An optional identifier to enhance security for
+                          role assumption; may be required by the role administrator.
+                      role_session_name:
+                        type: string
+                        description: |-
+                          An identifier for the role session, useful for tracking sessions in AWS logs. The value must consist of upper- and lower-case alphanumeric characters with no spaces; underscores and any of the following characters are also allowed: =,.@-
+
+                          Examples:
+                          - MySession123
+                          - User_Session-1
+                          - Test.Session@2
+                        pattern: ^[a-zA-Z0-9=,.@_-]+$
+                    required:
+                    - role_arn
+                  - type: object
+                    title: Azure Static Credentials
+                    properties:
+                      client_id:
+                        type: string
+                        description: The Azure application (client) ID for authentication
+                          in Azure AD.
+                      client_secret:
+                        type: string
+                        description: The client secret associated with the application
+                          (client) ID, providing secure access.
+                      tenant_id:
+                        type: string
+                        description: The Azure tenant ID, representing the directory
+                          where the application is registered.
+                    required:
+                    - client_id
+                    - client_secret
+                    - tenant_id
+                  - type: object
+                    title: GCP Static Credentials
+                    properties:
+                      client_id:
+                        type: string
+                        description: The client ID from Google Cloud, used to identify
+                          the application for GCP access.
+                      client_secret:
+                        type: string
+                        description: The client secret associated with the GCP client
+                          ID, required for secure access.
+                      refresh_token:
+                        type: string
+                        description: A refresh token that allows the application to
+                          obtain new access tokens for extended use.
+                    required:
+                    - client_id
+                    - client_secret
+                    - refresh_token
+                  - type: object
+                    title: Kubernetes Static Credentials
+                    properties:
+                      kubeconfig_content:
+                        type: string
+                        description: The content of the Kubernetes kubeconfig file,
+                          encoded as a string.
+                    required:
+                    - kubeconfig_content
+                  writeOnly: true
+              required:
+              - secret
+            relationships:
+              type: object
+              properties:
+                provider:
+                  type: object
+                  properties:
+                    data:
+                      type: object
+                      properties:
+                        id:
+                          type: string
+                          format: uuid
+                        type:
+                          type: string
+                          enum:
+                          - providers
+                          title: Resource Type Name
+                          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                            member is used to describe resource objects that share
+                            common attributes and relationships.
+                      required:
+                      - id
+                      - type
+                  required:
+                  - data
+                  description: The identifier of the related object.
+                  title: Resource Identifier
+                  readOnly: true
+      required:
+      - data
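+    # Illustrative example only: rotating a secret with this schema. "secret"
+    # is writeOnly and must match one of the oneOf variants, here AWS Static
+    # Credentials (all values are placeholders):
+    #   { "data": { "type": "provider-secrets",
+    #               "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+    #               "attributes": { "secret": {
+    #                 "aws_access_key_id": "AKIAEXAMPLE",
+    #                 "aws_secret_access_key": "<secret>" } } } }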
+    PatchedProviderUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - providers
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                alias:
+                  type: string
+                  nullable: true
+                  maxLength: 100
+                  minLength: 3
+      required:
+      - data
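+    # Illustrative example only: per this schema, "alias" is the only writable
+    # provider attribute, e.g. (placeholder UUID):
+    #   { "data": { "type": "providers",
+    #               "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+    #               "attributes": { "alias": "production" } } }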
+    PatchedScanUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - scans
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  nullable: true
+                  maxLength: 100
+                  minLength: 3
+      required:
+      - data
+    PatchedTenantRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - tenants
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  minLength: 1
+                  maxLength: 100
+              required:
+              - name
+            relationships:
+              type: object
+              properties:
+                memberships:
+                  type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        type: object
+                        properties:
+                          id:
+                            type: string
+                            format: uuid
+                            title: Resource Identifier
+                            description: The identifier of the related object.
+                          type:
+                            type: string
+                            enum:
+                            - memberships
+                            title: Resource Type Name
+                            description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                              member is used to describe resource objects that share
+                              common attributes and relationships.
+                        required:
+                        - id
+                        - type
+                  required:
+                  - data
+                  description: A related resource object of type memberships
+                  title: memberships
+                  readOnly: true
+      required:
+      - data
+    PatchedUserUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          - id
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - users
+            id:
+              type: string
+              format: uuid
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  minLength: 3
+                  maxLength: 150
+                password:
+                  type: string
+                  writeOnly: true
+                  minLength: 1
+                email:
+                  type: string
+                  format: email
+                  minLength: 1
+                  description: Case insensitive
+                  maxLength: 254
+                company_name:
+                  type: string
+                  maxLength: 150
+              required:
+              - name
+              - email
+      required:
+      - data
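+    # Illustrative example only: note that this Patched schema still marks
+    # "name" and "email" as required attributes, so a PATCH body would resemble
+    # (placeholder values):
+    #   { "data": { "type": "users",
+    #               "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+    #               "attributes": { "name": "Jane Doe",
+    #                               "email": "jane@example.com" } } }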
+    Provider:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type227Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+            provider:
+              enum:
+              - aws
+              - azure
+              - gcp
+              - kubernetes
+              type: string
+              description: |-
+                * `aws` - AWS
+                * `azure` - Azure
+                * `gcp` - GCP
+                * `kubernetes` - Kubernetes
+            uid:
+              type: string
+              title: Unique identifier for the provider, set by the provider
+              maxLength: 63
+              minLength: 3
+            alias:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+            connection:
+              type: object
+              properties:
+                connected:
+                  type: boolean
+                last_checked_at:
+                  type: string
+                  format: date-time
+              readOnly: true
+          required:
+          - provider
+          - uid
+        relationships:
+          type: object
+          properties:
+            secret:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - provider-secrets
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+          required:
+          - secret
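+    # Illustrative example only: a Provider resource object as it might appear
+    # in a response, including the readOnly "connection" block (placeholder
+    # values):
+    #   { "type": "providers",
+    #     "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+    #     "attributes": { "provider": "aws", "uid": "123456789012",
+    #                     "alias": "production",
+    #                     "connection": { "connected": true,
+    #                                     "last_checked_at": "2024-01-01T00:00:00Z" } } }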
+    ProviderCreate:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type227Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            alias:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+            provider:
+              enum:
+              - aws
+              - azure
+              - gcp
+              - kubernetes
+              type: string
+              description: |-
+                * `aws` - AWS
+                * `azure` - Azure
+                * `gcp` - GCP
+                * `kubernetes` - Kubernetes
+            uid:
+              type: string
+              title: Unique identifier for the provider, set by the provider
+              maxLength: 63
+              minLength: 3
+          required:
+          - uid
+    ProviderCreateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - providers
+            attributes:
+              type: object
+              properties:
+                alias:
+                  type: string
+                  nullable: true
+                  maxLength: 100
+                  minLength: 3
+                provider:
+                  enum:
+                  - aws
+                  - azure
+                  - gcp
+                  - kubernetes
+                  type: string
+                  description: |-
+                    * `aws` - AWS
+                    * `azure` - Azure
+                    * `gcp` - GCP
+                    * `kubernetes` - Kubernetes
+                uid:
+                  type: string
+                  minLength: 3
+                  title: Unique identifier for the provider, set by the provider
+                  maxLength: 63
+              required:
+              - uid
+      required:
+      - data
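+    # Illustrative example only: a create request for this schema; the "id" is
+    # server-generated, so only "type" and "attributes" are sent (placeholder
+    # values):
+    #   { "data": { "type": "providers",
+    #               "attributes": { "provider": "aws",
+    #                               "uid": "123456789012",
+    #                               "alias": "staging" } } }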
+    ProviderCreateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ProviderCreate'
+      required:
+      - data
+    ProviderGroup:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/ProviderGroupTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              maxLength: 255
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+          required:
+          - name
+        relationships:
+          type: object
+          properties:
+            providers:
+              type: object
+              properties:
+                data:
+                  type: array
+                  items:
+                    type: object
+                    properties:
+                      id:
+                        type: string
+                        format: uuid
+                        title: Resource Identifier
+                        description: The identifier of the related object.
+                      type:
+                        type: string
+                        enum:
+                        - providers
+                        title: Resource Type Name
+                        description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                          member is used to describe resource objects that share common
+                          attributes and relationships.
+                    required:
+                    - id
+                    - type
+              required:
+              - data
+              description: A related resource object of type providers
+              title: providers
+              readOnly: true
+    ProviderGroupMembershipUpdateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - provider-group-memberships
+            attributes:
+              type: object
+              properties:
+                provider_ids:
+                  type: array
+                  items:
+                    type: string
+                    format: uuid
+                  description: List of provider UUIDs to add to the group
+              required:
+              - provider_ids
+      required:
+      - data
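+    # Illustrative example only: "provider_ids" carries the provider UUIDs to
+    # add to the group (placeholder UUID):
+    #   { "data": { "type": "provider-group-memberships",
+    #               "attributes": { "provider_ids":
+    #                 [ "3fa85f64-5717-4562-b3fc-2c963f66afa6" ] } } }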
+    ProviderGroupRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - provider-groups
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  minLength: 1
+                  maxLength: 255
+                inserted_at:
+                  type: string
+                  format: date-time
+                  readOnly: true
+                updated_at:
+                  type: string
+                  format: date-time
+                  readOnly: true
+              required:
+              - name
+            relationships:
+              type: object
+              properties:
+                providers:
+                  type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        type: object
+                        properties:
+                          id:
+                            type: string
+                            format: uuid
+                            title: Resource Identifier
+                            description: The identifier of the related object.
+                          type:
+                            type: string
+                            enum:
+                            - providers
+                            title: Resource Type Name
+                            description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                              member is used to describe resource objects that share
+                              common attributes and relationships.
+                        required:
+                        - id
+                        - type
+                  required:
+                  - data
+                  description: A related resource object of type providers
+                  title: providers
+                  readOnly: true
+      required:
+      - data
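+    # Illustrative example only: creating a provider group. The "providers"
+    # relationship is readOnly here; group members are added through the
+    # provider-group-memberships request above (placeholder name):
+    #   { "data": { "type": "provider-groups",
+    #               "attributes": { "name": "production-accounts" } } }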
+    ProviderGroupResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ProviderGroup'
+      required:
+      - data
+    ProviderGroupTypeEnum:
+      type: string
+      enum:
+      - provider-groups
+    ProviderResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Provider'
+      required:
+      - data
+    ProviderSecret:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type049Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+            name:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+            secret_type:
+              enum:
+              - static
+              - role
+              type: string
+              description: |-
+                * `static` - Key-value pairs
+                * `role` - Role assumption
+          required:
+          - secret_type
+        relationships:
+          type: object
+          properties:
+            provider:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - providers
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+          required:
+          - provider
+    ProviderSecretCreate:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type049Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+            name:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+            secret_type:
+              enum:
+              - static
+              - role
+              type: string
+              description: |-
+                * `static` - Key-value pairs
+                * `role` - Role assumption
+            secret:
+              oneOf:
+              - type: object
+                title: AWS Static Credentials
+                properties:
+                  aws_access_key_id:
+                    type: string
+                    description: The AWS access key ID. Required for environments
+                      where no IAM role is being assumed and direct AWS access is
+                      needed.
+                  aws_secret_access_key:
+                    type: string
+                    description: The AWS secret access key. Must accompany 'aws_access_key_id'
+                      to authorize access to AWS resources.
+                  aws_session_token:
+                    type: string
+                    description: The session token associated with temporary credentials.
+                      Only needed for session-based or temporary AWS access.
+                required:
+                - aws_access_key_id
+                - aws_secret_access_key
+              - type: object
+                title: AWS Assume Role
+                properties:
+                  role_arn:
+                    type: string
+                    description: The Amazon Resource Name (ARN) of the role to assume.
+                      Required for AWS role assumption.
+                  aws_access_key_id:
+                    type: string
+                    description: The AWS access key ID. Only required if the environment
+                      lacks pre-configured AWS credentials.
+                  aws_secret_access_key:
+                    type: string
+                    description: The AWS secret access key. Required if 'aws_access_key_id'
+                      is provided or if no AWS credentials are pre-configured.
+                  aws_session_token:
+                    type: string
+                    description: The session token for temporary credentials, if applicable.
+                  session_duration:
+                    type: integer
+                    minimum: 900
+                    maximum: 43200
+                    default: 3600
+                    description: The duration (in seconds) for the role session.
+                  external_id:
+                    type: string
+                    description: An optional identifier to enhance security for role
+                      assumption; may be required by the role administrator.
+                  role_session_name:
+                    type: string
+                    description: |-
+                      An identifier for the role session, useful for tracking sessions in AWS logs. The value must consist of upper- and lower-case alphanumeric characters with no spaces; underscores and any of the following characters are also allowed: =,.@-
+
+                      Examples:
+                      - MySession123
+                      - User_Session-1
+                      - Test.Session@2
+                    pattern: ^[a-zA-Z0-9=,.@_-]+$
+                required:
+                - role_arn
+              - type: object
+                title: Azure Static Credentials
+                properties:
+                  client_id:
+                    type: string
+                    description: The Azure application (client) ID for authentication
+                      in Azure AD.
+                  client_secret:
+                    type: string
+                    description: The client secret associated with the application
+                      (client) ID, providing secure access.
+                  tenant_id:
+                    type: string
+                    description: The Azure tenant ID, representing the directory where
+                      the application is registered.
+                required:
+                - client_id
+                - client_secret
+                - tenant_id
+              - type: object
+                title: GCP Static Credentials
+                properties:
+                  client_id:
+                    type: string
+                    description: The client ID from Google Cloud, used to identify
+                      the application for GCP access.
+                  client_secret:
+                    type: string
+                    description: The client secret associated with the GCP client
+                      ID, required for secure access.
+                  refresh_token:
+                    type: string
+                    description: A refresh token that allows the application to obtain
+                      new access tokens for extended use.
+                required:
+                - client_id
+                - client_secret
+                - refresh_token
+              - type: object
+                title: Kubernetes Static Credentials
+                properties:
+                  kubeconfig_content:
+                    type: string
+                    description: The content of the Kubernetes kubeconfig file, encoded
+                      as a string.
+                required:
+                - kubeconfig_content
+              writeOnly: true
+          required:
+          - secret_type
+          - secret
+        relationships:
+          type: object
+          properties:
+            provider:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - providers
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+          required:
+          - provider
+    ProviderSecretCreateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - provider-secrets
+            attributes:
+              type: object
+              properties:
+                inserted_at:
+                  type: string
+                  format: date-time
+                  readOnly: true
+                updated_at:
+                  type: string
+                  format: date-time
+                  readOnly: true
+                name:
+                  type: string
+                  nullable: true
+                  maxLength: 100
+                  minLength: 3
+                secret_type:
+                  enum:
+                  - static
+                  - role
+                  type: string
+                  description: |-
+                    * `static` - Key-value pairs
+                    * `role` - Role assumption
+                secret:
+                  oneOf:
+                  - type: object
+                    title: AWS Static Credentials
+                    properties:
+                      aws_access_key_id:
+                        type: string
+                        description: The AWS access key ID. Required for environments
+                          where no IAM role is being assumed and direct AWS access
+                          is needed.
+                      aws_secret_access_key:
+                        type: string
+                        description: The AWS secret access key. Must accompany 'aws_access_key_id'
+                          to authorize access to AWS resources.
+                      aws_session_token:
+                        type: string
+                        description: The session token associated with temporary credentials.
+                          Only needed for session-based or temporary AWS access.
+                    required:
+                    - aws_access_key_id
+                    - aws_secret_access_key
+                  - type: object
+                    title: AWS Assume Role
+                    properties:
+                      role_arn:
+                        type: string
+                        description: The Amazon Resource Name (ARN) of the role to
+                          assume. Required for AWS role assumption.
+                      aws_access_key_id:
+                        type: string
+                        description: The AWS access key ID. Only required if the environment
+                          lacks pre-configured AWS credentials.
+                      aws_secret_access_key:
+                        type: string
+                        description: The AWS secret access key. Required if 'aws_access_key_id'
+                          is provided or if no AWS credentials are pre-configured.
+                      aws_session_token:
+                        type: string
+                        description: The session token for temporary credentials,
+                          if applicable.
+                      session_duration:
+                        type: integer
+                        minimum: 900
+                        maximum: 43200
+                        default: 3600
+                        description: The duration (in seconds) for the role session.
+                      external_id:
+                        type: string
+                        description: An optional identifier to enhance security for
+                          role assumption; may be required by the role administrator.
+                      role_session_name:
+                        type: string
+                        description: |-
+                          An identifier for the role session, useful for tracking sessions in AWS logs. The value must consist of upper- and lower-case alphanumeric characters with no spaces; underscores and any of the following characters are also allowed: =,.@-
+
+                          Examples:
+                          - MySession123
+                          - User_Session-1
+                          - Test.Session@2
+                        pattern: ^[a-zA-Z0-9=,.@_-]+$
+                    required:
+                    - role_arn
+                  - type: object
+                    title: Azure Static Credentials
+                    properties:
+                      client_id:
+                        type: string
+                        description: The Azure application (client) ID for authentication
+                          in Azure AD.
+                      client_secret:
+                        type: string
+                        description: The client secret associated with the application
+                          (client) ID, providing secure access.
+                      tenant_id:
+                        type: string
+                        description: The Azure tenant ID, representing the directory
+                          where the application is registered.
+                    required:
+                    - client_id
+                    - client_secret
+                    - tenant_id
+                  - type: object
+                    title: GCP Static Credentials
+                    properties:
+                      client_id:
+                        type: string
+                        description: The client ID from Google Cloud, used to identify
+                          the application for GCP access.
+                      client_secret:
+                        type: string
+                        description: The client secret associated with the GCP client
+                          ID, required for secure access.
+                      refresh_token:
+                        type: string
+                        description: A refresh token that allows the application to
+                          obtain new access tokens for extended use.
+                    required:
+                    - client_id
+                    - client_secret
+                    - refresh_token
+                  - type: object
+                    title: Kubernetes Static Credentials
+                    properties:
+                      kubeconfig_content:
+                        type: string
+                        description: The content of the Kubernetes kubeconfig file,
+                          encoded as a string.
+                    required:
+                    - kubeconfig_content
+                  writeOnly: true
+              required:
+              - secret_type
+              - secret
+            relationships:
+              type: object
+              properties:
+                provider:
+                  type: object
+                  properties:
+                    data:
+                      type: object
+                      properties:
+                        id:
+                          type: string
+                          format: uuid
+                        type:
+                          type: string
+                          enum:
+                          - providers
+                          title: Resource Type Name
+                          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                            member is used to describe resource objects that share
+                            common attributes and relationships.
+                      required:
+                      - id
+                      - type
+                  required:
+                  - data
+                  description: The identifier of the related object.
+                  title: Resource Identifier
+              required:
+              - provider
+      required:
+      - data
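+    # Illustrative example only: creating a "role" secret using the AWS Assume
+    # Role variant. "session_duration" must stay within 900-43200 seconds and
+    # "role_session_name" must match ^[a-zA-Z0-9=,.@_-]+$ (all values are
+    # placeholders):
+    #   { "data": { "type": "provider-secrets",
+    #               "attributes": {
+    #                 "secret_type": "role",
+    #                 "secret": { "role_arn": "arn:aws:iam::123456789012:role/example",
+    #                             "external_id": "example-external-id",
+    #                             "session_duration": 3600,
+    #                             "role_session_name": "MySession123" } },
+    #               "relationships": { "provider": { "data": {
+    #                 "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
+    #                 "type": "providers" } } } } }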
+    ProviderSecretCreateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ProviderSecretCreate'
+      required:
+      - data
+    ProviderSecretResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ProviderSecret'
+      required:
+      - data
+    ProviderSecretUpdate:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type049Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+            name:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+            secret_type:
+              enum:
+              - static
+              - role
+              type: string
+              description: |-
+                * `static` - Key-value pairs
+                * `role` - Role assumption
+              readOnly: true
+            secret:
+              oneOf:
+              - type: object
+                title: AWS Static Credentials
+                properties:
+                  aws_access_key_id:
+                    type: string
+                    description: The AWS access key ID. Required for environments
+                      where no IAM role is being assumed and direct AWS access is
+                      needed.
+                  aws_secret_access_key:
+                    type: string
+                    description: The AWS secret access key. Must accompany 'aws_access_key_id'
+                      to authorize access to AWS resources.
+                  aws_session_token:
+                    type: string
+                    description: The session token associated with temporary credentials.
+                      Only needed for session-based or temporary AWS access.
+                required:
+                - aws_access_key_id
+                - aws_secret_access_key
+              - type: object
+                title: AWS Assume Role
+                properties:
+                  role_arn:
+                    type: string
+                    description: The Amazon Resource Name (ARN) of the role to assume.
+                      Required for AWS role assumption.
+                  aws_access_key_id:
+                    type: string
+                    description: The AWS access key ID. Only required if the environment
+                      lacks pre-configured AWS credentials.
+                  aws_secret_access_key:
+                    type: string
+                    description: The AWS secret access key. Required if 'aws_access_key_id'
+                      is provided or if no AWS credentials are pre-configured.
+                  aws_session_token:
+                    type: string
+                    description: The session token for temporary credentials, if applicable.
+                  session_duration:
+                    type: integer
+                    minimum: 900
+                    maximum: 43200
+                    default: 3600
+                    description: The duration (in seconds) for the role session.
+                  external_id:
+                    type: string
+                    description: An optional identifier to enhance security for role
+                      assumption; may be required by the role administrator.
+                  role_session_name:
+                    type: string
+                    description: |-
+                      An identifier for the role session, useful for tracking sessions in AWS logs. The value must consist of upper- and lower-case alphanumeric characters with no spaces; underscores and any of the following characters are also allowed: =,.@-
+
+                      Examples:
+                      - MySession123
+                      - User_Session-1
+                      - Test.Session@2
+                    pattern: ^[a-zA-Z0-9=,.@_-]+$
+                required:
+                - role_arn
+              - type: object
+                title: Azure Static Credentials
+                properties:
+                  client_id:
+                    type: string
+                    description: The Azure application (client) ID for authentication
+                      in Azure AD.
+                  client_secret:
+                    type: string
+                    description: The client secret associated with the application
+                      (client) ID, providing secure access.
+                  tenant_id:
+                    type: string
+                    description: The Azure tenant ID, representing the directory where
+                      the application is registered.
+                required:
+                - client_id
+                - client_secret
+                - tenant_id
+              - type: object
+                title: GCP Static Credentials
+                properties:
+                  client_id:
+                    type: string
+                    description: The client ID from Google Cloud, used to identify
+                      the application for GCP access.
+                  client_secret:
+                    type: string
+                    description: The client secret associated with the GCP client
+                      ID, required for secure access.
+                  refresh_token:
+                    type: string
+                    description: A refresh token that allows the application to obtain
+                      new access tokens for extended use.
+                required:
+                - client_id
+                - client_secret
+                - refresh_token
+              - type: object
+                title: Kubernetes Static Credentials
+                properties:
+                  kubeconfig_content:
+                    type: string
+                    description: The content of the Kubernetes kubeconfig file, encoded
+                      as a string.
+                required:
+                - kubeconfig_content
+              writeOnly: true
+          required:
+          - secret
+        relationships:
+          type: object
+          properties:
+            provider:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - providers
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              readOnly: true
+    ProviderSecretUpdateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ProviderSecretUpdate'
+      required:
+      - data
+    Resource:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/ResourceTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            updated_at:
+              type: string
+              format: date-time
+              readOnly: true
+            uid:
+              type: string
+              title: Unique identifier for the resource, set by the provider
+            name:
+              type: string
+              title: Name of the resource, as set in the provider
+            region:
+              type: string
+              title: Location of the resource, as set by the provider
+            service:
+              type: string
+              title: Service of the resource, as set by the provider
+            tags:
+              type: object
+              description: Tags associated with the resource
+              example:
+                env: prod
+                owner: johndoe
+              readOnly: true
+            type:
+              type: string
+              readOnly: true
+          required:
+          - uid
+          - name
+          - region
+          - service
+        relationships:
+          type: object
+          properties:
+            provider:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - providers
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+            findings:
+              type: object
+              properties:
+                data:
+                  type: array
+                  items:
+                    type: object
+                    properties:
+                      id:
+                        type: string
+                        format: uuid
+                        title: Resource Identifier
+                        description: The identifier of the related object.
+                      type:
+                        type: string
+                        enum:
+                        - findings
+                        title: Resource Type Name
+                        description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                          member is used to describe resource objects that share common
+                          attributes and relationships.
+                    required:
+                    - id
+                    - type
+              required:
+              - data
+              description: A related resource object of type findings
+              title: findings
+              readOnly: true
+          required:
+          - provider
+    ResourceResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Resource'
+      required:
+      - data
+    ResourceTypeEnum:
+      type: string
+      enum:
+      - resources
+    Scan:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TypeE53Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+            trigger:
+              enum:
+              - scheduled
+              - manual
+              type: string
+              description: |-
+                * `scheduled` - Scheduled
+                * `manual` - Manual
+              readOnly: true
+            state:
+              enum:
+              - available
+              - scheduled
+              - executing
+              - completed
+              - failed
+              - cancelled
+              type: string
+              description: |-
+                * `available` - Available
+                * `scheduled` - Scheduled
+                * `executing` - Executing
+                * `completed` - Completed
+                * `failed` - Failed
+                * `cancelled` - Cancelled
+              readOnly: true
+            unique_resource_count:
+              type: integer
+              maximum: 2147483647
+              minimum: -2147483648
+            progress:
+              type: integer
+              maximum: 2147483647
+              minimum: -2147483648
+            duration:
+              type: integer
+              maximum: 2147483647
+              minimum: -2147483648
+              nullable: true
+            started_at:
+              type: string
+              format: date-time
+              nullable: true
+            completed_at:
+              type: string
+              format: date-time
+              nullable: true
+            scheduled_at:
+              type: string
+              format: date-time
+              nullable: true
+        relationships:
+          type: object
+          properties:
+            provider:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - providers
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+            task:
+              type: object
+              properties:
+                data:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uuid
+                    type:
+                      type: string
+                      enum:
+                      - tasks
+                      title: Resource Type Name
+                      description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                        member is used to describe resource objects that share common
+                        attributes and relationships.
+                  required:
+                  - id
+                  - type
+              required:
+              - data
+              description: The identifier of the related object.
+              title: Resource Identifier
+              nullable: true
+          required:
+          - provider
+    ScanCreateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - scans
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  nullable: true
+                  maxLength: 100
+                  minLength: 3
+            relationships:
+              type: object
+              properties:
+                provider:
+                  type: object
+                  properties:
+                    data:
+                      type: object
+                      properties:
+                        id:
+                          type: string
+                          format: uuid
+                        type:
+                          type: string
+                          enum:
+                          - providers
+                          title: Resource Type Name
+                          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                            member is used to describe resource objects that share
+                            common attributes and relationships.
+                      required:
+                      - id
+                      - type
+                  required:
+                  - data
+                  description: The identifier of the related object.
+                  title: Resource Identifier
+              required:
+              - provider
+      required:
+      - data
+    ScanResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Scan'
+      required:
+      - data
+    ScanUpdate:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TypeE53Enum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              nullable: true
+              maxLength: 100
+              minLength: 3
+    ScanUpdateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ScanUpdate'
+      required:
+      - data
+    SerializerMetaclassResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/ProviderGroup'
+      required:
+      - data
+    Task:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TaskTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            inserted_at:
+              type: string
+              format: date-time
+              readOnly: true
+            completed_at:
+              type: string
+              format: date-time
+              readOnly: true
+            name:
+              type: string
+              readOnly: true
+            state:
+              type: string
+              enum:
+              - available
+              - scheduled
+              - executing
+              - completed
+              - failed
+              - cancelled
+              readOnly: true
+            result:
+              readOnly: true
+            task_args:
+              readOnly: true
+            metadata:
+              readOnly: true
+    TaskResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Task'
+      required:
+      - data
+    TaskTypeEnum:
+      type: string
+      enum:
+      - tasks
+    Tenant:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TenantTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              maxLength: 100
+          required:
+          - name
+        relationships:
+          type: object
+          properties:
+            memberships:
+              type: object
+              properties:
+                data:
+                  type: array
+                  items:
+                    type: object
+                    properties:
+                      id:
+                        type: string
+                        format: uuid
+                        title: Resource Identifier
+                        description: The identifier of the related object.
+                      type:
+                        type: string
+                        enum:
+                        - memberships
+                        title: Resource Type Name
+                        description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                          member is used to describe resource objects that share common
+                          attributes and relationships.
+                    required:
+                    - id
+                    - type
+              required:
+              - data
+              description: A related resource object of type memberships
+              title: memberships
+              readOnly: true
+    TenantRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - tenants
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  minLength: 1
+                  maxLength: 100
+              required:
+              - name
+            relationships:
+              type: object
+              properties:
+                memberships:
+                  type: object
+                  properties:
+                    data:
+                      type: array
+                      items:
+                        type: object
+                        properties:
+                          id:
+                            type: string
+                            format: uuid
+                            title: Resource Identifier
+                            description: The identifier of the related object.
+                          type:
+                            type: string
+                            enum:
+                            - memberships
+                            title: Resource Type Name
+                            description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                              member is used to describe resource objects that share
+                              common attributes and relationships.
+                        required:
+                        - id
+                        - type
+                  required:
+                  - data
+                  description: A related resource object of type memberships
+                  title: memberships
+                  readOnly: true
+      required:
+      - data
+    TenantResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Tenant'
+      required:
+      - data
+    TenantTypeEnum:
+      type: string
+      enum:
+      - tenants
+    Token:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TokenTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            email:
+              type: string
+              writeOnly: true
+            password:
+              type: string
+              writeOnly: true
+            tenant_id:
+              type: string
+              format: uuid
+              writeOnly: true
+              description: If not provided, the tenant ID of the first membership
+                that was added to the user will be used.
+            refresh:
+              type: string
+              readOnly: true
+            access:
+              type: string
+              readOnly: true
+          required:
+          - email
+          - password
+    TokenRefresh:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/TokenRefreshTypeEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            refresh:
+              type: string
+            access:
+              type: string
+              readOnly: true
+          required:
+          - refresh
+    TokenRefreshRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - tokens-refresh
+            attributes:
+              type: object
+              properties:
+                refresh:
+                  type: string
+                  minLength: 1
+                access:
+                  type: string
+                  readOnly: true
+                  minLength: 1
+              required:
+              - refresh
+      required:
+      - data
+    TokenRefreshResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/TokenRefresh'
+      required:
+      - data
+    TokenRefreshTypeEnum:
+      type: string
+      enum:
+      - tokens-refresh
+    TokenRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - tokens
+            attributes:
+              type: object
+              properties:
+                email:
+                  type: string
+                  writeOnly: true
+                  minLength: 1
+                password:
+                  type: string
+                  writeOnly: true
+                  minLength: 1
+                tenant_id:
+                  type: string
+                  format: uuid
+                  writeOnly: true
+                  description: If not provided, the tenant ID of the first membership
+                    that was added to the user will be used.
+                refresh:
+                  type: string
+                  readOnly: true
+                  minLength: 1
+                access:
+                  type: string
+                  readOnly: true
+                  minLength: 1
+              required:
+              - email
+              - password
+      required:
+      - data
+    TokenResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/Token'
+      required:
+      - data
+    TokenTypeEnum:
+      type: string
+      enum:
+      - tokens
+    Type049Enum:
+      type: string
+      enum:
+      - provider-secrets
+    Type227Enum:
+      type: string
+      enum:
+      - providers
+    Type7f7Enum:
+      type: string
+      enum:
+      - compliance-overviews
+    Type8cdEnum:
+      type: string
+      enum:
+      - users
+    TypeD4dEnum:
+      type: string
+      enum:
+      - invitations
+    TypeE53Enum:
+      type: string
+      enum:
+      - scans
+    User:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type8cdEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              maxLength: 150
+              minLength: 3
+            email:
+              type: string
+              format: email
+              description: Case insensitive
+              maxLength: 254
+            company_name:
+              type: string
+              maxLength: 150
+            date_joined:
+              type: string
+              format: date-time
+              readOnly: true
+          required:
+          - name
+          - email
+        relationships:
+          type: object
+          properties:
+            memberships:
+              type: object
+              properties:
+                data:
+                  type: array
+                  items:
+                    type: object
+                    properties:
+                      id:
+                        type: string
+                        format: uuid
+                        title: Resource Identifier
+                        description: The identifier of the related object.
+                      type:
+                        type: string
+                        enum:
+                        - memberships
+                        title: Resource Type Name
+                        description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                          member is used to describe resource objects that share common
+                          attributes and relationships.
+                    required:
+                    - id
+                    - type
+              required:
+              - data
+              description: A related resource object of type memberships
+              title: memberships
+              readOnly: true
+    UserCreate:
+      type: object
+      required:
+      - type
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type8cdEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              maxLength: 150
+              minLength: 3
+            password:
+              type: string
+              writeOnly: true
+            email:
+              type: string
+              format: email
+              description: Case insensitive
+              maxLength: 254
+            company_name:
+              type: string
+          required:
+          - name
+          - password
+          - email
+    UserCreateRequest:
+      type: object
+      properties:
+        data:
+          type: object
+          required:
+          - type
+          additionalProperties: false
+          properties:
+            type:
+              type: string
+              description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+                member is used to describe resource objects that share common attributes
+                and relationships.
+              enum:
+              - users
+            attributes:
+              type: object
+              properties:
+                name:
+                  type: string
+                  minLength: 3
+                  maxLength: 150
+                password:
+                  type: string
+                  writeOnly: true
+                  minLength: 1
+                email:
+                  type: string
+                  format: email
+                  minLength: 1
+                  description: Case insensitive
+                  maxLength: 254
+                company_name:
+                  type: string
+                  minLength: 1
+              required:
+              - name
+              - password
+              - email
+      required:
+      - data
+    UserCreateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/UserCreate'
+      required:
+      - data
+    UserResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/User'
+      required:
+      - data
+    UserUpdate:
+      type: object
+      required:
+      - type
+      - id
+      additionalProperties: false
+      properties:
+        type:
+          allOf:
+          - $ref: '#/components/schemas/Type8cdEnum'
+          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+            member is used to describe resource objects that share common attributes
+            and relationships.
+        id:
+          type: string
+          format: uuid
+        attributes:
+          type: object
+          properties:
+            name:
+              type: string
+              maxLength: 150
+              minLength: 3
+            password:
+              type: string
+              writeOnly: true
+            email:
+              type: string
+              format: email
+              description: Case insensitive
+              maxLength: 254
+            company_name:
+              type: string
+              maxLength: 150
+          required:
+          - name
+          - email
+    UserUpdateResponse:
+      type: object
+      properties:
+        data:
+          $ref: '#/components/schemas/UserUpdate'
+      required:
+      - data
+  securitySchemes:
+    jwtAuth:
+      type: http
+      scheme: bearer
+      bearerFormat: JWT
+tags:
+- name: User
+  description: Endpoints for managing user accounts.
+- name: Token
+  description: Endpoints for token management, including obtaining a new token and
+    refreshing an existing token for authentication purposes.
+- name: Tenant
+  description: Endpoints for managing tenants, along with their memberships.
+- name: Invitation
+  description: Endpoints for managing tenant invitations, allowing retrieval and
+    filtering of invitations, creating new invitations, and accepting and revoking
+    them.
+- name: Provider
+  description: Endpoints for managing providers (AWS, GCP, Azure, etc.).
+- name: Provider Group
+  description: Endpoints for managing provider groups.
+- name: Scan
+  description: Endpoints for triggering manual scans and viewing scan results.
+- name: Resource
+  description: Endpoints for managing resources discovered by scans, allowing retrieval
+    and filtering of resource information.
+- name: Finding
+  description: Endpoints for managing findings, allowing retrieval and filtering of
+    findings that result from scans.
+- name: Overview
+  description: Endpoints for retrieving aggregated summaries of resources from the
+    system.
+- name: Compliance Overview
+  description: Endpoints for checking the compliance overview, allowing filtering
+    by scan, provider or compliance framework ID.
+- name: Task
+  description: Endpoints for task management, allowing retrieval of task status and
+    revoking tasks that have not started.
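The schemas above describe the JSON:API request and response bodies, but this excerpt of the spec does not show the `paths` section. As a rough orientation, a client flow might look like the sketch below; the base URL and the `/api/v1/tokens` and `/api/v1/scans` paths are assumptions, while the payload shapes follow the `TokenRequest` and `ScanCreateRequest` schemas and the `jwtAuth` bearer scheme defined above. The `<provider-uuid>` placeholder stands in for a real provider ID.

```python
import requests

BASE_URL = "http://localhost:8000/api/v1"  # assumed base path; not shown in this excerpt
JSONAPI_HEADERS = {"Content-Type": "application/vnd.api+json"}

# Obtain a token pair (TokenRequest). tenant_id is optional: per the schema,
# the tenant of the user's first membership is used when it is omitted.
token_resp = requests.post(
    f"{BASE_URL}/tokens",  # assumed path
    json={
        "data": {
            "type": "tokens",
            "attributes": {"email": "user@example.com", "password": "S3cret"},
        }
    },
    headers=JSONAPI_HEADERS,
)
access = token_resp.json()["data"]["attributes"]["access"]

# Trigger a manual scan (ScanCreateRequest): the provider relationship is
# required, the "name" attribute is optional.
scan_resp = requests.post(
    f"{BASE_URL}/scans",  # assumed path
    json={
        "data": {
            "type": "scans",
            "attributes": {"name": "manual scan"},
            "relationships": {
                "provider": {
                    "data": {"type": "providers", "id": "<provider-uuid>"}
                }
            },
        }
    },
    headers={**JSONAPI_HEADERS, "Authorization": f"Bearer {access}"},
)
```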
diff --git a/api/src/backend/api/tests/__init__.py b/api/src/backend/api/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/api/tests/integration/test_authentication.py b/api/src/backend/api/tests/integration/test_authentication.py
new file mode 100644
index 0000000000..3233d0f6ee
--- /dev/null
+++ b/api/src/backend/api/tests/integration/test_authentication.py
@@ -0,0 +1,100 @@
+import pytest
+from django.urls import reverse
+from unittest.mock import patch
+from rest_framework.test import APIClient
+
+from conftest import TEST_PASSWORD, get_api_tokens, get_authorization_header
+
+
+@patch("api.v1.views.MainRouter.admin_db", new="default")
+@pytest.mark.django_db
+def test_basic_authentication():
+    client = APIClient()
+
+    test_user = "test_email@prowler.com"
+    test_password = "test_password"
+
+    # Check that a 401 is returned when no credentials are provided
+    no_auth_response = client.get(reverse("provider-list"))
+    assert no_auth_response.status_code == 401
+
+    # Check that we can create a new user without any kind of authentication
+    user_creation_response = client.post(
+        reverse("user-list"),
+        data={
+            "data": {
+                "type": "users",
+                "attributes": {
+                    "name": "test",
+                    "email": test_user,
+                    "password": test_password,
+                },
+            }
+        },
+        format="vnd.api+json",
+    )
+    assert user_creation_response.status_code == 201
+
+    # Check that using our new user's credentials we can authenticate and get the providers
+    access_token, _ = get_api_tokens(client, test_user, test_password)
+    auth_headers = get_authorization_header(access_token)
+
+    auth_response = client.get(
+        reverse("provider-list"),
+        headers=auth_headers,
+    )
+    assert auth_response.status_code == 200
+
+
+@pytest.mark.django_db
+def test_refresh_token(create_test_user, tenants_fixture):
+    client = APIClient()
+
+    # Assert that we can obtain a new access token using the refresh token
+    access_token, refresh_token = get_api_tokens(
+        client, create_test_user.email, TEST_PASSWORD
+    )
+    valid_refresh_response = client.post(
+        reverse("token-refresh"),
+        data={
+            "data": {
+                "type": "tokens-refresh",
+                "attributes": {"refresh": refresh_token},
+            }
+        },
+        format="vnd.api+json",
+    )
+    assert valid_refresh_response.status_code == 200
+    assert (
+        valid_refresh_response.json()["data"]["attributes"]["refresh"] != refresh_token
+    )
+
+    # Assert the former refresh token gets invalidated
+    invalid_refresh_response = client.post(
+        reverse("token-refresh"),
+        data={
+            "data": {
+                "type": "tokens-refresh",
+                "attributes": {"refresh": refresh_token},
+            }
+        },
+        format="vnd.api+json",
+    )
+    assert invalid_refresh_response.status_code == 400
+
+    # Assert that the new refresh token can be used
+    new_refresh_response = client.post(
+        reverse("token-refresh"),
+        data={
+            "data": {
+                "type": "tokens-refresh",
+                "attributes": {
+                    "refresh": valid_refresh_response.json()["data"]["attributes"][
+                        "refresh"
+                    ]
+                },
+            }
+        },
+        format="vnd.api+json",
+    )
+    assert new_refresh_response.status_code == 200
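These tests lean on two helpers imported from `conftest`, which is not part of this excerpt. Based on how they are called, a plausible sketch looks like the following; the `token-obtain` URL name is a guess (only `token-refresh` appears above), and the real implementations in `conftest.py` may differ.

```python
from django.urls import reverse


def get_api_tokens(client, email, password, tenant_id=None):
    """Obtain an (access, refresh) pair through the token endpoint."""
    attributes = {"email": email, "password": password}
    if tenant_id is not None:
        attributes["tenant_id"] = tenant_id  # scopes the tokens to one tenant
    response = client.post(
        reverse("token-obtain"),  # assumed URL name; only "token-refresh" appears above
        data={"data": {"type": "tokens", "attributes": attributes}},
        format="vnd.api+json",
    )
    attributes = response.json()["data"]["attributes"]
    return attributes["access"], attributes["refresh"]


def get_authorization_header(access_token):
    """Build the headers dict the tests pass to APIClient calls."""
    return {"Authorization": f"Bearer {access_token}"}
```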
diff --git a/api/src/backend/api/tests/integration/test_tenants.py b/api/src/backend/api/tests/integration/test_tenants.py
new file mode 100644
index 0000000000..ee06f49ba8
--- /dev/null
+++ b/api/src/backend/api/tests/integration/test_tenants.py
@@ -0,0 +1,97 @@
+from unittest.mock import patch
+
+import pytest
+from django.urls import reverse
+
+from conftest import TEST_USER, TEST_PASSWORD, get_api_tokens, get_authorization_header
+
+
+@patch("api.v1.views.schedule_provider_scan")
+@pytest.mark.django_db
+def test_check_resources_between_different_tenants(
+    schedule_mock,
+    enforce_test_user_db_connection,
+    authenticated_api_client,
+    tenants_fixture,
+):
+    client = authenticated_api_client
+
+    tenant1 = str(tenants_fixture[0].id)
+    tenant2 = str(tenants_fixture[1].id)
+
+    tenant1_token, _ = get_api_tokens(
+        client, TEST_USER, TEST_PASSWORD, tenant_id=tenant1
+    )
+    tenant2_token, _ = get_api_tokens(
+        client, TEST_USER, TEST_PASSWORD, tenant_id=tenant2
+    )
+
+    tenant1_headers = get_authorization_header(tenant1_token)
+    tenant2_headers = get_authorization_header(tenant2_token)
+
+    # Create a provider on tenant 1
+    provider_data = {
+        "data": {
+            "type": "providers",
+            "attributes": {
+                "alias": "test_provider_tenant_1",
+                "provider": "aws",
+                "uid": "123456789012",
+            },
+        }
+    }
+    provider1_response = client.post(
+        reverse("provider-list"),
+        data=provider_data,
+        format="vnd.api+json",
+        headers=tenant1_headers,
+    )
+    assert provider1_response.status_code == 201
+    provider1_id = provider1_response.json()["data"]["id"]
+
+    # Create a provider on tenant 2
+    provider_data = {
+        "data": {
+            "type": "providers",
+            "attributes": {
+                "alias": "test_provider_tenant_2",
+                "provider": "aws",
+                "uid": "123456789013",
+            },
+        }
+    }
+    provider2_response = client.post(
+        reverse("provider-list"),
+        data=provider_data,
+        format="vnd.api+json",
+        headers=tenant2_headers,
+    )
+    assert provider2_response.status_code == 201
+    provider2_id = provider2_response.json()["data"]["id"]
+
+    # Try to fetch each provider with the other tenant's token, then with its own
+    cross_tenant_response = client.get(
+        reverse("provider-detail", kwargs={"pk": provider1_id}),
+        headers=tenant2_headers,
+    )
+    assert cross_tenant_response.status_code == 404
+    same_tenant_response = client.get(
+        reverse("provider-detail", kwargs={"pk": provider1_id}),
+        headers=tenant1_headers,
+    )
+    assert same_tenant_response.status_code == 200
+    assert same_tenant_response.json()["data"]["id"] == provider1_id
+
+    # Vice versa
+
+    cross_tenant_response = client.get(
+        reverse("provider-detail", kwargs={"pk": provider2_id}),
+        headers=tenant1_headers,
+    )
+    assert cross_tenant_response.status_code == 404
+    same_tenant_response = client.get(
+        reverse("provider-detail", kwargs={"pk": provider2_id}),
+        headers=tenant2_headers,
+    )
+    assert same_tenant_response.status_code == 200
+    assert same_tenant_response.json()["data"]["id"] == provider2_id
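The cross-tenant 404s asserted here come from tenant isolation at the database layer: `api.rls` appears in the imports of `test_database.py` below, and `test_decorators.py` pins a `set_config('api.tenant_id', ...)` call, which together suggest Postgres row-level security keyed on a session variable. A policy of roughly the following shape would produce this behavior; the `providers` table name and the migration dependency are illustrative, not taken from this diff.

```python
from django.db import migrations

TENANT_RLS_SQL = """
ALTER TABLE providers ENABLE ROW LEVEL SECURITY;
CREATE POLICY tenant_isolation ON providers
    USING (tenant_id = current_setting('api.tenant_id')::uuid);
"""


class Migration(migrations.Migration):
    dependencies = [("api", "0001_initial")]  # hypothetical dependency

    operations = [migrations.RunSQL(TENANT_RLS_SQL)]
```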
diff --git a/api/src/backend/api/tests/test_compliance.py b/api/src/backend/api/tests/test_compliance.py
new file mode 100644
index 0000000000..6b405536b3
--- /dev/null
+++ b/api/src/backend/api/tests/test_compliance.py
@@ -0,0 +1,284 @@
+from unittest.mock import patch, MagicMock
+
+from api.compliance import (
+    get_prowler_provider_checks,
+    get_prowler_provider_compliance,
+    load_prowler_compliance,
+    load_prowler_checks,
+    generate_scan_compliance,
+    generate_compliance_overview_template,
+)
+from api.models import Provider
+
+
+class TestCompliance:
+    @patch("api.compliance.CheckMetadata")
+    def test_get_prowler_provider_checks(self, mock_check_metadata):
+        provider_type = Provider.ProviderChoices.AWS
+        mock_check_metadata.get_bulk.return_value = {
+            "check1": MagicMock(),
+            "check2": MagicMock(),
+            "check3": MagicMock(),
+        }
+        checks = get_prowler_provider_checks(provider_type)
+        assert set(checks) == {"check1", "check2", "check3"}
+        mock_check_metadata.get_bulk.assert_called_once_with(provider_type)
+
+    @patch("api.compliance.Compliance")
+    def test_get_prowler_provider_compliance(self, mock_compliance):
+        provider_type = Provider.ProviderChoices.AWS
+        mock_compliance.get_bulk.return_value = {
+            "compliance1": MagicMock(),
+            "compliance2": MagicMock(),
+        }
+        compliance_data = get_prowler_provider_compliance(provider_type)
+        assert compliance_data == mock_compliance.get_bulk.return_value
+        mock_compliance.get_bulk.assert_called_once_with(provider_type)
+
+    @patch("api.models.Provider.ProviderChoices")
+    @patch("api.compliance.get_prowler_provider_compliance")
+    @patch("api.compliance.generate_compliance_overview_template")
+    @patch("api.compliance.load_prowler_checks")
+    def test_load_prowler_compliance(
+        self,
+        mock_load_prowler_checks,
+        mock_generate_compliance_overview_template,
+        mock_get_prowler_provider_compliance,
+        mock_provider_choices,
+    ):
+        mock_provider_choices.values = ["aws", "azure"]
+
+        compliance_data_aws = {"compliance_aws": MagicMock()}
+        compliance_data_azure = {"compliance_azure": MagicMock()}
+
+        compliance_data_dict = {
+            "aws": compliance_data_aws,
+            "azure": compliance_data_azure,
+        }
+
+        def mock_get_compliance(provider_type):
+            return compliance_data_dict[provider_type]
+
+        mock_get_prowler_provider_compliance.side_effect = mock_get_compliance
+
+        mock_generate_compliance_overview_template.return_value = {
+            "template_key": "template_value"
+        }
+
+        mock_load_prowler_checks.return_value = {"checks_key": "checks_value"}
+
+        load_prowler_compliance()
+
+        from api.compliance import PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE, PROWLER_CHECKS
+
+        assert PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE == {
+            "template_key": "template_value"
+        }
+        assert PROWLER_CHECKS == {"checks_key": "checks_value"}
+
+        expected_prowler_compliance = compliance_data_dict
+        mock_get_prowler_provider_compliance.assert_any_call("aws")
+        mock_get_prowler_provider_compliance.assert_any_call("azure")
+        mock_generate_compliance_overview_template.assert_called_once_with(
+            expected_prowler_compliance
+        )
+        mock_load_prowler_checks.assert_called_once_with(expected_prowler_compliance)
+
+    @patch("api.compliance.get_prowler_provider_checks")
+    @patch("api.models.Provider.ProviderChoices")
+    def test_load_prowler_checks(
+        self, mock_provider_choices, mock_get_prowler_provider_checks
+    ):
+        mock_provider_choices.values = ["aws"]
+
+        mock_get_prowler_provider_checks.return_value = ["check1", "check2", "check3"]
+
+        prowler_compliance = {
+            "aws": {
+                "compliance1": MagicMock(
+                    Requirements=[
+                        MagicMock(
+                            Checks=["check1", "check2"],
+                        ),
+                    ],
+                ),
+            },
+        }
+
+        expected_checks = {
+            "aws": {
+                "check1": {"compliance1"},
+                "check2": {"compliance1"},
+                "check3": set(),
+            }
+        }
+
+        checks = load_prowler_checks(prowler_compliance)
+        assert checks == expected_checks
+        mock_get_prowler_provider_checks.assert_called_once_with("aws")
+
+    @patch("api.compliance.PROWLER_CHECKS", new_callable=dict)
+    def test_generate_scan_compliance(self, mock_prowler_checks):
+        mock_prowler_checks["aws"] = {
+            "check1": {"compliance1"},
+            "check2": {"compliance1", "compliance2"},
+        }
+
+        compliance_overview = {
+            "compliance1": {
+                "requirements": {
+                    "requirement1": {
+                        "checks": {"check1": None, "check2": None},
+                        "checks_status": {
+                            "pass": 0,
+                            "fail": 0,
+                            "manual": 0,
+                            "total": 2,
+                        },
+                        "status": "PASS",
+                    }
+                },
+                "requirements_status": {"passed": 1, "failed": 0, "manual": 0},
+            },
+            "compliance2": {
+                "requirements": {
+                    "requirement2": {
+                        "checks": {"check2": None},
+                        "checks_status": {
+                            "pass": 0,
+                            "fail": 0,
+                            "manual": 0,
+                            "total": 1,
+                        },
+                        "status": "PASS",
+                    }
+                },
+                "requirements_status": {"passed": 1, "failed": 0, "manual": 0},
+            },
+        }
+
+        provider_type = "aws"
+        check_id = "check2"
+        status = "FAIL"
+
+        generate_scan_compliance(compliance_overview, provider_type, check_id, status)
+
+        assert (
+            compliance_overview["compliance1"]["requirements"]["requirement1"][
+                "checks"
+            ]["check2"]
+            == "FAIL"
+        )
+        assert (
+            compliance_overview["compliance1"]["requirements"]["requirement1"][
+                "checks_status"
+            ]["fail"]
+            == 1
+        )
+        assert (
+            compliance_overview["compliance1"]["requirements"]["requirement1"]["status"]
+            == "FAIL"
+        )
+        assert compliance_overview["compliance1"]["requirements_status"]["passed"] == 0
+        assert compliance_overview["compliance1"]["requirements_status"]["failed"] == 1
+
+        assert (
+            compliance_overview["compliance2"]["requirements"]["requirement2"][
+                "checks"
+            ]["check2"]
+            == "FAIL"
+        )
+        assert (
+            compliance_overview["compliance2"]["requirements"]["requirement2"][
+                "checks_status"
+            ]["fail"]
+            == 1
+        )
+        assert (
+            compliance_overview["compliance2"]["requirements"]["requirement2"]["status"]
+            == "FAIL"
+        )
+        assert compliance_overview["compliance2"]["requirements_status"]["passed"] == 0
+        assert compliance_overview["compliance2"]["requirements_status"]["failed"] == 1
+
+        assert (
+            compliance_overview["compliance1"]["requirements"]["requirement1"][
+                "checks"
+            ]["check1"]
+            is None
+        )
+
+    @patch("api.models.Provider.ProviderChoices")
+    def test_generate_compliance_overview_template(self, mock_provider_choices):
+        mock_provider_choices.values = ["aws"]
+
+        requirement1 = MagicMock(
+            Id="requirement1",
+            Name="Requirement 1",
+            Description="Description of requirement 1",
+            Attributes=[],
+            Checks=["check1", "check2"],
+        )
+        requirement2 = MagicMock(
+            Id="requirement2",
+            Name="Requirement 2",
+            Description="Description of requirement 2",
+            Attributes=[],
+            Checks=[],
+        )
+        compliance1 = MagicMock(
+            Requirements=[requirement1, requirement2],
+            Framework="Framework 1",
+            Version="1.0",
+            Description="Description of compliance1",
+        )
+        prowler_compliance = {"aws": {"compliance1": compliance1}}
+
+        template = generate_compliance_overview_template(prowler_compliance)
+
+        expected_template = {
+            "aws": {
+                "compliance1": {
+                    "framework": "Framework 1",
+                    "version": "1.0",
+                    "provider": "aws",
+                    "description": "Description of compliance1",
+                    "requirements": {
+                        "requirement1": {
+                            "name": "Requirement 1",
+                            "description": "Description of requirement 1",
+                            "attributes": [],
+                            "checks": {"check1": None, "check2": None},
+                            "checks_status": {
+                                "pass": 0,
+                                "fail": 0,
+                                "manual": 0,
+                                "total": 2,
+                            },
+                            "status": "PASS",
+                        },
+                        "requirement2": {
+                            "name": "Requirement 2",
+                            "description": "Description of requirement 2",
+                            "attributes": [],
+                            "checks": {},
+                            "checks_status": {
+                                "pass": 0,
+                                "fail": 0,
+                                "manual": 0,
+                                "total": 0,
+                            },
+                            "status": "PASS",
+                        },
+                    },
+                    "requirements_status": {
+                        "passed": 1,  # total_requirements - manual
+                        "failed": 0,
+                        "manual": 1,  # requirement2 has 0 checks
+                    },
+                    "total_requirements": 2,
+                }
+            }
+        }
+
+        assert template == expected_template
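The assertions above pin down the behavior of `generate_scan_compliance` fairly tightly. A sketch consistent with them is shown below; the real implementation lives in `api/compliance.py` and may differ in details such as how `MANUAL` statuses are handled.

```python
# PROWLER_CHECKS maps provider -> check_id -> the set of compliance IDs that
# include that check (see load_prowler_checks above).
PROWLER_CHECKS: dict = {}  # populated at startup by load_prowler_compliance()


def generate_scan_compliance(compliance_overview, provider_type, check_id, status):
    for compliance_id in PROWLER_CHECKS[provider_type][check_id]:
        compliance = compliance_overview[compliance_id]
        for requirement in compliance["requirements"].values():
            if check_id not in requirement["checks"]:
                continue
            requirement["checks"][check_id] = status
            requirement["checks_status"][status.lower()] += 1
            # The first failing check flips the whole requirement to FAIL.
            if status == "FAIL" and requirement["status"] != "FAIL":
                requirement["status"] = "FAIL"
                compliance["requirements_status"]["passed"] -= 1
                compliance["requirements_status"]["failed"] += 1
```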
diff --git a/api/src/backend/api/tests/test_database.py b/api/src/backend/api/tests/test_database.py
new file mode 100644
index 0000000000..e92d6bac19
--- /dev/null
+++ b/api/src/backend/api/tests/test_database.py
@@ -0,0 +1,29 @@
+import pytest
+from django.conf import settings
+from django.db.migrations.recorder import MigrationRecorder
+from django.db.utils import ConnectionRouter
+
+from api.db_router import MainRouter
+from api.rls import Tenant
+from config.django.base import DATABASE_ROUTERS as PROD_DATABASE_ROUTERS
+
+
+class TestMainDatabaseRouter:
+    @pytest.fixture(scope="module")
+    def router(self):
+        testing_routers = settings.DATABASE_ROUTERS.copy()
+        settings.DATABASE_ROUTERS = PROD_DATABASE_ROUTERS
+        yield ConnectionRouter()
+        settings.DATABASE_ROUTERS = testing_routers
+
+    @pytest.mark.parametrize("api_model", [Tenant])
+    def test_router_api_models(self, api_model, router):
+        assert router.db_for_read(api_model) == "default"
+        assert router.db_for_write(api_model) == "default"
+
+        assert router.allow_migrate_model(MainRouter.admin_db, api_model)
+        assert not router.allow_migrate_model("default", api_model)
+
+    def test_router_django_models(self, router):
+        assert router.db_for_read(MigrationRecorder.Migration) == MainRouter.admin_db
+        assert router.db_for_read(MigrationRecorder.Migration) != "default"
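For reference, a router with the behavior these tests pin down could look roughly like the sketch below; the actual `MainRouter` lives in `api/db_router.py`, and both the `"admin"` alias and the app-label dispatch criterion are assumptions.

```python
class SketchMainRouter:
    admin_db = "admin"  # assumed alias; only MainRouter.admin_db is referenced

    def db_for_read(self, model, **hints):
        # API models go through the unprivileged default connection (subject
        # to RLS); Django's own models (e.g. migration records) use admin_db.
        if model._meta.app_label == "api":
            return "default"
        return self.admin_db

    db_for_write = db_for_read

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # Schema changes are only allowed through the admin connection.
        return db == self.admin_db
```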
diff --git a/api/src/backend/api/tests/test_db_utils.py b/api/src/backend/api/tests/test_db_utils.py
new file mode 100644
index 0000000000..15cbf88399
--- /dev/null
+++ b/api/src/backend/api/tests/test_db_utils.py
@@ -0,0 +1,108 @@
+from datetime import datetime, timezone
+from enum import Enum
+from unittest.mock import patch
+
+from api.db_utils import enum_to_choices, one_week_from_now, generate_random_token
+
+
+class TestEnumToChoices:
+    def test_enum_to_choices_simple(self):
+        class Color(Enum):
+            RED = 1
+            GREEN = 2
+            BLUE = 3
+
+        expected_result = [
+            (1, "Red"),
+            (2, "Green"),
+            (3, "Blue"),
+        ]
+
+        result = enum_to_choices(Color)
+        assert result == expected_result
+
+    def test_enum_to_choices_with_underscores(self):
+        class Status(Enum):
+            PENDING_APPROVAL = "pending"
+            IN_PROGRESS = "in_progress"
+            COMPLETED_SUCCESSFULLY = "completed"
+
+        expected_result = [
+            ("pending", "Pending Approval"),
+            ("in_progress", "In Progress"),
+            ("completed", "Completed Successfully"),
+        ]
+
+        result = enum_to_choices(Status)
+        assert result == expected_result
+
+    def test_enum_to_choices_empty_enum(self):
+        class EmptyEnum(Enum):
+            pass
+
+        expected_result = []
+
+        result = enum_to_choices(EmptyEnum)
+        assert result == expected_result
+
+    def test_enum_to_choices_numeric_values(self):
+        class Numbers(Enum):
+            ONE = 1
+            TWO = 2
+            THREE = 3
+
+        expected_result = [
+            (1, "One"),
+            (2, "Two"),
+            (3, "Three"),
+        ]
+
+        result = enum_to_choices(Numbers)
+        assert result == expected_result
+
+
+class TestOneWeekFromNow:
+    def test_one_week_from_now(self):
+        with patch("api.db_utils.datetime") as mock_datetime:
+            mock_datetime.now.return_value = datetime(2023, 1, 1, tzinfo=timezone.utc)
+            expected_result = datetime(2023, 1, 8, tzinfo=timezone.utc)
+
+            result = one_week_from_now()
+            assert result == expected_result
+
+    def test_one_week_from_now_with_timezone(self):
+        with patch("api.db_utils.datetime") as mock_datetime:
+            mock_datetime.now.return_value = datetime(
+                2023, 6, 15, 12, 0, tzinfo=timezone.utc
+            )
+            expected_result = datetime(2023, 6, 22, 12, 0, tzinfo=timezone.utc)
+
+            result = one_week_from_now()
+            assert result == expected_result
+
+
+class TestGenerateRandomToken:
+    def test_generate_random_token_default_length(self):
+        token = generate_random_token()
+        assert len(token) == 14
+
+    def test_generate_random_token_custom_length(self):
+        length = 20
+        token = generate_random_token(length=length)
+        assert len(token) == length
+
+    def test_generate_random_token_with_symbols(self):
+        symbols = "ABC123"
+        token = generate_random_token(length=10, symbols=symbols)
+        assert len(token) == 10
+        assert all(char in symbols for char in token)
+
+    def test_generate_random_token_unique(self):
+        tokens = {generate_random_token() for _ in range(1000)}
+        # Generating 1000 random tokens should yield 1000 unique values
+        assert len(tokens) == 1000
+
+    def test_generate_random_token_no_symbols_provided(self):
+        token = generate_random_token(length=5, symbols="")
+        # An empty symbols string should fall back to the default symbol set
+        assert len(token) == 5
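These helpers are small enough that the tests effectively specify them. A sketch consistent with every assertion above, including the fallback when `symbols=""` is passed, follows; the default alphabet is an assumption.

```python
import string
from datetime import datetime, timedelta, timezone
from random import SystemRandom

DEFAULT_SYMBOLS = string.ascii_letters + string.digits  # assumed default alphabet


def enum_to_choices(enum_class):
    # (value, "Title Cased Member Name") pairs, as Django's `choices` expects.
    return [
        (member.value, member.name.replace("_", " ").title())
        for member in enum_class
    ]


def one_week_from_now():
    return datetime.now(timezone.utc) + timedelta(days=7)


def generate_random_token(length=14, symbols=DEFAULT_SYMBOLS):
    alphabet = symbols or DEFAULT_SYMBOLS  # empty string falls back to the default
    return "".join(SystemRandom().choice(alphabet) for _ in range(length))
```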
diff --git a/api/src/backend/api/tests/test_decorators.py b/api/src/backend/api/tests/test_decorators.py
new file mode 100644
index 0000000000..a9a333bb7b
--- /dev/null
+++ b/api/src/backend/api/tests/test_decorators.py
@@ -0,0 +1,34 @@
+from unittest.mock import patch, call
+
+import pytest
+
+from api.decorators import set_tenant
+
+
+@pytest.mark.django_db
+class TestSetTenantDecorator:
+    @patch("api.decorators.connection.cursor")
+    def test_set_tenant(self, mock_cursor):
+        mock_cursor.return_value.__enter__.return_value = mock_cursor
+
+        @set_tenant
+        def random_func(arg):
+            return arg
+
+        tenant_id = "1234-abcd-5678"
+
+        result = random_func("test_arg", tenant_id=tenant_id)
+
+        assert (
+            call(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
+            in mock_cursor.execute.mock_calls
+        )
+        assert result == "test_arg"
+
+    def test_set_tenant_exception(self):
+        @set_tenant
+        def random_func(arg):
+            return arg
+
+        with pytest.raises(KeyError):
+            random_func("test_arg")
diff --git a/api/src/backend/api/tests/test_middleware.py b/api/src/backend/api/tests/test_middleware.py
new file mode 100644
index 0000000000..1bd8a07351
--- /dev/null
+++ b/api/src/backend/api/tests/test_middleware.py
@@ -0,0 +1,54 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+from django.http import HttpResponse
+from django.test import RequestFactory
+
+from api.middleware import APILoggingMiddleware
+
+
+@pytest.mark.django_db
+@patch("logging.getLogger")
+def test_api_logging_middleware_logging(mock_logger):
+    factory = RequestFactory()
+
+    request = factory.get("/test-path?param1=value1¶m2=value2")
+    request.method = "GET"
+
+    response = HttpResponse()
+    response.status_code = 200
+
+    get_response = MagicMock(return_value=response)
+
+    with patch("api.middleware.extract_auth_info") as mock_extract_auth_info:
+        mock_extract_auth_info.return_value = {
+            "user_id": "user123",
+            "tenant_id": "tenant456",
+        }
+
+        with patch("api.middleware.logging.getLogger") as mock_get_logger:
+            mock_logger = MagicMock()
+            mock_get_logger.return_value = mock_logger
+
+            middleware = APILoggingMiddleware(get_response)
+
+            with patch("api.middleware.time.time") as mock_time:
+                mock_time.side_effect = [1000.0, 1001.0]  # Start time and end time
+
+                middleware(request)
+
+                get_response.assert_called_once_with(request)
+
+                mock_extract_auth_info.assert_called_once_with(request)
+
+                expected_extra = {
+                    "user_id": "user123",
+                    "tenant_id": "tenant456",
+                    "method": "GET",
+                    "path": "/test-path",
+                    "query_params": {"param1": "value1", "param2": "value2"},
+                    "status_code": 200,
+                    "duration": 1.0,
+                }
+
+                mock_logger.info.assert_called_once_with("", extra=expected_extra)
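Read together, the mocks describe the middleware contract: time the request, collect auth info plus method/path/query/status, and emit a single structured log record. A sketch consistent with that follows; `extract_auth_info` is stubbed, since its implementation is not part of this excerpt.

```python
import logging
import time


def extract_auth_info(request):
    # Stub: the real helper (patched in the test) presumably decodes the JWT
    # into user and tenant identifiers.
    return {"user_id": None, "tenant_id": None}


class SketchAPILoggingMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        self.logger = logging.getLogger(__name__)

    def __call__(self, request):
        start = time.time()
        response = self.get_response(request)
        extra = {
            **extract_auth_info(request),
            "method": request.method,
            "path": request.path,
            "query_params": request.GET.dict(),
            "status_code": response.status_code,
            "duration": time.time() - start,
        }
        # Empty message: all the useful fields travel in `extra`, which suits
        # structured (e.g. JSON) log formatters.
        self.logger.info("", extra=extra)
        return response
```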
diff --git a/api/src/backend/api/tests/test_models.py b/api/src/backend/api/tests/test_models.py
new file mode 100644
index 0000000000..c7fdf9deb1
--- /dev/null
+++ b/api/src/backend/api/tests/test_models.py
@@ -0,0 +1,89 @@
+import pytest
+
+from api.models import Resource, ResourceTag
+
+
+@pytest.mark.django_db
+class TestResourceModel:
+    def test_setting_tags(self, providers_fixture):
+        provider, *_ = providers_fixture
+
+        resource = Resource.objects.create(
+            tenant_id=provider.tenant_id,
+            provider=provider,
+            uid="arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0",
+            name="My Instance 1",
+            region="us-east-1",
+            service="ec2",
+            type="prowler-test",
+        )
+
+        tags = [
+            ResourceTag.objects.create(
+                tenant_id=provider.tenant_id,
+                key="key",
+                value="value",
+            ),
+            ResourceTag.objects.create(
+                tenant_id=provider.tenant_id,
+                key="key2",
+                value="value2",
+            ),
+        ]
+
+        resource.upsert_or_delete_tags(tags)
+
+        assert len(tags) == len(resource.tags.all())
+
+        tags_dict = resource.get_tags()
+
+        for tag in tags:
+            assert tag.key in tags_dict
+            assert tag.value == tags_dict[tag.key]
+
+    def test_adding_tags(self, resources_fixture):
+        resource, *_ = resources_fixture
+
+        tags = [
+            ResourceTag.objects.create(
+                tenant_id=resource.tenant_id,
+                key="env",
+                value="test",
+            ),
+        ]
+        before_count = len(resource.tags.all())
+
+        resource.upsert_or_delete_tags(tags)
+
+        assert before_count + 1 == len(resource.tags.all())
+
+        tags_dict = resource.get_tags()
+
+        assert "env" in tags_dict
+        assert tags_dict["env"] == "test"
+
+    def test_adding_duplicate_tags(self, resources_fixture):
+        resource, *_ = resources_fixture
+
+        tags = resource.tags.all()
+
+        before_count = len(resource.tags.all())
+
+        resource.upsert_or_delete_tags(tags)
+
+        # should be the same number of tags
+        assert before_count == len(resource.tags.all())
+
+    def test_add_tags_none(self, resources_fixture):
+        resource, *_ = resources_fixture
+        resource.upsert_or_delete_tags(None)
+
+        assert len(resource.tags.all()) == 0
+        assert resource.get_tags() == {}
+
+    def test_clear_tags(self, resources_fixture):
+        resource, *_ = resources_fixture
+        resource.clear_tags()
+
+        assert len(resource.tags.all()) == 0
+        assert resource.get_tags() == {}
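The delete half of `upsert_or_delete_tags` is only partially pinned down here (passing `None` empties the tag set, while passing new tags appends), so the following is one reading that satisfies these tests rather than the actual model code from `api/models.py`.

```python
class ResourceTagHelpersSketch:
    # Mixin-style sketch; `tags` is assumed to be a many-to-many relation
    # from Resource to ResourceTag.
    def upsert_or_delete_tags(self, tags):
        if tags is None:
            self.tags.clear()  # None removes every tag association
            return
        for tag in tags:
            self.tags.add(tag)  # re-adding an already-linked tag is a no-op

    def get_tags(self):
        return {tag.key: tag.value for tag in self.tags.all()}

    def clear_tags(self):
        self.tags.clear()
```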
diff --git a/api/src/backend/api/tests/test_utils.py b/api/src/backend/api/tests/test_utils.py
new file mode 100644
index 0000000000..fcde1a9999
--- /dev/null
+++ b/api/src/backend/api/tests/test_utils.py
@@ -0,0 +1,318 @@
+from datetime import datetime, timedelta, timezone
+from unittest.mock import patch, MagicMock
+
+import pytest
+from prowler.providers.aws.aws_provider import AwsProvider
+from prowler.providers.azure.azure_provider import AzureProvider
+from prowler.providers.gcp.gcp_provider import GcpProvider
+from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
+from rest_framework.exceptions import ValidationError, NotFound
+
+from api.db_router import MainRouter
+from api.exceptions import InvitationTokenExpiredException
+from api.models import Invitation, Provider
+from api.utils import (
+    merge_dicts,
+    return_prowler_provider,
+    initialize_prowler_provider,
+    prowler_provider_connection_test,
+    get_prowler_provider_kwargs,
+    validate_invitation,
+)
+
+
+class TestMergeDicts:
+    def test_simple_merge(self):
+        default_dict = {"key1": "value1", "key2": "value2"}
+        replacement_dict = {"key2": "new_value2", "key3": "value3"}
+        expected_result = {"key1": "value1", "key2": "new_value2", "key3": "value3"}
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
+    def test_nested_merge(self):
+        default_dict = {
+            "key1": "value1",
+            "key2": {"nested_key1": "nested_value1", "nested_key2": "nested_value2"},
+        }
+        replacement_dict = {
+            "key2": {
+                "nested_key2": "new_nested_value2",
+                "nested_key3": "nested_value3",
+            },
+            "key3": "value3",
+        }
+        expected_result = {
+            "key1": "value1",
+            "key2": {
+                "nested_key1": "nested_value1",
+                "nested_key2": "new_nested_value2",
+                "nested_key3": "nested_value3",
+            },
+            "key3": "value3",
+        }
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
+    def test_no_overlap(self):
+        default_dict = {"key1": "value1"}
+        replacement_dict = {"key2": "value2"}
+        expected_result = {"key1": "value1", "key2": "value2"}
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
+    def test_replacement_dict_empty(self):
+        default_dict = {"key1": "value1", "key2": "value2"}
+        replacement_dict = {}
+        expected_result = {"key1": "value1", "key2": "value2"}
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
+    def test_default_dict_empty(self):
+        default_dict = {}
+        replacement_dict = {"key1": "value1", "key2": "value2"}
+        expected_result = {"key1": "value1", "key2": "value2"}
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
+    def test_nested_empty_in_replacement_dict(self):
+        default_dict = {"key1": {"nested_key1": "nested_value1"}}
+        replacement_dict = {"key1": {}}
+        expected_result = {"key1": {}}
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
+    def test_deep_nested_merge(self):
+        default_dict = {"key1": {"nested_key1": {"deep_key1": "deep_value1"}}}
+        replacement_dict = {"key1": {"nested_key1": {"deep_key1": "new_deep_value1"}}}
+        expected_result = {"key1": {"nested_key1": {"deep_key1": "new_deep_value1"}}}
+
+        result = merge_dicts(default_dict, replacement_dict)
+        assert result == expected_result
+
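+
+# Reference sketch of the deep-merge semantics pinned above (illustrative
+# only; the imported api.utils.merge_dicts is the source of truth). Note the
+# truthiness check: an empty replacement dict replaces the default subtree
+# wholesale instead of merging into it.
+def _merge_dicts_sketch(default: dict, replacement: dict) -> dict:
+    merged = default.copy()
+    for key, value in replacement.items():
+        if isinstance(value, dict) and value and isinstance(merged.get(key), dict):
+            merged[key] = _merge_dicts_sketch(merged[key], value)
+        else:
+            merged[key] = value
+    return merged
+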
+
+class TestReturnProwlerProvider:
+    @pytest.mark.parametrize(
+        "provider_type, expected_provider",
+        [
+            (Provider.ProviderChoices.AWS.value, AwsProvider),
+            (Provider.ProviderChoices.GCP.value, GcpProvider),
+            (Provider.ProviderChoices.AZURE.value, AzureProvider),
+            (Provider.ProviderChoices.KUBERNETES.value, KubernetesProvider),
+        ],
+    )
+    def test_return_prowler_provider(self, provider_type, expected_provider):
+        provider = MagicMock()
+        provider.provider = provider_type
+        prowler_provider = return_prowler_provider(provider)
+        assert prowler_provider == expected_provider
+
+    def test_return_prowler_provider_unsupported_provider(self):
+        provider = MagicMock()
+        provider.provider = "UNSUPPORTED_PROVIDER"
+        with pytest.raises(ValueError):
+            return_prowler_provider(provider)
+
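+
+# Plausible shape of the dispatch exercised above (sketch; api.utils holds
+# the authoritative mapping). Unknown provider types raise ValueError.
+def _return_prowler_provider_sketch(provider):
+    mapping = {
+        Provider.ProviderChoices.AWS.value: AwsProvider,
+        Provider.ProviderChoices.GCP.value: GcpProvider,
+        Provider.ProviderChoices.AZURE.value: AzureProvider,
+        Provider.ProviderChoices.KUBERNETES.value: KubernetesProvider,
+    }
+    try:
+        return mapping[provider.provider]
+    except KeyError:
+        raise ValueError(f"Unsupported provider type: {provider.provider}")
+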
+
+class TestInitializeProwlerProvider:
+    @patch("api.utils.return_prowler_provider")
+    def test_initialize_prowler_provider(self, mock_return_prowler_provider):
+        provider = MagicMock()
+        provider.secret.secret = {"key": "value"}
+        mock_return_prowler_provider.return_value = MagicMock()
+
+        initialize_prowler_provider(provider)
+        mock_return_prowler_provider.return_value.assert_called_once_with(key="value")
+
+
+class TestProwlerProviderConnectionTest:
+    @patch("api.utils.return_prowler_provider")
+    def test_prowler_provider_connection_test(self, mock_return_prowler_provider):
+        provider = MagicMock()
+        provider.uid = "1234567890"
+        provider.secret.secret = {"key": "value"}
+        mock_return_prowler_provider.return_value = MagicMock()
+
+        prowler_provider_connection_test(provider)
+        mock_return_prowler_provider.return_value.test_connection.assert_called_once_with(
+            key="value", provider_id="1234567890", raise_on_exception=False
+        )
+
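+
+# Taken together with get_prowler_provider_kwargs (tested below), the two
+# helpers above plausibly reduce to the following (sketch, not the real code):
+def _initialize_prowler_provider_sketch(provider):
+    prowler_provider = return_prowler_provider(provider)
+    return prowler_provider(**get_prowler_provider_kwargs(provider))
+
+
+def _prowler_provider_connection_test_sketch(provider):
+    return return_prowler_provider(provider).test_connection(
+        **get_prowler_provider_kwargs(provider),
+        provider_id=provider.uid,
+        raise_on_exception=False,
+    )
+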
+
+class TestGetProwlerProviderKwargs:
+    @pytest.mark.parametrize(
+        "provider_type, expected_extra_kwargs",
+        [
+            (
+                Provider.ProviderChoices.AWS.value,
+                {},
+            ),
+            (
+                Provider.ProviderChoices.AZURE.value,
+                {"subscription_ids": ["provider_uid"]},
+            ),
+            (
+                Provider.ProviderChoices.GCP.value,
+                {"project_ids": ["provider_uid"]},
+            ),
+            (
+                Provider.ProviderChoices.KUBERNETES.value,
+                {"context": "provider_uid"},
+            ),
+        ],
+    )
+    def test_get_prowler_provider_kwargs(self, provider_type, expected_extra_kwargs):
+        provider_uid = "provider_uid"
+        secret_dict = {"key": "value"}
+        secret_mock = MagicMock()
+        secret_mock.secret = secret_dict
+
+        provider = MagicMock()
+        provider.provider = provider_type
+        provider.secret = secret_mock
+        provider.uid = provider_uid
+
+        result = get_prowler_provider_kwargs(provider)
+
+        expected_result = {**secret_dict, **expected_extra_kwargs}
+        assert result == expected_result
+
+    def test_get_prowler_provider_kwargs_unsupported_provider(self):
+        # Setup
+        provider_uid = "provider_uid"
+        secret_dict = {"key": "value"}
+        secret_mock = MagicMock()
+        secret_mock.secret = secret_dict
+
+        provider = MagicMock()
+        provider.provider = "UNSUPPORTED_PROVIDER"
+        provider.secret = secret_mock
+        provider.uid = provider_uid
+
+        result = get_prowler_provider_kwargs(provider)
+
+        expected_result = secret_dict.copy()
+        assert result == expected_result
+
+    def test_get_prowler_provider_kwargs_no_secret(self):
+        # Setup
+        provider_uid = "provider_uid"
+        secret_mock = MagicMock()
+        secret_mock.secret = {}
+
+        provider = MagicMock()
+        provider.provider = Provider.ProviderChoices.AWS.value
+        provider.secret = secret_mock
+        provider.uid = provider_uid
+
+        result = get_prowler_provider_kwargs(provider)
+
+        expected_result = {}
+        assert result == expected_result
+
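+
+# Summary of the extra kwargs layered on top of the stored secret, as pinned
+# by the parametrized cases above (the mapping in api.utils is authoritative):
+#   aws        -> no extra kwargs
+#   azure      -> subscription_ids=[provider.uid]
+#   gcp        -> project_ids=[provider.uid]
+#   kubernetes -> context=provider.uid
+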
+
+class TestValidateInvitation:
+    @pytest.fixture
+    def invitation(self):
+        invitation = MagicMock(spec=Invitation)
+        invitation.token = "VALID_TOKEN"
+        invitation.email = "user@example.com"
+        invitation.expires_at = datetime.now(timezone.utc) + timedelta(days=1)
+        invitation.state = Invitation.State.PENDING
+        invitation.tenant = MagicMock()
+        return invitation
+
+    def test_valid_invitation(self, invitation):
+        with patch("api.utils.Invitation.objects.using") as mock_using:
+            mock_db = mock_using.return_value
+            mock_db.get.return_value = invitation
+
+            result = validate_invitation("VALID_TOKEN", "user@example.com")
+
+            assert result == invitation
+            mock_db.get.assert_called_once_with(
+                token="VALID_TOKEN", email="user@example.com"
+            )
+
+    def test_invitation_not_found_raises_validation_error(self):
+        with patch("api.utils.Invitation.objects.using") as mock_using:
+            mock_db = mock_using.return_value
+            mock_db.get.side_effect = Invitation.DoesNotExist
+
+            with pytest.raises(ValidationError) as exc_info:
+                validate_invitation("INVALID_TOKEN", "user@example.com")
+
+            assert exc_info.value.detail == {
+                "invitation_token": "Invalid invitation code."
+            }
+            mock_db.get.assert_called_once_with(
+                token="INVALID_TOKEN", email="user@example.com"
+            )
+
+    def test_invitation_not_found_raises_not_found(self):
+        with patch("api.utils.Invitation.objects.using") as mock_using:
+            mock_db = mock_using.return_value
+            mock_db.get.side_effect = Invitation.DoesNotExist
+
+            with pytest.raises(NotFound) as exc_info:
+                validate_invitation(
+                    "INVALID_TOKEN", "user@example.com", raise_not_found=True
+                )
+
+            assert exc_info.value.detail == "Invitation is not valid."
+            mock_db.get.assert_called_once_with(
+                token="INVALID_TOKEN", email="user@example.com"
+            )
+
+    def test_invitation_expired(self, invitation):
+        expired_time = datetime.now(timezone.utc) - timedelta(days=1)
+        invitation.expires_at = expired_time
+
+        with patch("api.utils.Invitation.objects.using") as mock_using, patch(
+            "api.utils.datetime"
+        ) as mock_datetime:
+            mock_db = mock_using.return_value
+            mock_db.get.return_value = invitation
+            mock_datetime.now.return_value = datetime.now(timezone.utc)
+
+            with pytest.raises(InvitationTokenExpiredException):
+                validate_invitation("VALID_TOKEN", "user@example.com")
+
+            # Ensure the invitation state was updated to EXPIRED
+            assert invitation.state == Invitation.State.EXPIRED
+            invitation.save.assert_called_once_with(using=MainRouter.admin_db)
+
+    def test_invitation_not_pending(self, invitation):
+        invitation.state = Invitation.State.ACCEPTED
+
+        with patch("api.utils.Invitation.objects.using") as mock_using:
+            mock_db = mock_using.return_value
+            mock_db.get.return_value = invitation
+
+            with pytest.raises(ValidationError) as exc_info:
+                validate_invitation("VALID_TOKEN", "user@example.com")
+
+            assert exc_info.value.detail == {
+                "invitation_token": "This invitation is no longer valid."
+            }
+
+    def test_invitation_with_different_email(self):
+        with patch("api.utils.Invitation.objects.using") as mock_using:
+            mock_db = mock_using.return_value
+            mock_db.get.side_effect = Invitation.DoesNotExist
+
+            with pytest.raises(ValidationError) as exc_info:
+                validate_invitation("VALID_TOKEN", "different@example.com")
+
+            assert exc_info.value.detail == {
+                "invitation_token": "Invalid invitation code."
+            }
+            mock_db.get.assert_called_once_with(
+                token="VALID_TOKEN", email="different@example.com"
+            )
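+
+
+# Sketch of the validation flow the tests above pin down (illustrative only;
+# api.utils.validate_invitation is the source of truth):
+def _validate_invitation_sketch(token, email, raise_not_found=False):
+    try:
+        invitation = Invitation.objects.using(MainRouter.admin_db).get(
+            token=token, email=email
+        )
+    except Invitation.DoesNotExist:
+        if raise_not_found:
+            raise NotFound("Invitation is not valid.")
+        raise ValidationError({"invitation_token": "Invalid invitation code."})
+    if invitation.expires_at < datetime.now(timezone.utc):
+        # Persist the expired state before surfacing the error.
+        invitation.state = Invitation.State.EXPIRED
+        invitation.save(using=MainRouter.admin_db)
+        raise InvitationTokenExpiredException()
+    if invitation.state != Invitation.State.PENDING:
+        raise ValidationError(
+            {"invitation_token": "This invitation is no longer valid."}
+        )
+    return invitation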
diff --git a/api/src/backend/api/tests/test_uuid_utils.py b/api/src/backend/api/tests/test_uuid_utils.py
new file mode 100644
index 0000000000..e202d087f3
--- /dev/null
+++ b/api/src/backend/api/tests/test_uuid_utils.py
@@ -0,0 +1,113 @@
+from datetime import datetime, timezone
+from uuid import uuid4
+
+import pytest
+from dateutil.relativedelta import relativedelta
+from rest_framework_json_api.serializers import ValidationError
+from uuid6 import UUID
+
+from api.uuid_utils import (
+    transform_into_uuid7,
+    datetime_to_uuid7,
+    datetime_from_uuid7,
+    uuid7_start,
+    uuid7_end,
+    uuid7_range,
+)
+
+
+def test_transform_into_uuid7_valid():
+    uuid_v7 = datetime_to_uuid7(datetime.now(timezone.utc))
+    transformed_uuid = transform_into_uuid7(uuid_v7)
+    assert transformed_uuid == UUID(hex=uuid_v7.hex.upper())
+    assert transformed_uuid.version == 7
+
+
+def test_transform_into_uuid7_invalid_version():
+    uuid_v4 = uuid4()
+    with pytest.raises(ValidationError) as exc_info:
+        transform_into_uuid7(UUID(str(uuid_v4)))
+    assert str(exc_info.value.detail[0]) == "Invalid UUIDv7 value."
+
+
+@pytest.mark.parametrize(
+    "input_datetime",
+    [
+        datetime(2024, 9, 11, 7, 20, 27, tzinfo=timezone.utc),
+        datetime(2023, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
+    ],
+)
+def test_datetime_to_uuid7(input_datetime):
+    uuid7 = datetime_to_uuid7(input_datetime)
+    assert isinstance(uuid7, UUID)
+    assert uuid7.version == 7
+    expected_timestamp_ms = int(input_datetime.timestamp() * 1000) & 0xFFFFFFFFFFFF
+    assert uuid7.time == expected_timestamp_ms
+
+
+@pytest.mark.parametrize(
+    "input_datetime",
+    [
+        datetime(2024, 9, 11, 7, 20, 27, tzinfo=timezone.utc),
+        datetime(2023, 1, 1, 0, 0, 0, tzinfo=timezone.utc),
+    ],
+)
+def test_datetime_from_uuid7(input_datetime):
+    uuid7 = datetime_to_uuid7(input_datetime)
+    extracted_datetime = datetime_from_uuid7(uuid7)
+    assert extracted_datetime == input_datetime
+
+
+def test_datetime_from_uuid7_invalid():
+    uuid_v4 = uuid4()
+    with pytest.raises(ValueError):
+        datetime_from_uuid7(UUID(str(uuid_v4)))
+
+
+def test_uuid7_start():
+    dt = datetime.now(timezone.utc)
+    uuid = datetime_to_uuid7(dt)
+    start_uuid = uuid7_start(uuid)
+    expected_dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
+    expected_timestamp_ms = int(expected_dt.timestamp() * 1000) & 0xFFFFFFFFFFFF
+    assert start_uuid.time == expected_timestamp_ms
+    assert start_uuid.version == 7
+
+
+@pytest.mark.parametrize("months_offset", [0, 1, 10, 30, 60])
+def test_uuid7_end(months_offset):
+    dt = datetime.now(timezone.utc)
+    uuid = datetime_to_uuid7(dt)
+    end_uuid = uuid7_end(uuid, months_offset)
+    expected_dt = dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
+    expected_dt += relativedelta(months=months_offset, microseconds=-1)
+    expected_timestamp_ms = int(expected_dt.timestamp() * 1000) & 0xFFFFFFFFFFFF
+    assert end_uuid.time == expected_timestamp_ms
+    assert end_uuid.version == 7
+
+
+def test_uuid7_range():
+    dt_now = datetime.now(timezone.utc)
+    uuid_list = [
+        datetime_to_uuid7(dt_now),
+        datetime_to_uuid7(dt_now.replace(year=2023)),
+        datetime_to_uuid7(dt_now.replace(year=2024)),
+        datetime_to_uuid7(dt_now.replace(year=2025)),
+    ]
+    start_uuid, end_uuid = uuid7_range(uuid_list)
+
+    # Expected start of range
+    start_dt = datetime_from_uuid7(min(uuid_list, key=lambda u: u.time))
+    start_dt = start_dt.replace(hour=0, minute=0, second=0, microsecond=0)
+    expected_start_timestamp_ms = int(start_dt.timestamp() * 1000) & 0xFFFFFFFFFFFF
+
+    # Expected end of range
+    end_dt = datetime_from_uuid7(max(uuid_list, key=lambda u: u.time))
+    end_dt = end_dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
+    end_dt += relativedelta(months=1, microseconds=-1)
+    expected_end_timestamp_ms = int(end_dt.timestamp() * 1000) & 0xFFFFFFFFFFFF
+
+    assert start_uuid.time == expected_start_timestamp_ms
+    assert end_uuid.time == expected_end_timestamp_ms
+    assert start_uuid.version == 7
+    assert end_uuid.version == 7
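+
+
+# Sketch of the timestamp <-> UUIDv7 relationship these tests rely on: UUIDv7
+# stores a 48-bit Unix-millisecond timestamp in its top bits (illustrative
+# only; api.uuid_utils is the source of truth):
+def _datetime_to_uuid7_sketch(dt: datetime) -> UUID:
+    ms = int(dt.timestamp() * 1000) & 0xFFFFFFFFFFFF  # 48-bit milliseconds
+    # uuid6.UUID sets the version and variant bits when version=7 is given;
+    # the random tail is zeroed, so this is a range boundary, not a unique id.
+    return UUID(int=ms << 80, version=7)
+
+
+def _datetime_from_uuid7_sketch(value: UUID) -> datetime:
+    if value.version != 7:
+        raise ValueError("Not a UUIDv7")
+    # For version 7, uuid6.UUID.time yields the embedded milliseconds.
+    return datetime.fromtimestamp(value.time / 1000, tz=timezone.utc)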
diff --git a/api/src/backend/api/tests/test_views.py b/api/src/backend/api/tests/test_views.py
new file mode 100644
index 0000000000..79589968bd
--- /dev/null
+++ b/api/src/backend/api/tests/test_views.py
@@ -0,0 +1,3264 @@
+import json
+from datetime import datetime, timedelta, timezone
+from unittest.mock import ANY, Mock, patch
+
+import jwt
+import pytest
+from django.urls import reverse
+from rest_framework import status
+
+from api.models import (
+    User,
+    Membership,
+    Provider,
+    ProviderGroup,
+    ProviderGroupMembership,
+    Scan,
+    ProviderSecret,
+    Invitation,
+)
+from api.rls import Tenant
+from conftest import (
+    API_JSON_CONTENT_TYPE,
+    TEST_PASSWORD,
+    TEST_USER,
+)
+
+TODAY = str(datetime.today().date())
+
+
+@pytest.mark.django_db
+class TestUserViewSet:
+    def test_users_list(self, authenticated_client, create_test_user):
+        user = create_test_user
+        user.refresh_from_db()
+        response = authenticated_client.get(reverse("user-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 1
+        assert response.json()["data"][0]["attributes"]["email"] == user.email
+        assert response.json()["data"][0]["attributes"]["name"] == user.name
+        assert (
+            response.json()["data"][0]["attributes"]["company_name"]
+            == user.company_name
+        )
+
+    def test_users_retrieve(self, authenticated_client, create_test_user):
+        response = authenticated_client.get(
+            reverse("user-detail", kwargs={"pk": create_test_user.id})
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_users_me(self, authenticated_client, create_test_user):
+        response = authenticated_client.get(reverse("user-me"))
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["email"] == create_test_user.email
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_users_create(self, client):
+        valid_user_payload = {
+            "name": "test",
+            "password": "newpassword123",
+            "email": "NeWuSeR@example.com",
+        }
+        response = client.post(
+            reverse("user-list"), data=valid_user_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+        assert User.objects.filter(email__iexact=valid_user_payload["email"]).exists()
+        assert (
+            response.json()["data"]["attributes"]["email"]
+            == valid_user_payload["email"].lower()
+        )
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_users_create_duplicated_email(self, client):
+        # Create a user
+        self.test_users_create(client)
+
+        # Try to create it again and expect a 400
+        with pytest.raises(AssertionError) as assertion_error:
+            self.test_users_create(client)
+
+        assert "Response status_code=400" in str(assertion_error)
+
+    @pytest.mark.parametrize(
+        "password",
+        [
+            # Fails MinimumLengthValidator (too short)
+            "short",
+            "1234567",
+            # Fails CommonPasswordValidator (common passwords)
+            "password",
+            "12345678",
+            "qwerty",
+            "abc123",
+            # Fails NumericPasswordValidator (entirely numeric)
+            "12345678",
+            "00000000",
+            # Fails multiple validators
+            "password1",  # Common password and too similar to a common password
+            "dev12345",  # Similar to username
+            ("querty12" * 9) + "a",  # Too long, 73 characters
+        ],
+    )
+    def test_users_create_invalid_passwords(self, authenticated_client, password):
+        invalid_user_payload = {
+            "name": "test",
+            "password": password,
+            "email": "thisisafineemail@prowler.com",
+        }
+        response = authenticated_client.post(
+            reverse("user-list"), data=invalid_user_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/password"
+        )
+
+    @pytest.mark.parametrize(
+        "email",
+        [
+            # Same email, validation error
+            "nonexistentemail@prowler.com",
+            # Same email with capital letters, validation error
+            "NonExistentEmail@prowler.com",
+        ],
+    )
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_users_create_used_email(self, authenticated_client, email):
+        # First user created; no errors should occur
+        user_payload = {
+            "name": "test_email_validator",
+            "password": "newpassword123",
+            "email": "nonexistentemail@prowler.com",
+        }
+        response = authenticated_client.post(
+            reverse("user-list"), data=user_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+
+        user_payload = {
+            "name": "test_email_validator",
+            "password": "newpassword123",
+            "email": email,
+        }
+        response = authenticated_client.post(
+            reverse("user-list"), data=user_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/email"
+        )
+        assert (
+            response.json()["errors"][0]["detail"]
+            == "Please check the email address and try again."
+        )
+
+    def test_users_partial_update(self, authenticated_client, create_test_user):
+        new_company_name = "new company test"
+        payload = {
+            "data": {
+                "type": "users",
+                "id": str(create_test_user.id),
+                "attributes": {"company_name": new_company_name},
+            },
+        }
+        response = authenticated_client.patch(
+            reverse("user-detail", kwargs={"pk": create_test_user.id}),
+            data=payload,
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_200_OK
+        create_test_user.refresh_from_db()
+        assert create_test_user.company_name == new_company_name
+
+    def test_users_partial_update_invalid_content_type(
+        self, authenticated_client, create_test_user
+    ):
+        response = authenticated_client.patch(
+            reverse("user-detail", kwargs={"pk": create_test_user.id}), data={}
+        )
+        assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
+
+    def test_users_partial_update_invalid_content(
+        self, authenticated_client, create_test_user
+    ):
+        payload = {"email": "newemail@example.com"}
+        response = authenticated_client.patch(
+            reverse("user-detail", kwargs={"pk": create_test_user.id}),
+            data=payload,
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_users_partial_update_invalid_user(
+        self, authenticated_client, create_test_user
+    ):
+        another_user = User.objects.create_user(
+            password="otherpassword", email="other@example.com"
+        )
+        new_email = "new@example.com"
+        payload = {
+            "data": {
+                "type": "users",
+                "id": str(another_user.id),
+                "attributes": {"email": new_email},
+            },
+        }
+        response = authenticated_client.patch(
+            reverse("user-detail", kwargs={"pk": another_user.id}),
+            data=payload,
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+        another_user.refresh_from_db()
+        assert another_user.email != new_email
+
+    @pytest.mark.parametrize(
+        "password",
+        [
+            # Fails MinimumLengthValidator (too short)
+            "short",
+            "1234567",
+            # Fails CommonPasswordValidator (common passwords)
+            "password",
+            "12345678",
+            "qwerty",
+            "abc123",
+            # Fails NumericPasswordValidator (entirely numeric)
+            "12345678",
+            "00000000",
+            # Fails UserAttributeSimilarityValidator (too similar to email)
+            "dev12345",
+            "test@prowler.com",
+        ],
+    )
+    def test_users_partial_update_invalid_password(
+        self, authenticated_client, create_test_user, password
+    ):
+        payload = {
+            "data": {
+                "type": "users",
+                "id": str(create_test_user.id),
+                "attributes": {"password": password},
+            },
+        }
+
+        response = authenticated_client.patch(
+            reverse("user-detail", kwargs={"pk": str(create_test_user.id)}),
+            data=payload,
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/password"
+        )
+
+    def test_users_destroy(self, authenticated_client, create_test_user):
+        response = authenticated_client.delete(
+            reverse("user-detail", kwargs={"pk": create_test_user.id})
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert not User.objects.filter(id=create_test_user.id).exists()
+
+    def test_users_destroy_invalid_user(self, authenticated_client, create_test_user):
+        another_user = User.objects.create_user(
+            password="otherpassword", email="other@example.com"
+        )
+        response = authenticated_client.delete(
+            reverse("user-detail", kwargs={"pk": another_user.id})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+        assert User.objects.filter(id=another_user.id).exists()
+
+    @pytest.mark.parametrize(
+        "attribute_key, attribute_value, error_field",
+        [
+            ("password", "", "password"),
+            ("email", "invalidemail", "email"),
+        ],
+    )
+    def test_users_create_invalid_fields(
+        self, client, attribute_key, attribute_value, error_field
+    ):
+        invalid_payload = {
+            "name": "test",
+            "password": "testpassword",
+            "email": "test@example.com",
+        }
+        invalid_payload[attribute_key] = attribute_value
+        response = client.post(
+            reverse("user-list"), data=invalid_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert error_field in response.json()["errors"][0]["source"]["pointer"]
+
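+
+# The error assertions above assume the JSON:API error document shape, e.g.
+# (illustrative payload):
+#
+#     {"errors": [{"code": "...", "detail": "...",
+#                  "source": {"pointer": "/data/attributes/password"}}]}
+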
+
+@pytest.mark.django_db
+class TestTenantViewSet:
+    @pytest.fixture
+    def valid_tenant_payload(self):
+        return {
+            "name": "Tenant Three",
+            "inserted_at": "2023-01-05",
+            "updated_at": "2023-01-06",
+        }
+
+    @pytest.fixture
+    def invalid_tenant_payload(self):
+        return {
+            "name": "",
+            "inserted_at": "2023-01-05",
+            "updated_at": "2023-01-06",
+        }
+
+    @pytest.fixture
+    def extra_users(self, tenants_fixture):
+        _, tenant2, _ = tenants_fixture
+        user2 = User.objects.create_user(
+            name="testing2",
+            password=TEST_PASSWORD,
+            email="testing2@gmail.com",
+        )
+        user3 = User.objects.create_user(
+            name="testing3",
+            password=TEST_PASSWORD,
+            email="testing3@gmail.com",
+        )
+        membership2 = Membership.objects.create(
+            user=user2,
+            tenant=tenant2,
+            role=Membership.RoleChoices.OWNER,
+        )
+        membership3 = Membership.objects.create(
+            user=user3,
+            tenant=tenant2,
+            role=Membership.RoleChoices.MEMBER,
+        )
+        return (user2, membership2), (user3, membership3)
+
+    def test_tenants_list(self, authenticated_client, tenants_fixture):
+        response = authenticated_client.get(reverse("tenant-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(tenants_fixture)
+
+    def test_tenants_retrieve(self, authenticated_client, tenants_fixture):
+        tenant1, *_ = tenants_fixture
+        response = authenticated_client.get(
+            reverse("tenant-detail", kwargs={"pk": tenant1.id})
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["name"] == tenant1.name
+
+    def test_tenants_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("tenant-detail", kwargs={"pk": "random_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_tenants_create(self, authenticated_client, valid_tenant_payload):
+        response = authenticated_client.post(
+            reverse("tenant-list"), data=valid_tenant_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+        # Three tenants from the fixture + the new one
+        assert Tenant.objects.count() == 4
+        assert (
+            response.json()["data"]["attributes"]["name"]
+            == valid_tenant_payload["name"]
+        )
+
+    def test_tenants_invalid_create(
+        self, authenticated_client, invalid_tenant_payload
+    ):
+        response = authenticated_client.post(
+            reverse("tenant-list"),
+            data=invalid_tenant_payload,
+            format="json",
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_tenants_partial_update(self, authenticated_client, tenants_fixture):
+        tenant1, *_ = tenants_fixture
+        new_name = "This is the new name"
+        payload = {
+            "data": {
+                "type": "tenants",
+                "id": tenant1.id,
+                "attributes": {"name": new_name},
+            },
+        }
+        response = authenticated_client.patch(
+            reverse("tenant-detail", kwargs={"pk": tenant1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_200_OK
+        tenant1.refresh_from_db()
+        assert tenant1.name == new_name
+
+    def test_tenants_partial_update_invalid_content_type(
+        self, authenticated_client, tenants_fixture
+    ):
+        tenant1, *_ = tenants_fixture
+        response = authenticated_client.patch(
+            reverse("tenant-detail", kwargs={"pk": tenant1.id}), data={}
+        )
+        assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
+
+    def test_tenants_partial_update_invalid_content(
+        self, authenticated_client, tenants_fixture
+    ):
+        tenant1, *_ = tenants_fixture
+        new_name = "This is the new name"
+        payload = {"name": new_name}
+        response = authenticated_client.patch(
+            reverse("tenant-detail", kwargs={"pk": tenant1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_tenants_delete(self, authenticated_client, tenants_fixture):
+        tenant1, *_ = tenants_fixture
+        response = authenticated_client.delete(
+            reverse("tenant-detail", kwargs={"pk": tenant1.id})
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert Tenant.objects.count() == len(tenants_fixture) - 1
+
+    def test_tenants_delete_invalid(self, authenticated_client):
+        response = authenticated_client.delete(
+            reverse("tenant-detail", kwargs={"pk": "random_id"})
+        )
+        # May change if RBAC is implemented: a user without permission should
+        # not learn whether the tenant exists (-> 200 with an empty list)
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_tenants_list_filter_search(self, authenticated_client, tenants_fixture):
+        """Search is applied to tenants_fixture  name."""
+        tenant1, *_ = tenants_fixture
+        response = authenticated_client.get(
+            reverse("tenant-list"), {"filter[search]": tenant1.name}
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 1
+        assert response.json()["data"][0]["attributes"]["name"] == tenant1.name
+
+    def test_tenants_list_query_param_name(
+        self, authenticated_client, tenants_fixture
+    ):
+        tenant1, *_ = tenants_fixture
+        response = authenticated_client.get(
+            reverse("tenant-list"), {"name": tenant1.name}
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_tenants_list_invalid_query_param(self, authenticated_client):
+        response = authenticated_client.get(reverse("tenant-list"), {"random": "value"})
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                ("name", "Tenant One", 1),
+                ("name.icontains", "Tenant", 3),
+                ("inserted_at", TODAY, 3),
+                ("inserted_at.gte", "2024-01-01", 3),
+                ("inserted_at.lte", "2024-01-01", 0),
+                ("updated_at.gte", "2024-01-01", 3),
+                ("updated_at.lte", "2024-01-01", 0),
+            ]
+        ),
+    )
+    def test_tenants_filters(
+        self,
+        authenticated_client,
+        tenants_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        response = authenticated_client.get(
+            reverse("tenant-list"),
+            {f"filter[{filter_name}]": filter_value},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    def test_tenants_list_filter_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("tenant-list"), {"filter[invalid]": "whatever"}
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_tenants_list_page_size(self, authenticated_client, tenants_fixture):
+        page_size = 1
+
+        response = authenticated_client.get(
+            reverse("tenant-list"), {"page[size]": page_size}
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == page_size
+        assert response.json()["meta"]["pagination"]["page"] == 1
+        assert response.json()["meta"]["pagination"]["pages"] == len(tenants_fixture)
+
+    def test_tenants_list_page_number(self, authenticated_client, tenants_fixture):
+        page_size = 1
+        page_number = 2
+
+        response = authenticated_client.get(
+            reverse("tenant-list"),
+            {"page[size]": page_size, "page[number]": page_number},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == page_size
+        assert response.json()["meta"]["pagination"]["page"] == page_number
+        assert response.json()["meta"]["pagination"]["pages"] == len(tenants_fixture)
+
+    def test_tenants_list_sort_name(self, authenticated_client, tenants_fixture):
+        _, tenant2, _ = tenants_fixture
+        response = authenticated_client.get(reverse("tenant-list"), {"sort": "-name"})
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 3
+        assert response.json()["data"][0]["attributes"]["name"] == tenant2.name
+
+    def test_tenants_list_memberships_as_owner(
+        self, authenticated_client, tenants_fixture, extra_users
+    ):
+        _, tenant2, _ = tenants_fixture
+        response = authenticated_client.get(
+            reverse("tenant-membership-list", kwargs={"tenant_pk": tenant2.id})
+        )
+        assert response.status_code == status.HTTP_200_OK
+        # Test user + 2 extra users for tenant 2
+        assert len(response.json()["data"]) == 3
+
+    def test_tenants_list_memberships_as_member(
+        self, authenticated_client, tenants_fixture, extra_users
+    ):
+        _, tenant2, _ = tenants_fixture
+        _, user3_membership = extra_users
+        user3, membership3 = user3_membership
+        token_response = authenticated_client.post(
+            reverse("token-obtain"),
+            data={"email": user3.email, "password": TEST_PASSWORD},
+            format="json",
+        )
+        access_token = token_response.json()["data"]["attributes"]["access"]
+
+        response = authenticated_client.get(
+            reverse("tenant-membership-list", kwargs={"tenant_pk": tenant2.id}),
+            headers={"Authorization": f"Bearer {access_token}"},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        # User is a member and can only see its own membership
+        assert len(response.json()["data"]) == 1
+        assert response.json()["data"][0]["id"] == str(membership3.id)
+
+    def test_tenants_delete_own_membership_as_member(
+        self, authenticated_client, tenants_fixture, extra_users
+    ):
+        tenant1, *_ = tenants_fixture
+        membership = Membership.objects.get(tenant=tenant1, user__email=TEST_USER)
+
+        response = authenticated_client.delete(
+            reverse(
+                "tenant-membership-detail",
+                kwargs={"tenant_pk": tenant1.id, "pk": membership.id},
+            )
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert not Membership.objects.filter(id=membership.id).exists()
+
+    def test_tenants_delete_own_membership_as_owner(
+        self, authenticated_client, tenants_fixture, extra_users
+    ):
+        # With extra_users, tenant2 has 2 owners
+        _, tenant2, _ = tenants_fixture
+        user_membership = Membership.objects.get(tenant=tenant2, user__email=TEST_USER)
+        response = authenticated_client.delete(
+            reverse(
+                "tenant-membership-detail",
+                kwargs={"tenant_pk": tenant2.id, "pk": user_membership.id},
+            )
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert not Membership.objects.filter(id=user_membership.id).exists()
+
+    def test_tenants_delete_own_membership_as_last_owner(
+        self, authenticated_client, tenants_fixture
+    ):
+        _, tenant2, _ = tenants_fixture
+        user_membership = Membership.objects.get(tenant=tenant2, user__email=TEST_USER)
+        response = authenticated_client.delete(
+            reverse(
+                "tenant-membership-detail",
+                kwargs={"tenant_pk": tenant2.id, "pk": user_membership.id},
+            )
+        )
+        assert response.status_code == status.HTTP_403_FORBIDDEN
+        assert Membership.objects.filter(id=user_membership.id).exists()
+
+    def test_tenants_delete_another_membership_as_owner(
+        self, authenticated_client, tenants_fixture, extra_users
+    ):
+        _, tenant2, _ = tenants_fixture
+        _, user3_membership = extra_users
+        user3, membership3 = user3_membership
+
+        response = authenticated_client.delete(
+            reverse(
+                "tenant-membership-detail",
+                kwargs={"tenant_pk": tenant2.id, "pk": membership3.id},
+            )
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert not Membership.objects.filter(id=membership3.id).exists()
+
+    def test_tenants_delete_another_membership_as_member(
+        self, authenticated_client, tenants_fixture, extra_users
+    ):
+        _, tenant2, _ = tenants_fixture
+        _, user3_membership = extra_users
+        user3, membership3 = user3_membership
+
+        # Downgrade membership role manually
+        user_membership = Membership.objects.get(tenant=tenant2, user__email=TEST_USER)
+        user_membership.role = Membership.RoleChoices.MEMBER
+        user_membership.save()
+
+        response = authenticated_client.delete(
+            reverse(
+                "tenant-membership-detail",
+                kwargs={"tenant_pk": tenant2.id, "pk": membership3.id},
+            )
+        )
+        assert response.status_code == status.HTTP_403_FORBIDDEN
+        assert Membership.objects.filter(id=membership3.id).exists()
+
+    def test_tenants_list_memberships_not_member_of_tenant(self, authenticated_client):
+        # Create a tenant the user is not a member of
+        tenant4 = Tenant.objects.create(name="Tenant Four")
+
+        response = authenticated_client.get(
+            reverse("tenant-membership-list", kwargs={"tenant_pk": tenant4.id})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
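+
+# The deletion outcomes pinned above reduce to a predicate along these lines
+# (sketch only; the real check lives in the API's permission layer):
+def _can_delete_membership_sketch(requester, target):
+    if requester.role == Membership.RoleChoices.OWNER:
+        # Owners may remove any membership, their own included, unless they
+        # are the tenant's last remaining owner.
+        owners = Membership.objects.filter(
+            tenant=target.tenant, role=Membership.RoleChoices.OWNER
+        )
+        is_last_owner = target.user_id == requester.user_id and owners.count() == 1
+        return not is_last_owner
+    # Plain members may only remove their own membership.
+    return target.user_id == requester.user_id
+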
+
+@pytest.mark.django_db
+class TestMembershipViewSet:
+    def test_memberships_list(self, authenticated_client, tenants_fixture):
+        user_id = authenticated_client.user.pk
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    def test_memberships_retrieve(self, authenticated_client, tenants_fixture):
+        user_id = authenticated_client.user.pk
+        list_response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+        )
+        assert list_response.status_code == status.HTTP_200_OK
+        membership = list_response.json()["data"][0]
+
+        response = authenticated_client.get(
+            reverse(
+                "user-membership-detail",
+                kwargs={"user_pk": user_id, "pk": membership["id"]},
+            ),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert (
+            response.json()["data"]["relationships"]["tenant"]["data"]["id"]
+            == membership["relationships"]["tenant"]["data"]["id"]
+        )
+        assert (
+            response.json()["data"]["relationships"]["user"]["data"]["id"]
+            == membership["relationships"]["user"]["data"]["id"]
+        )
+
+    def test_memberships_invalid_retrieve(self, authenticated_client):
+        user_id = authenticated_client.user.pk
+        response = authenticated_client.get(
+            reverse(
+                "user-membership-detail",
+                kwargs={
+                    "user_pk": user_id,
+                    "pk": "b91c5eff-13f5-469c-9fd8-917b3a3037b6",
+                },
+            ),
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        [
+            ("role", "owner", 1),
+            ("role", "member", 1),
+            ("date_joined", TODAY, 2),
+            ("date_joined.gte", "2024-01-01", 2),
+            ("date_joined.lte", "2024-01-01", 0),
+        ],
+    )
+    def test_memberships_filters(
+        self,
+        authenticated_client,
+        tenants_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        user_id = authenticated_client.user.pk
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+            {f"filter[{filter_name}]": filter_value},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    def test_memberships_filters_relationships(
+        self, authenticated_client, tenants_fixture
+    ):
+        user_id = authenticated_client.user.pk
+        tenant, *_ = tenants_fixture
+        # No filter
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+        # Filter by tenant
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+            {"filter[tenant]": tenant.id},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 1
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        [
+            "role",  # Valid filter, invalid value
+            "tenant",  # Valid filter, invalid value
+            "invalid",  # Invalid filter
+        ],
+    )
+    def test_memberships_filters_invalid(
+        self, authenticated_client, tenants_fixture, filter_name
+    ):
+        user_id = authenticated_client.user.pk
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+            {f"filter[{filter_name}]": "whatever"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        [
+            "tenant",
+            "role",
+            "date_joined",
+        ],
+    )
+    def test_memberships_sort(self, authenticated_client, tenants_fixture, sort_field):
+        user_id = authenticated_client.user.pk
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+            {"sort": sort_field},
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_memberships_sort_invalid(self, authenticated_client, tenants_fixture):
+        user_id = authenticated_client.user.pk
+        response = authenticated_client.get(
+            reverse("user-membership-list", kwargs={"user_pk": user_id}),
+            {"sort": "invalid"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
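+
+# Query conventions used throughout this module: JSON:API-style bracketed
+# filters plus a sort parameter, e.g. (illustrative URL):
+#
+#     GET /api/v1/.../memberships?filter[role]=owner&sort=-date_joined
+#
+# Unknown filter names, invalid filter values, and unknown sort fields are
+# all rejected with 400, as the invalid-filter tests above pin down.
+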
+
+@pytest.mark.django_db
+class TestProviderViewSet:
+    def test_providers_list(self, authenticated_client, providers_fixture):
+        response = authenticated_client.get(reverse("provider-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(providers_fixture)
+
+    def test_providers_retrieve(self, authenticated_client, providers_fixture):
+        provider1, *_ = providers_fixture
+        response = authenticated_client.get(
+            reverse("provider-detail", kwargs={"pk": provider1.id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["provider"] == provider1.provider
+        assert response.json()["data"]["attributes"]["uid"] == provider1.uid
+        assert response.json()["data"]["attributes"]["alias"] == provider1.alias
+
+    def test_providers_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("provider-detail", kwargs={"pk": "random_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @pytest.mark.parametrize(
+        "provider_json_payload",
+        (
+            [
+                {"provider": "aws", "uid": "111111111111", "alias": "test"},
+                {"provider": "gcp", "uid": "a12322-test54321", "alias": "test"},
+                {
+                    "provider": "kubernetes",
+                    "uid": "kubernetes-test-123456789",
+                    "alias": "test",
+                },
+                {
+                    "provider": "azure",
+                    "uid": "8851db6b-42e5-4533-aa9e-30a32d67e875",
+                    "alias": "test",
+                },
+            ]
+        ),
+    )
+    def test_providers_create_valid(
+        self, authenticated_client, provider_json_payload
+    ):
+        response = authenticated_client.post(
+            reverse("provider-list"), data=provider_json_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+        assert Provider.objects.count() == 1
+        assert Provider.objects.get().provider == provider_json_payload["provider"]
+        assert Provider.objects.get().uid == provider_json_payload["uid"]
+        assert Provider.objects.get().alias == provider_json_payload["alias"]
+
+    @pytest.mark.parametrize(
+        "provider_json_payload, error_code, error_pointer",
+        (
+            [
+                (
+                    {"provider": "aws", "uid": "1", "alias": "test"},
+                    "min_length",
+                    "uid",
+                ),
+                (
+                    {
+                        "provider": "aws",
+                        "uid": "1111111111111",
+                        "alias": "test",
+                    },
+                    "aws-uid",
+                    "uid",
+                ),
+                (
+                    {"provider": "aws", "uid": "aaaaaaaaaaaa", "alias": "test"},
+                    "aws-uid",
+                    "uid",
+                ),
+                (
+                    {"provider": "gcp", "uid": "1234asdf", "alias": "test"},
+                    "gcp-uid",
+                    "uid",
+                ),
+                (
+                    {
+                        "provider": "kubernetes",
+                        "uid": "-1234asdf",
+                        "alias": "test",
+                    },
+                    "kubernetes-uid",
+                    "uid",
+                ),
+                (
+                    {
+                        "provider": "azure",
+                        "uid": "8851db6b-42e5-4533-aa9e-30a32d67e87",
+                        "alias": "test",
+                    },
+                    "azure-uid",
+                    "uid",
+                ),
+                (
+                    {
+                        "provider": "does-not-exist",
+                        "uid": "8851db6b-42e5-4533-aa9e-30a32d67e87",
+                        "alias": "test",
+                    },
+                    "invalid_choice",
+                    "provider",
+                ),
+            ]
+        ),
+    )
+    def test_providers_invalid_create(
+        self,
+        authenticated_client,
+        provider_json_payload,
+        error_code,
+        error_pointer,
+    ):
+        response = authenticated_client.post(
+            reverse("provider-list"), data=provider_json_payload, format="json"
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == error_code
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == f"/data/attributes/{error_pointer}"
+        )
+
+    def test_providers_partial_update(self, authenticated_client, providers_fixture):
+        provider1, *_ = providers_fixture
+        new_alias = "This is the new name"
+        payload = {
+            "data": {
+                "type": "providers",
+                "id": provider1.id,
+                "attributes": {"alias": new_alias},
+            },
+        }
+        response = authenticated_client.patch(
+            reverse("provider-detail", kwargs={"pk": provider1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_200_OK
+        provider1.refresh_from_db()
+        assert provider1.alias == new_alias
+
+    def test_providers_partial_update_invalid_content_type(
+        self, authenticated_client, providers_fixture
+    ):
+        provider1, *_ = providers_fixture
+        response = authenticated_client.patch(
+            reverse("provider-detail", kwargs={"pk": provider1.id}),
+            data={},
+        )
+        assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
+
+    def test_providers_partial_update_invalid_content(
+        self, authenticated_client, providers_fixture
+    ):
+        provider1, *_ = providers_fixture
+        new_name = "This is the new name"
+        payload = {"alias": new_name}
+        response = authenticated_client.patch(
+            reverse("provider-detail", kwargs={"pk": provider1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "attribute_key, attribute_value",
+        [
+            ("provider", "aws"),
+            ("uid", "123456789012"),
+        ],
+    )
+    def test_providers_partial_update_invalid_fields(
+        self,
+        authenticated_client,
+        providers_fixture,
+        attribute_key,
+        attribute_value,
+    ):
+        provider1, *_ = providers_fixture
+        payload = {
+            "data": {
+                "type": "providers",
+                "id": provider1.id,
+                "attributes": {attribute_key: attribute_value},
+            },
+        }
+        response = authenticated_client.patch(
+            reverse("provider-detail", kwargs={"pk": provider1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @patch("api.v1.views.Task.objects.get")
+    @patch("api.v1.views.delete_provider_task.delay")
+    def test_providers_delete(
+        self,
+        mock_delete_task,
+        mock_task_get,
+        authenticated_client,
+        providers_fixture,
+        tasks_fixture,
+    ):
+        prowler_task = tasks_fixture[0]
+        task_mock = Mock()
+        task_mock.id = prowler_task.id
+        mock_delete_task.return_value = task_mock
+        mock_task_get.return_value = prowler_task
+
+        provider1, *_ = providers_fixture
+        response = authenticated_client.delete(
+            reverse("provider-detail", kwargs={"pk": provider1.id})
+        )
+        assert response.status_code == status.HTTP_202_ACCEPTED
+        mock_delete_task.assert_called_once_with(
+            provider_id=str(provider1.id), tenant_id=ANY
+        )
+        assert "Content-Location" in response.headers
+        assert response.headers["Content-Location"] == f"/api/v1/tasks/{task_mock.id}"
+
+    def test_providers_delete_invalid(self, authenticated_client):
+        response = authenticated_client.delete(
+            reverse("provider-detail", kwargs={"pk": "random_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @patch("api.v1.views.Task.objects.get")
+    @patch("api.v1.views.check_provider_connection_task.delay")
+    def test_providers_connection(
+        self,
+        mock_provider_connection,
+        mock_task_get,
+        authenticated_client,
+        providers_fixture,
+        tasks_fixture,
+    ):
+        prowler_task = tasks_fixture[0]
+        task_mock = Mock()
+        task_mock.id = prowler_task.id
+        task_mock.status = "PENDING"
+        mock_provider_connection.return_value = task_mock
+        mock_task_get.return_value = prowler_task
+
+        provider1, *_ = providers_fixture
+        assert provider1.connected is None
+        assert provider1.connection_last_checked_at is None
+
+        response = authenticated_client.post(
+            reverse("provider-connection", kwargs={"pk": provider1.id})
+        )
+        assert response.status_code == status.HTTP_202_ACCEPTED
+        mock_provider_connection.assert_called_once_with(
+            provider_id=str(provider1.id), tenant_id=ANY
+        )
+        assert "Content-Location" in response.headers
+        assert response.headers["Content-Location"] == f"/api/v1/tasks/{task_mock.id}"
+
+    def test_providers_connection_invalid_provider(
+        self, authenticated_client, providers_fixture
+    ):
+        response = authenticated_client.post(
+            reverse("provider-connection", kwargs={"pk": "random_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                ("provider", "aws", 2),
+                ("provider.in", "azure,gcp", 2),
+                ("uid", "123456789012", 1),
+                ("uid.icontains", "1", 5),
+                ("alias", "aws_testing_1", 1),
+                ("alias.icontains", "aws", 2),
+                ("inserted_at", TODAY, 5),
+                ("inserted_at.gte", "2024-01-01", 5),
+                ("inserted_at.lte", "2024-01-01", 0),
+                ("updated_at.gte", "2024-01-01", 5),
+                ("updated_at.lte", "2024-01-01", 0),
+            ]
+        ),
+    )
+    def test_providers_filters(
+        self,
+        authenticated_client,
+        providers_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        response = authenticated_client.get(
+            reverse("provider-list"),
+            {f"filter[{filter_name}]": filter_value},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        (
+            [
+                "provider",  # Valid filter, invalid value
+                "invalid",
+            ]
+        ),
+    )
+    def test_providers_filters_invalid(self, authenticated_client, filter_name):
+        response = authenticated_client.get(
+            reverse("provider-list"),
+            {f"filter[{filter_name}]": "whatever"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        (
+            [
+                "provider",
+                "uid",
+                "alias",
+                "connected",
+                "inserted_at",
+                "updated_at",
+            ]
+        ),
+    )
+    def test_providers_sort(self, authenticated_client, sort_field):
+        response = authenticated_client.get(
+            reverse("provider-list"), {"sort": sort_field}
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_providers_sort_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("provider-list"), {"sort": "invalid"}
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+class TestProviderGroupViewSet:
+    def test_provider_group_list(self, authenticated_client, provider_groups_fixture):
+        response = authenticated_client.get(reverse("providergroup-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(provider_groups_fixture)
+
+    def test_provider_group_retrieve(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[0]
+        response = authenticated_client.get(
+            reverse("providergroup-detail", kwargs={"pk": provider_group.id})
+        )
+        assert response.status_code == status.HTTP_200_OK
+        data = response.json()["data"]
+        assert data["id"] == str(provider_group.id)
+        assert data["attributes"]["name"] == provider_group.name
+
+    def test_provider_group_create(self, authenticated_client):
+        data = {
+            "data": {
+                "type": "provider-groups",
+                "attributes": {
+                    "name": "Test Provider Group",
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("providergroup-list"),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+        response_data = response.json()["data"]
+        assert response_data["attributes"]["name"] == "Test Provider Group"
+        assert ProviderGroup.objects.filter(name="Test Provider Group").exists()
+
+    def test_provider_group_create_invalid(self, authenticated_client):
+        data = {
+            "data": {
+                "type": "provider-groups",
+                "attributes": {
+                    # Name is missing
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("providergroup-list"),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        errors = response.json()["errors"]
+        assert errors[0]["source"]["pointer"] == "/data/attributes/name"
+
+    def test_provider_group_partial_update(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[1]
+        data = {
+            "data": {
+                "id": str(provider_group.id),
+                "type": "provider-groups",
+                "attributes": {
+                    "name": "Updated Provider Group Name",
+                },
+            }
+        }
+        response = authenticated_client.patch(
+            reverse("providergroup-detail", kwargs={"pk": provider_group.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_200_OK
+        provider_group.refresh_from_db()
+        assert provider_group.name == "Updated Provider Group Name"
+
+    def test_provider_group_partial_update_invalid(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[2]
+        data = {
+            "data": {
+                "id": str(provider_group.id),
+                "type": "provider-groups",
+                "attributes": {
+                    "name": "",  # Invalid name
+                },
+            }
+        }
+        response = authenticated_client.patch(
+            reverse("providergroup-detail", kwargs={"pk": provider_group.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        errors = response.json()["errors"]
+        assert errors[0]["source"]["pointer"] == "/data/attributes/name"
+
+    def test_provider_group_destroy(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[2]
+        response = authenticated_client.delete(
+            reverse("providergroup-detail", kwargs={"pk": provider_group.id})
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert not ProviderGroup.objects.filter(id=provider_group.id).exists()
+
+    def test_provider_group_destroy_invalid(self, authenticated_client):
+        response = authenticated_client.delete(
+            reverse("providergroup-detail", kwargs={"pk": "non-existent-id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_provider_group_providers_update(
+        self, authenticated_client, provider_groups_fixture, providers_fixture
+    ):
+        provider_group = provider_groups_fixture[0]
+        provider_ids = [str(provider.id) for provider in providers_fixture]
+
+        data = {
+            "data": {
+                "type": "provider-group-memberships",
+                "id": str(provider_group.id),
+                "attributes": {"provider_ids": provider_ids},
+            }
+        }
+
+        response = authenticated_client.put(
+            reverse("providergroup-providers", kwargs={"pk": provider_group.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_200_OK
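+        # Exactly the submitted providers should now be members of the group.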
+        memberships = ProviderGroupMembership.objects.filter(
+            provider_group=provider_group
+        )
+        assert memberships.count() == len(provider_ids)
+        for membership in memberships:
+            assert str(membership.provider_id) in provider_ids
+
+    def test_provider_group_providers_update_non_existent_provider(
+        self, authenticated_client, provider_groups_fixture, providers_fixture
+    ):
+        provider_group = provider_groups_fixture[0]
+        provider_ids = [str(provider.id) for provider in providers_fixture]
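+        # Swap the last ID for a well-formed UUID that does not exist.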
+        provider_ids[-1] = "1b59e032-3eb6-4694-93a5-df84cd9b3ce2"
+
+        data = {
+            "data": {
+                "type": "provider-group-memberships",
+                "id": str(provider_group.id),
+                "attributes": {"provider_ids": provider_ids},
+            }
+        }
+
+        response = authenticated_client.put(
+            reverse("providergroup-providers", kwargs={"pk": provider_group.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        errors = response.json()["errors"]
+        assert (
+            errors[0]["detail"]
+            == f"The following provider IDs do not exist: {provider_ids[-1]}"
+        )
+
+    def test_provider_group_providers_update_invalid_provider(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[1]
+        invalid_provider_id = "non-existent-id"
+        data = {
+            "data": {
+                "type": "provider-group-memberships",
+                "id": str(provider_group.id),
+                "attributes": {"provider_ids": [invalid_provider_id]},
+            }
+        }
+
+        response = authenticated_client.put(
+            reverse("providergroup-providers", kwargs={"pk": provider_group.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        errors = response.json()["errors"]
+        assert errors[0]["detail"] == "Must be a valid UUID."
+
+    def test_provider_group_providers_update_invalid_payload(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[2]
+        data = {
+            # Missing "provider_ids"
+        }
+
+        response = authenticated_client.put(
+            reverse("providergroup-providers", kwargs={"pk": provider_group.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        errors = response.json()["errors"]
+        assert errors[0]["detail"] == "Received document does not contain primary data"
+
+    def test_provider_group_retrieve_not_found(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("providergroup-detail", kwargs={"pk": "non-existent-id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_provider_group_list_filters(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        provider_group = provider_groups_fixture[0]
+        response = authenticated_client.get(
+            reverse("providergroup-list"), {"filter[name]": provider_group.name}
+        )
+        assert response.status_code == status.HTTP_200_OK
+        data = response.json()["data"]
+        assert len(data) == 1
+        assert data[0]["attributes"]["name"] == provider_group.name
+
+    def test_provider_group_list_sorting(
+        self, authenticated_client, provider_groups_fixture
+    ):
+        response = authenticated_client.get(
+            reverse("providergroup-list"), {"sort": "name"}
+        )
+        assert response.status_code == status.HTTP_200_OK
+        data = response.json()["data"]
+        names = [item["attributes"]["name"] for item in data]
+        assert names == sorted(names)
+
+    def test_provider_group_invalid_method(self, authenticated_client):
+        response = authenticated_client.put(reverse("providergroup-list"))
+        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
+
+
+@pytest.mark.django_db
+class TestProviderSecretViewSet:
+    def test_provider_secrets_list(self, authenticated_client, provider_secret_fixture):
+        response = authenticated_client.get(reverse("providersecret-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(provider_secret_fixture)
+
+    def test_provider_secrets_retrieve(
+        self, authenticated_client, provider_secret_fixture
+    ):
+        provider_secret1, *_ = provider_secret_fixture
+        response = authenticated_client.get(
+            reverse("providersecret-detail", kwargs={"pk": provider_secret1.id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["name"] == provider_secret1.name
+        assert (
+            response.json()["data"]["attributes"]["secret_type"]
+            == provider_secret1.secret_type
+        )
+
+    def test_provider_secrets_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse(
+                "providersecret-detail",
+                kwargs={"pk": "f498b103-c760-4785-9a3e-e23fafbb7b02"},
+            )
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @pytest.mark.parametrize(
+        "provider_type, secret_type, secret_data",
+        [
+            # AWS with STATIC secret
+            (
+                Provider.ProviderChoices.AWS.value,
+                ProviderSecret.TypeChoices.STATIC,
+                {
+                    "aws_access_key_id": "value",
+                    "aws_secret_access_key": "value",
+                    "aws_session_token": "value",
+                },
+            ),
+            # AWS with ROLE secret
+            (
+                Provider.ProviderChoices.AWS.value,
+                ProviderSecret.TypeChoices.ROLE,
+                {
+                    "role_arn": "arn:aws:iam::123456789012:role/example-role",
+                    # Optional fields
+                    "external_id": "external-id",
+                    "role_session_name": "session-name",
+                    "session_duration": 3600,
+                    "aws_access_key_id": "value",
+                    "aws_secret_access_key": "value",
+                    "aws_session_token": "value",
+                },
+            ),
+            # Azure with STATIC secret
+            (
+                Provider.ProviderChoices.AZURE.value,
+                ProviderSecret.TypeChoices.STATIC,
+                {
+                    "client_id": "client-id",
+                    "client_secret": "client-secret",
+                    "tenant_id": "tenant-id",
+                },
+            ),
+            # GCP with STATIC secret
+            (
+                Provider.ProviderChoices.GCP.value,
+                ProviderSecret.TypeChoices.STATIC,
+                {
+                    "client_id": "client-id",
+                    "client_secret": "client-secret",
+                    "refresh_token": "refresh-token",
+                },
+            ),
+            # Kubernetes with STATIC secret
+            (
+                Provider.ProviderChoices.KUBERNETES.value,
+                ProviderSecret.TypeChoices.STATIC,
+                {
+                    "kubeconfig_content": "kubeconfig-content",
+                },
+            ),
+        ],
+    )
+    def test_provider_secrets_create_valid(
+        self,
+        authenticated_client,
+        providers_fixture,
+        provider_type,
+        secret_type,
+        secret_data,
+    ):
+        # Pick a provider of the requested type from the fixture
+        provider = Provider.objects.filter(provider=provider_type)[0]
+
+        data = {
+            "data": {
+                "type": "provider-secrets",
+                "attributes": {
+                    "name": "My Secret",
+                    "secret_type": secret_type,
+                    "secret": secret_data,
+                },
+                "relationships": {
+                    "provider": {"data": {"type": "providers", "id": str(provider.id)}}
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("providersecret-list"),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+        assert ProviderSecret.objects.count() == 1
+        provider_secret = ProviderSecret.objects.first()
+        assert provider_secret.name == data["data"]["attributes"]["name"]
+        assert provider_secret.secret_type == data["data"]["attributes"]["secret_type"]
+        assert (
+            str(provider_secret.provider.id)
+            == data["data"]["relationships"]["provider"]["data"]["id"]
+        )
+
+    @pytest.mark.parametrize(
+        "attributes, error_code, error_pointer",
+        (
+            [
+                (
+                    {
+                        "name": "testing",
+                        "secret_type": "static",
+                        "secret": {"invalid": "test"},
+                    },
+                    "required",
+                    "secret/aws_access_key_id",
+                ),
+                (
+                    {
+                        "name": "testing",
+                        "secret_type": "invalid",
+                        "secret": {"invalid": "test"},
+                    },
+                    "invalid_choice",
+                    "secret_type",
+                ),
+                (
+                    {
+                        "name": "a" * 151,
+                        "secret_type": "static",
+                        "secret": {
+                            "aws_access_key_id": "value",
+                            "aws_secret_access_key": "value",
+                            "aws_session_token": "value",
+                        },
+                    },
+                    "max_length",
+                    "name",
+                ),
+            ]
+        ),
+    )
+    def test_provider_secrets_invalid_create(
+        self,
+        providers_fixture,
+        authenticated_client,
+        attributes,
+        error_code,
+        error_pointer,
+    ):
+        provider, *_ = providers_fixture
+        data = {
+            "data": {
+                "type": "provider-secrets",
+                "attributes": attributes,
+                "relationships": {
+                    "provider": {"data": {"type": "providers", "id": str(provider.id)}}
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("providersecret-list"),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == error_code
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == f"/data/attributes/{error_pointer}"
+        )
+
+    def test_provider_secrets_partial_update(
+        self, authenticated_client, provider_secret_fixture
+    ):
+        provider_secret, *_ = provider_secret_fixture
+        data = {
+            "data": {
+                "type": "provider-secrets",
+                "id": str(provider_secret.id),
+                "attributes": {
+                    "name": "new_name",
+                    "secret": {
+                        "aws_access_key_id": "new_value",
+                        "aws_secret_access_key": "new_value",
+                        "aws_session_token": "new_value",
+                    },
+                },
+                "relationships": {
+                    "provider": {
+                        "data": {
+                            "type": "providers",
+                            "id": str(provider_secret.provider.id),
+                        }
+                    }
+                },
+            }
+        }
+        response = authenticated_client.patch(
+            reverse("providersecret-detail", kwargs={"pk": provider_secret.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_200_OK
+        provider_secret.refresh_from_db()
+        assert provider_secret.name == "new_name"
+        for value in provider_secret.secret.values():
+            assert value == "new_value"
+
+    def test_provider_secrets_partial_update_invalid_content_type(
+        self, authenticated_client, provider_secret_fixture
+    ):
+        provider_secret, *_ = provider_secret_fixture
+        response = authenticated_client.patch(
+            reverse("providersecret-detail", kwargs={"pk": provider_secret.id}),
+            data={},
+        )
+        assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
+
+    def test_provider_secrets_partial_update_invalid_content(
+        self, authenticated_client, provider_secret_fixture
+    ):
+        provider_secret, *_ = provider_secret_fixture
+        data = {
+            "data": {
+                "type": "provider-secrets",
+                "id": str(provider_secret.id),
+                "attributes": {"invalid_secret": "value"},
+                "relationships": {
+                    "provider": {
+                        "data": {
+                            "type": "providers",
+                            "id": str(provider_secret.provider.id),
+                        }
+                    }
+                },
+            }
+        }
+        response = authenticated_client.patch(
+            reverse("providersecret-detail", kwargs={"pk": provider_secret.id}),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_provider_secrets_delete(
+        self,
+        authenticated_client,
+        provider_secret_fixture,
+    ):
+        provider_secret, *_ = provider_secret_fixture
+        response = authenticated_client.delete(
+            reverse("providersecret-detail", kwargs={"pk": provider_secret.id})
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+
+    def test_provider_secrets_delete_invalid(self, authenticated_client):
+        response = authenticated_client.delete(
+            reverse(
+                "providersecret-detail",
+                kwargs={"pk": "e67d0283-440f-48d1-b5f8-38d0763474f4"},
+            )
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                ("name", "aws_testing_1", 1),
+                ("name.icontains", "aws", 2),
+            ]
+        ),
+    )
+    def test_provider_secrets_filters(
+        self,
+        authenticated_client,
+        provider_secret_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        response = authenticated_client.get(
+            reverse("providersecret-list"),
+            {f"filter[{filter_name}]": filter_value},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        (
+            [
+                "invalid",
+            ]
+        ),
+    )
+    def test_provider_secrets_filters_invalid(self, authenticated_client, filter_name):
+        response = authenticated_client.get(
+            reverse("providersecret-list"),
+            {f"filter[{filter_name}]": "whatever"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        (
+            [
+                "name",
+                "inserted_at",
+                "updated_at",
+            ]
+        ),
+    )
+    def test_provider_secrets_sort(self, authenticated_client, sort_field):
+        response = authenticated_client.get(
+            reverse("providersecret-list"), {"sort": sort_field}
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_provider_secrets_sort_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("providersecret-list"), {"sort": "invalid"}
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+class TestScanViewSet:
+    def test_scans_list(self, authenticated_client, scans_fixture):
+        response = authenticated_client.get(reverse("scan-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(scans_fixture)
+
+    def test_scans_retrieve(self, authenticated_client, scans_fixture):
+        scan1, *_ = scans_fixture
+        response = authenticated_client.get(
+            reverse("scan-detail", kwargs={"pk": scan1.id})
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["name"] == scan1.name
+        assert response.json()["data"]["relationships"]["provider"]["data"][
+            "id"
+        ] == str(scan1.provider.id)
+
+    def test_scans_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("scan-detail", kwargs={"pk": "random_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @pytest.mark.parametrize(
+        "scan_json_payload, expected_scanner_args",
+        [
+            # Case 1: No scanner_args in payload (should use provider's scanner_args)
+            (
+                {
+                    "data": {
+                        "type": "scans",
+                        "attributes": {
+                            "name": "New Scan",
+                        },
+                        "relationships": {
+                            "provider": {
+                                "data": {"type": "providers", "id": "provider-id-1"}
+                            }
+                        },
+                    }
+                },
+                {"key1": "value1", "key2": {"key21": "value21"}},
+            ),
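+            # Case 2: scanner_args in payload (merged over the provider's scanner_args)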
+            (
+                {
+                    "data": {
+                        "type": "scans",
+                        "attributes": {
+                            "name": "New Scan",
+                            "scanner_args": {
+                                "key2": {"key21": "test21"},
+                                "key3": "test3",
+                            },
+                        },
+                        "relationships": {
+                            "provider": {
+                                "data": {"type": "providers", "id": "provider-id-1"}
+                            }
+                        },
+                    }
+                },
+                {"key1": "value1", "key2": {"key21": "test21"}, "key3": "test3"},
+            ),
+        ],
+    )
+    @patch("api.v1.views.Task.objects.get")
+    @patch("api.v1.views.perform_scan_task.delay")
+    def test_scans_create_valid(
+        self,
+        mock_perform_scan_task,
+        mock_task_get,
+        authenticated_client,
+        scan_json_payload,
+        expected_scanner_args,
+        providers_fixture,
+        tasks_fixture,
+    ):
+        prowler_task = tasks_fixture[0]
+        mock_perform_scan_task.return_value.id = prowler_task.id
+        mock_task_get.return_value = prowler_task
+        *_, provider5 = providers_fixture
+        # Provider5 has these scanner_args
+        # scanner_args={"key1": "value1", "key2": {"key21": "value21"}}
+
+        # scanner_args will be disabled in the first release
+        scan_json_payload["data"]["attributes"].pop("scanner_args", None)
+
+        scan_json_payload["data"]["relationships"]["provider"]["data"]["id"] = str(
+            provider5.id
+        )
+
+        response = authenticated_client.post(
+            reverse("scan-list"),
+            data=scan_json_payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+
+        assert response.status_code == status.HTTP_202_ACCEPTED
+        assert Scan.objects.count() == 1
+
+        scan = Scan.objects.get()
+        assert scan.name == scan_json_payload["data"]["attributes"]["name"]
+        assert scan.provider == provider5
+        assert scan.trigger == Scan.TriggerChoices.MANUAL
+        # assert scan.scanner_args == expected_scanner_args
+
+    @pytest.mark.parametrize(
+        "scan_json_payload, error_code",
+        [
+            (
+                {
+                    "data": {
+                        "type": "scans",
+                        "attributes": {
+                            "name": "a",
+                            "trigger": Scan.TriggerChoices.MANUAL,
+                        },
+                        "relationships": {
+                            "provider": {
+                                "data": {"type": "providers", "id": "provider-id-1"}
+                            }
+                        },
+                    }
+                },
+                "min_length",
+            ),
+        ],
+    )
+    def test_scans_invalid_create(
+        self,
+        authenticated_client,
+        scan_json_payload,
+        providers_fixture,
+        error_code,
+    ):
+        provider1, *_ = providers_fixture
+        scan_json_payload["data"]["relationships"]["provider"]["data"]["id"] = str(
+            provider1.id
+        )
+        response = authenticated_client.post(
+            reverse("scan-list"),
+            data=scan_json_payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == error_code
+        assert (
+            response.json()["errors"][0]["source"]["pointer"] == "/data/attributes/name"
+        )
+
+    def test_scans_partial_update(self, authenticated_client, scans_fixture):
+        scan1, *_ = scans_fixture
+        new_name = "Updated Scan Name"
+        payload = {
+            "data": {
+                "type": "scans",
+                "id": scan1.id,
+                "attributes": {"name": new_name},
+            },
+        }
+        response = authenticated_client.patch(
+            reverse("scan-detail", kwargs={"pk": scan1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_200_OK
+        scan1.refresh_from_db()
+        assert scan1.name == new_name
+
+    def test_scans_partial_update_invalid_content_type(
+        self, authenticated_client, scans_fixture
+    ):
+        scan1, *_ = scans_fixture
+        response = authenticated_client.patch(
+            reverse("scan-detail", kwargs={"pk": scan1.id}),
+            data={},
+        )
+        assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
+
+    def test_scans_partial_update_invalid_content(
+        self, authenticated_client, scans_fixture
+    ):
+        scan1, *_ = scans_fixture
+        new_name = "Updated Scan Name"
+        payload = {"name": new_name}
+        response = authenticated_client.patch(
+            reverse("scan-detail", kwargs={"pk": scan1.id}),
+            data=payload,
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                ("provider_type", "aws", 3),
+                ("provider_type.in", "gcp,azure", 0),
+                ("provider_uid", "123456789012", 2),
+                ("provider_uid.icontains", "1", 3),
+                ("provider_uid.in", "123456789012,123456789013", 3),
+                ("provider_alias", "aws_testing_1", 2),
+                ("provider_alias.icontains", "aws", 3),
+                ("provider_alias.in", "aws_testing_1,aws_testing_2", 3),
+                ("name", "Scan 1", 1),
+                ("name.icontains", "Scan", 3),
+                ("started_at", "2024-01-02", 3),
+                ("started_at.gte", "2024-01-01", 3),
+                ("started_at.lte", "2024-01-01", 0),
+                ("trigger", Scan.TriggerChoices.MANUAL, 1),
+            ]
+        ),
+    )
+    def test_scans_filters(
+        self,
+        authenticated_client,
+        scans_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        response = authenticated_client.get(
+            reverse("scan-list"),
+            {f"filter[{filter_name}]": filter_value},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        [
+            "provider",  # Valid filter, invalid value
+            "invalid",
+        ],
+    )
+    def test_scans_filters_invalid(self, authenticated_client, filter_name):
+        response = authenticated_client.get(
+            reverse("scan-list"),
+            {f"filter[{filter_name}]": "invalid_value"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_scan_filter_by_provider_id_exact(
+        self, authenticated_client, scans_fixture
+    ):
+        response = authenticated_client.get(
+            reverse("scan-list"),
+            {"filter[provider]": scans_fixture[0].provider.id},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    def test_scan_filter_by_provider_id_in(self, authenticated_client, scans_fixture):
+        response = authenticated_client.get(
+            reverse("scan-list"),
+            {
+                "filter[provider.in]": [
+                    scans_fixture[0].provider.id,
+                    scans_fixture[1].provider.id,
+                ]
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        [
+            "name",
+            "trigger",
+            "inserted_at",
+            "updated_at",
+        ],
+    )
+    def test_scans_sort(self, authenticated_client, sort_field):
+        response = authenticated_client.get(reverse("scan-list"), {"sort": sort_field})
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_scans_sort_invalid(self, authenticated_client):
+        response = authenticated_client.get(reverse("scan-list"), {"sort": "invalid"})
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+class TestTaskViewSet:
+    def test_tasks_list(self, authenticated_client, tasks_fixture):
+        response = authenticated_client.get(reverse("task-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(tasks_fixture)
+
+    def test_tasks_retrieve(self, authenticated_client, tasks_fixture):
+        task1, *_ = tasks_fixture
+        response = authenticated_client.get(
+            reverse("task-detail", kwargs={"pk": task1.id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert (
+            response.json()["data"]["attributes"]["name"]
+            == task1.task_runner_task.task_name
+        )
+
+    def test_tasks_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("task-detail", kwargs={"pk": "invalid_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    @patch("api.v1.views.AsyncResult", return_value=Mock())
+    def test_tasks_revoke(self, mock_async_result, authenticated_client, tasks_fixture):
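+        # task1 has already finished, so use task2, which is still revocable.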
+        _, task2 = tasks_fixture
+        response = authenticated_client.delete(
+            reverse("task-detail", kwargs={"pk": task2.id})
+        )
+        assert response.status_code == status.HTTP_202_ACCEPTED
+        assert response.headers["Content-Location"] == f"/api/v1/tasks/{task2.id}"
+        mock_async_result.return_value.revoke.assert_called_once()
+
+    def test_tasks_invalid_revoke(self, authenticated_client):
+        response = authenticated_client.delete(
+            reverse("task-detail", kwargs={"pk": "invalid_id"})
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_tasks_revoke_invalid_status(self, authenticated_client, tasks_fixture):
+        task1, _ = tasks_fixture
+        response = authenticated_client.delete(
+            reverse("task-detail", kwargs={"pk": task1.id})
+        )
+        # Task status is SUCCESS
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+class TestResourceViewSet:
+    def test_resources_list_none(self, authenticated_client):
+        response = authenticated_client.get(reverse("resource-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 0
+
+    def test_resources_list(self, authenticated_client, resources_fixture):
+        response = authenticated_client.get(reverse("resource-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(resources_fixture)
+
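+    # JSON:API compound documents: each type requested via "include" must show up
+    # in the top-level "included" member.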
+    @pytest.mark.parametrize(
+        "include_values, expected_resources",
+        [
+            ("provider", ["providers"]),
+            ("findings", ["findings"]),
+            ("provider,findings", ["providers", "findings"]),
+        ],
+    )
+    def test_resources_list_include(
+        self,
+        include_values,
+        expected_resources,
+        authenticated_client,
+        resources_fixture,
+        findings_fixture,
+    ):
+        response = authenticated_client.get(
+            reverse("resource-list"), {"include": include_values}
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(resources_fixture)
+        assert "included" in response.json()
+
+        included_data = response.json()["included"]
+        for expected_type in expected_resources:
+            assert any(
+                d.get("type") == expected_type for d in included_data
+            ), f"Expected type '{expected_type}' not found in included data"
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                (
+                    "uid",
+                    "arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0",
+                    1,
+                ),
+                ("uid.icontains", "i-1234567890abcdef", 3),
+                ("name", "My Instance 2", 1),
+                ("name.icontains", "ce 2", 1),
+                ("region", "eu-west-1", 1),
+                ("region.icontains", "west", 1),
+                ("service", "ec2", 2),
+                ("service.icontains", "ec", 2),
+                ("inserted_at.gte", "2024-01-01 00:00:00", 3),
+                ("updated_at.lte", "2024-01-01 00:00:00", 0),
+                ("type.icontains", "prowler", 2),
+                # provider filters
+                ("provider_type", "aws", 3),
+                ("provider_type.in", "azure,gcp", 0),
+                ("provider_uid", "123456789012", 2),
+                ("provider_uid.in", "123456789012", 2),
+                ("provider_uid.in", "123456789012,123456789012", 2),
+                ("provider_uid.icontains", "1", 3),
+                ("provider_alias", "aws_testing_1", 2),
+                ("provider_alias.icontains", "aws", 3),
+                # tags searching
+                ("tag", "key3:value:value", 0),
+                ("tag_key", "key3", 1),
+                ("tag_value", "value2", 2),
+                ("tag", "key3:multi word value3", 1),
+                ("tags", "key3:multi word value3", 1),
+                ("tags", "multi word", 1),
+                # full text search on resource
+                ("search", "arn", 3),
+                ("search", "def1", 1),
+                # full text search on resource tags
+                ("search", "multi word", 1),
+                ("search", "key2", 2),
+            ]
+        ),
+    )
+    def test_resource_filters(
+        self,
+        authenticated_client,
+        resources_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        response = authenticated_client.get(
+            reverse("resource-list"),
+            {f"filter[{filter_name}]": filter_value},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    def test_resource_filter_by_provider_id_in(
+        self, authenticated_client, resources_fixture
+    ):
+        response = authenticated_client.get(
+            reverse("resource-list"),
+            {
+                "filter[provider.in]": [
+                    resources_fixture[0].provider.id,
+                    resources_fixture[1].provider.id,
+                ]
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        (
+            [
+                "resource",  # Invalid filter name
+                "invalid",
+            ]
+        ),
+    )
+    def test_resources_filters_invalid(self, authenticated_client, filter_name):
+        response = authenticated_client.get(
+            reverse("resource-list"),
+            {f"filter[{filter_name}]": "whatever"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        [
+            "uid",
+            "uid",
+            "name",
+            "region",
+            "service",
+            "type",
+            "inserted_at",
+            "updated_at",
+        ],
+    )
+    def test_resources_sort(self, authenticated_client, sort_field):
+        response = authenticated_client.get(
+            reverse("resource-list"), {"sort": sort_field}
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_resources_sort_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("resource-list"), {"sort": "invalid"}
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert response.json()["errors"][0]["source"]["pointer"] == "/data"
+        assert (
+            response.json()["errors"][0]["detail"] == "invalid sort parameter: invalid"
+        )
+
+    def test_resources_retrieve(self, authenticated_client, resources_fixture):
+        resource_1, *_ = resources_fixture
+        response = authenticated_client.get(
+            reverse("resource-detail", kwargs={"pk": resource_1.id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["uid"] == resource_1.uid
+        assert response.json()["data"]["attributes"]["name"] == resource_1.name
+        assert response.json()["data"]["attributes"]["region"] == resource_1.region
+        assert response.json()["data"]["attributes"]["service"] == resource_1.service
+        assert response.json()["data"]["attributes"]["type"] == resource_1.type
+        assert response.json()["data"]["attributes"]["tags"] == resource_1.get_tags()
+
+    def test_resources_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("resource-detail", kwargs={"pk": "random_id"}),
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+
+@pytest.mark.django_db
+class TestFindingViewSet:
+    def test_findings_list_none(self, authenticated_client):
+        response = authenticated_client.get(reverse("finding-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 0
+
+    def test_findings_list(self, authenticated_client, findings_fixture):
+        response = authenticated_client.get(reverse("finding-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(findings_fixture)
+        assert (
+            response.json()["data"][0]["attributes"]["status"]
+            == findings_fixture[0].status
+        )
+
+    @pytest.mark.parametrize(
+        "include_values, expected_resources",
+        [
+            ("resources", ["resources"]),
+            ("scan", ["scans"]),
+            ("resources.provider,scan", ["resources", "scans", "providers"]),
+        ],
+    )
+    def test_findings_list_include(
+        self, include_values, expected_resources, authenticated_client, findings_fixture
+    ):
+        response = authenticated_client.get(
+            reverse("finding-list"), {"include": include_values}
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(findings_fixture)
+        assert "included" in response.json()
+
+        included_data = response.json()["included"]
+        for expected_type in expected_resources:
+            assert any(
+                d.get("type") == expected_type for d in included_data
+            ), f"Expected type '{expected_type}' not found in included data"
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                ("delta", "new", 1),
+                ("provider_type", "aws", 2),
+                ("provider_uid", "123456789012", 2),
+                (
+                    "resource_uid",
+                    "arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0",
+                    1,
+                ),
+                ("resource_uid.icontains", "i-1234567890abcdef", 2),
+                ("resource_name", "My Instance 2", 1),
+                ("resource_name.icontains", "ce 2", 1),
+                ("region", "eu-west-1", 1),
+                ("region.in", "eu-west-1,eu-west-2", 1),
+                ("region.icontains", "east", 1),
+                ("service", "ec2", 1),
+                ("service.in", "ec2,s3", 2),
+                ("service.icontains", "ec", 1),
+                ("inserted_at", "2024-01-01", 0),
+                ("inserted_at.date", "2024-01-01", 0),
+                ("inserted_at.gte", "2024-01-01", 2),
+                ("inserted_at.lte", "2024-12-31", 2),
+                ("updated_at.lte", "2024-01-01", 0),
+                ("resource_type.icontains", "prowler", 2),
+                # full text search on finding
+                ("search", "dev-qa", 1),
+                ("search", "orange juice", 1),
+                # full text search on resource
+                ("search", "ec2", 2),
+                # full text search on finding tags
+                ("search", "value2", 2),
+            ]
+        ),
+    )
+    def test_finding_filters(
+        self,
+        authenticated_client,
+        findings_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        response = authenticated_client.get(
+            reverse("finding-list"),
+            {f"filter[{filter_name}]": filter_value},
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    def test_finding_filter_by_scan_id(self, authenticated_client, findings_fixture):
+        response = authenticated_client.get(
+            reverse("finding-list"),
+            {
+                "filter[scan]": findings_fixture[0].scan.id,
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    def test_finding_filter_by_scan_id_in(self, authenticated_client, findings_fixture):
+        response = authenticated_client.get(
+            reverse("finding-list"),
+            {
+                "filter[scan.in]": [
+                    findings_fixture[0].scan.id,
+                    findings_fixture[1].scan.id,
+                ]
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    def test_finding_filter_by_provider(self, authenticated_client, findings_fixture):
+        response = authenticated_client.get(
+            reverse("finding-list"),
+            {
+                "filter[provider]": findings_fixture[0].scan.provider.id,
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    def test_finding_filter_by_provider_id_in(
+        self, authenticated_client, findings_fixture
+    ):
+        response = authenticated_client.get(
+            reverse("finding-list"),
+            {
+                "filter[provider.in]": [
+                    findings_fixture[0].scan.provider.id,
+                    findings_fixture[1].scan.provider.id,
+                ]
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 2
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        (
+            [
+                "finding",  # Invalid filter name
+                "invalid",
+            ]
+        ),
+    )
+    def test_findings_filters_invalid(self, authenticated_client, filter_name):
+        response = authenticated_client.get(
+            reverse("finding-list"),
+            {f"filter[{filter_name}]": "whatever"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        [
+            "status",
+            "severity",
+            "check_id",
+            "inserted_at",
+            "updated_at",
+        ],
+    )
+    def test_findings_sort(self, authenticated_client, sort_field):
+        response = authenticated_client.get(
+            reverse("finding-list"), {"sort": sort_field}
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_findings_sort_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("finding-list"), {"sort": "invalid"}
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert response.json()["errors"][0]["source"]["pointer"] == "/data"
+        assert (
+            response.json()["errors"][0]["detail"] == "invalid sort parameter: invalid"
+        )
+
+    def test_findings_retrieve(self, authenticated_client, findings_fixture):
+        finding_1, *_ = findings_fixture
+        response = authenticated_client.get(
+            reverse("finding-detail", kwargs={"pk": finding_1.id}),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["status"] == finding_1.status
+        assert (
+            response.json()["data"]["attributes"]["status_extended"]
+            == finding_1.status_extended
+        )
+        assert response.json()["data"]["attributes"]["severity"] == finding_1.severity
+        assert response.json()["data"]["attributes"]["check_id"] == finding_1.check_id
+
+        assert response.json()["data"]["relationships"]["scan"]["data"]["id"] == str(
+            finding_1.scan.id
+        )
+
+        assert response.json()["data"]["relationships"]["resources"]["data"][0][
+            "id"
+        ] == str(finding_1.resources.first().id)
+
+    def test_findings_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("finding-detail", kwargs={"pk": "random_id"}),
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+
+@pytest.mark.django_db
+class TestJWTFields:
+    def test_jwt_fields(self, authenticated_client, create_test_user):
+        data = {"type": "tokens", "email": TEST_USER, "password": TEST_PASSWORD}
+        response = authenticated_client.post(
+            reverse("token-obtain"), data, format="json"
+        )
+
+        assert (
+            response.status_code == status.HTTP_200_OK
+        ), f"Unexpected status code: {response.status_code}"
+
+        access_token = response.data["attributes"]["access"]
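+        # Decode without verifying the signature; only the claims are under test.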
+        payload = jwt.decode(access_token, options={"verify_signature": False})
+
+        expected_fields = {
+            "typ": "access",
+            "aud": "https://api.prowler.com",
+            "iss": "https://api.prowler.com",
+        }
+
+        # Verify expected fields
+        for field in expected_fields:
+            assert field in payload, f"The field '{field}' is not in the JWT"
+            assert (
+                payload[field] == expected_fields[field]
+            ), f"The value of '{field}' does not match"
+
+        # Verify time fields are integers
+        for time_field in ["exp", "iat", "nbf"]:
+            assert time_field in payload, f"The field '{time_field}' is not in the JWT"
+            assert isinstance(
+                payload[time_field], int
+            ), f"The field '{time_field}' is not an integer"
+
+        # Verify identification fields are non-empty strings
+        for id_field in ["jti", "sub", "tenant_id"]:
+            assert id_field in payload, f"The field '{id_field}' is not in the JWT"
+            assert (
+                isinstance(payload[id_field], str) and payload[id_field]
+            ), f"The field '{id_field}' is not a valid string"
+
+
+@pytest.mark.django_db
+class TestInvitationViewSet:
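+    # Invitations must expire at least a day out (a 23-hour expiry is rejected
+    # below), so keep an hour of margin over the 24-hour minimum.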
+    TOMORROW = datetime.now(timezone.utc) + timedelta(days=1, hours=1)
+    TOMORROW_ISO = TOMORROW.isoformat()
+
+    def test_invitations_list(self, authenticated_client, invitations_fixture):
+        response = authenticated_client.get(reverse("invitation-list"))
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == len(invitations_fixture)
+
+    def test_invitations_retrieve(self, authenticated_client, invitations_fixture):
+        invitation1, _ = invitations_fixture
+        response = authenticated_client.get(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": invitation1.id},
+            ),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert response.json()["data"]["attributes"]["email"] == invitation1.email
+        assert response.json()["data"]["attributes"]["state"] == invitation1.state
+        assert response.json()["data"]["attributes"]["token"] == invitation1.token
+        assert response.json()["data"]["relationships"]["inviter"]["data"]["id"] == str(
+            invitation1.inviter.id
+        )
+
+    def test_invitations_invalid_retrieve(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse(
+                "invitation-detail",
+                kwargs={
+                    "pk": "f498b103-c760-4785-9a3e-e23fafbb7b02",
+                },
+            ),
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_invitations_create_valid(self, authenticated_client, create_test_user):
+        user = create_test_user
+        data = {
+            "data": {
+                "type": "invitations",
+                "attributes": {
+                    "email": "any_email@prowler.com",
+                    "expires_at": self.TOMORROW_ISO,
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("invitation-list"),
+            data=json.dumps(data),
+            content_type=API_JSON_CONTENT_TYPE,
+        )
+        assert response.status_code == status.HTTP_201_CREATED
+        assert Invitation.objects.count() == 1
+        assert (
+            response.json()["data"]["attributes"]["email"]
+            == data["data"]["attributes"]["email"]
+        )
+        assert response.json()["data"]["attributes"]["expires_at"] == data["data"][
+            "attributes"
+        ]["expires_at"].replace("+00:00", "Z")
+        assert (
+            response.json()["data"]["attributes"]["state"]
+            == Invitation.State.PENDING.value
+        )
+        assert response.json()["data"]["relationships"]["inviter"]["data"]["id"] == str(
+            user.id
+        )
+
+    @pytest.mark.parametrize(
+        "email",
+        [
+            "invalid_email",
+            "invalid_email@",
+            # There is a pending invitation with this email
+            "testing@prowler.com",
+            # User is already a member of the tenant
+            TEST_USER,
+        ],
+    )
+    def test_invitations_create_invalid_email(
+        self, email, authenticated_client, invitations_fixture
+    ):
+        data = {
+            "data": {
+                "type": "invitations",
+                "attributes": {
+                    "email": email,
+                    "expires_at": self.TOMORROW_ISO,
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("invitation-list"),
+            data=json.dumps(data),
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/email"
+        )
+
+    def test_invitations_create_invalid_expires_at(
+        self, authenticated_client, invitations_fixture
+    ):
+        data = {
+            "data": {
+                "type": "invitations",
+                "attributes": {
+                    "email": "thisisarandomemail@prowler.com",
+                    "expires_at": (
+                        datetime.now(timezone.utc) + timedelta(hours=23)
+                    ).isoformat(),
+                },
+            }
+        }
+        response = authenticated_client.post(
+            reverse("invitation-list"),
+            data=json.dumps(data),
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/expires_at"
+        )
+
+    def test_invitations_partial_update_valid(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        new_email = "new_email@prowler.com"
+        new_expires_at = datetime.now(timezone.utc) + timedelta(days=7)
+        new_expires_at_iso = new_expires_at.isoformat()
+        data = {
+            "data": {
+                "id": str(invitation.id),
+                "type": "invitations",
+                "attributes": {
+                    "email": new_email,
+                    "expires_at": new_expires_at_iso,
+                },
+            }
+        }
+        assert invitation.email != new_email
+        assert invitation.expires_at != new_expires_at
+
+        response = authenticated_client.patch(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            ),
+            data=json.dumps(data),
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_200_OK
+        invitation.refresh_from_db()
+
+        assert invitation.email == new_email
+        assert invitation.expires_at == new_expires_at
+
+    @pytest.mark.parametrize(
+        "email",
+        [
+            "invalid_email",
+            "invalid_email@",
+            # There is a pending invitation with this email
+            "testing@prowler.com",
+            # User is already a member of the tenant
+            TEST_USER,
+        ],
+    )
+    def test_invitations_partial_update_invalid_email(
+        self, email, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        data = {
+            "data": {
+                "id": str(invitation.id),
+                "type": "invitations",
+                "attributes": {
+                    "email": email,
+                    "expires_at": self.TOMORROW_ISO,
+                },
+            }
+        }
+        response = authenticated_client.patch(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            ),
+            data=json.dumps(data),
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/email"
+        )
+
+    def test_invitations_partial_update_invalid_expires_at(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        data = {
+            "data": {
+                "id": str(invitation.id),
+                "type": "invitations",
+                "attributes": {
+                    "expires_at": (
+                        datetime.now(timezone.utc) + timedelta(hours=23)
+                    ).isoformat(),
+                },
+            }
+        }
+        response = authenticated_client.patch(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            ),
+            data=json.dumps(data),
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert (
+            response.json()["errors"][0]["source"]["pointer"]
+            == "/data/attributes/expires_at"
+        )
+
+    def test_invitations_partial_update_invalid_content_type(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        response = authenticated_client.patch(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            ),
+            data={},
+        )
+        assert response.status_code == status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
+
+    def test_invitations_partial_update_invalid_content(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        response = authenticated_client.patch(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            ),
+            data={"email": "invalid_email"},
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    def test_invitations_partial_update_invalid_invitation(self, authenticated_client):
+        response = authenticated_client.patch(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": "54611fc8-b02e-4cc1-aaaa-34acae625629"},
+            ),
+            data={},
+            content_type="application/vnd.api+json",
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_invitations_delete(self, authenticated_client, invitations_fixture):
+        invitation, *_ = invitations_fixture
+        assert invitation.state == Invitation.State.PENDING.value
+
+        response = authenticated_client.delete(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            )
+        )
+        invitation.refresh_from_db()
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+        assert invitation.state == Invitation.State.REVOKED.value
+
+    def test_invitations_invalid_delete(self, authenticated_client):
+        response = authenticated_client.delete(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": "54611fc8-b02e-4cc1-aaaa-34acae625629"},
+            )
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_invitations_invalid_delete_invalid_state(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        invitation.state = Invitation.State.ACCEPTED.value
+        invitation.save()
+
+        response = authenticated_client.delete(
+            reverse(
+                "invitation-detail",
+                kwargs={"pk": str(invitation.id)},
+            )
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert response.json()["errors"][0]["source"]["pointer"] == "/data"
+        assert (
+            response.json()["errors"][0]["detail"]
+            == "This invitation cannot be revoked."
+        )
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_new_user(self, client, invitations_fixture):
+        invitation, *_ = invitations_fixture
+
+        data = {
+            "name": "test",
+            "password": "newpassword123",
+            "email": invitation.email,
+        }
+        assert invitation.state == Invitation.State.PENDING.value
+        assert not User.objects.filter(email__iexact=invitation.email).exists()
+
+        response = client.post(
+            reverse("user-list") + f"?invitation_token={invitation.token}",
+            data=data,
+            format="json",
+        )
+
+        invitation.refresh_from_db()
+        assert response.status_code == status.HTTP_201_CREATED
+        assert User.objects.filter(email__iexact=invitation.email).exists()
+        assert invitation.state == Invitation.State.ACCEPTED.value
+        assert Membership.objects.filter(
+            user__email__iexact=invitation.email, tenant=invitation.tenant
+        ).exists()
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_existing_user(
+        self, authenticated_client, create_test_user, tenants_fixture
+    ):
+        *_, tenant = tenants_fixture
+        user = create_test_user
+
+        invitation = Invitation.objects.create(
+            tenant=tenant,
+            email=TEST_USER,
+            inviter=user,
+            expires_at=self.TOMORROW,
+        )
+
+        data = {
+            "invitation_token": invitation.token,
+        }
+
+        assert not Membership.objects.filter(
+            user__email__iexact=user.email, tenant=tenant
+        ).exists()
+
+        response = authenticated_client.post(
+            reverse("invitation-accept"), data=data, format="json"
+        )
+
+        assert response.status_code == status.HTTP_201_CREATED
+        invitation.refresh_from_db()
+        assert Membership.objects.filter(
+            user__email__iexact=user.email, tenant=tenant
+        ).exists()
+        assert invitation.state == Invitation.State.ACCEPTED.value
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_invalid_token(self, authenticated_client):
+        data = {
+            "invitation_token": "invalid_token",
+        }
+
+        response = authenticated_client.post(
+            reverse("invitation-accept"), data=data, format="json"
+        )
+
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+        assert response.json()["errors"][0]["code"] == "not_found"
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_invalid_token_expired(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        invitation.expires_at = datetime.now(timezone.utc) - timedelta(days=1)
+        invitation.email = TEST_USER
+        invitation.save()
+
+        data = {
+            "invitation_token": invitation.token,
+        }
+
+        response = authenticated_client.post(
+            reverse("invitation-accept"), data=data, format="json"
+        )
+
+        assert response.status_code == status.HTTP_410_GONE
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_invalid_token_expired_new_user(
+        self, client, invitations_fixture
+    ):
+        new_email = "new_email@prowler.com"
+        invitation, *_ = invitations_fixture
+        invitation.expires_at = datetime.now(timezone.utc) - timedelta(days=1)
+        invitation.email = new_email
+        invitation.save()
+
+        data = {
+            "name": "test",
+            "password": "newpassword123",
+            "email": new_email,
+        }
+
+        response = client.post(
+            reverse("user-list") + f"?invitation_token={invitation.token}",
+            data=data,
+            format="json",
+        )
+
+        assert response.status_code == status.HTTP_410_GONE
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_invalid_token_accepted(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        invitation.state = Invitation.State.ACCEPTED.value
+        invitation.email = TEST_USER
+        invitation.save()
+
+        data = {
+            "invitation_token": invitation.token,
+        }
+
+        response = authenticated_client.post(
+            reverse("invitation-accept"), data=data, format="json"
+        )
+
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert (
+            response.json()["errors"][0]["detail"]
+            == "This invitation is no longer valid."
+        )
+
+    @patch("api.db_router.MainRouter.admin_db", new="default")
+    def test_invitations_accept_invitation_invalid_token_revoked(
+        self, authenticated_client, invitations_fixture
+    ):
+        invitation, *_ = invitations_fixture
+        invitation.state = Invitation.State.REVOKED.value
+        invitation.email = TEST_USER
+        invitation.save()
+
+        data = {
+            "invitation_token": invitation.token,
+        }
+
+        response = authenticated_client.post(
+            reverse("invitation-accept"), data=data, format="json"
+        )
+
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert (
+            response.json()["errors"][0]["detail"]
+            == "This invitation is no longer valid."
+        )
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        (
+            [
+                ("inserted_at", TODAY, 2),
+                ("inserted_at.gte", "2024-01-01", 2),
+                ("inserted_at.lte", "2024-01-01", 0),
+                ("updated_at.gte", "2024-01-01", 2),
+                ("updated_at.lte", "2024-01-01", 0),
+                ("expires_at.gte", TODAY, 1),
+                ("expires_at.lte", TODAY, 1),
+                ("expires_at", TODAY, 0),
+                ("email", "testing@prowler.com", 2),
+                ("email.icontains", "testing", 2),
+                ("inviter", "", 2),
+            ]
+        ),
+    )
+    def test_invitations_filters(
+        self,
+        authenticated_client,
+        create_test_user,
+        invitations_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        user = create_test_user
+        response = authenticated_client.get(
+            reverse("invitation-list"),
+            {
+                f"filter[{filter_name}]": filter_value
+                if filter_name != "inviter"
+                else str(user.id)
+            },
+        )
+
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    def test_invitations_list_filter_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("invitation-list"),
+            {"filter[invalid]": "whatever"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        [
+            "inserted_at",
+            "updated_at",
+            "expires_at",
+            "state",
+            "inviter",
+        ],
+    )
+    def test_invitations_sort(self, authenticated_client, sort_field):
+        response = authenticated_client.get(
+            reverse("invitation-list"),
+            {"sort": sort_field},
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_invitations_sort_invalid(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("invitation-list"),
+            {"sort": "invalid"},
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+@pytest.mark.django_db
+class TestComplianceOverviewViewSet:
+    def test_compliance_overview_list_none(self, authenticated_client):
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {"filter[scan_id]": "8d20ac7d-4cbc-435e-85f4-359be37af821"},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == 0
+
+    def test_compliance_overview_list(
+        self, authenticated_client, compliance_overviews_fixture
+    ):
+        # List compliance overviews with existing data
+        compliance_overview1, compliance_overview2 = compliance_overviews_fixture
+        scan_id = str(compliance_overview1.scan.id)
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {"filter[scan_id]": scan_id},
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert (
+            len(response.json()["data"]) == 1
+        )  # The custom get_queryset method returns a single entry per compliance_id
+
+    def test_compliance_overview_list_missing_scan_id(self, authenticated_client):
+        # Attempt to list compliance overviews without providing filter[scan_id]
+        response = authenticated_client.get(reverse("complianceoverview-list"))
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["source"]["pointer"] == "filter[scan_id]"
+        assert response.json()["errors"][0]["code"] == "required"
+
+    @pytest.mark.parametrize(
+        "filter_name, filter_value, expected_count",
+        [
+            ("compliance_id", "aws_account_security_onboarding_aws", 1),
+            ("compliance_id.icontains", "security_onboarding", 1),
+            ("framework", "AWS-Account-Security-Onboarding", 1),
+            ("framework.icontains", "security-onboarding", 1),
+            ("version", "1.0", 1),
+            ("version", "2.0", 0),
+            ("version.icontains", "0", 1),
+            ("region", "eu-west-1", 1),
+            ("region.icontains", "west-1", 1),
+            ("region.in", "eu-west-1,eu-west-2", 1),
+            ("inserted_at.date", "2024-01-01", 0),
+            ("inserted_at.date", TODAY, 1),
+            ("inserted_at.gte", "2024-01-01", 1),
+        ],
+    )
+    def test_compliance_overview_filters(
+        self,
+        authenticated_client,
+        compliance_overviews_fixture,
+        filter_name,
+        filter_value,
+        expected_count,
+    ):
+        # Test filtering compliance overviews
+        compliance_overview1 = compliance_overviews_fixture[0]
+        scan_id = str(compliance_overview1.scan.id)
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {
+                "filter[scan_id]": scan_id,
+                f"filter[{filter_name}]": filter_value,
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+        assert len(response.json()["data"]) == expected_count
+
+    @pytest.mark.parametrize(
+        "filter_name",
+        ["invalid_filter", "unknown_field"],
+    )
+    def test_compliance_overview_filters_invalid(
+        self, authenticated_client, compliance_overviews_fixture, filter_name
+    ):
+        # Test handling of invalid filters
+        compliance_overview1 = compliance_overviews_fixture[0]
+        scan_id = str(compliance_overview1.scan.id)
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {
+                "filter[scan_id]": scan_id,
+                f"filter[{filter_name}]": "some_value",
+            },
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+    @pytest.mark.parametrize(
+        "sort_field",
+        ["inserted_at", "-inserted_at", "compliance_id", "-compliance_id"],
+    )
+    def test_compliance_overview_sort(
+        self, authenticated_client, compliance_overviews_fixture, sort_field
+    ):
+        # Test sorting compliance overviews
+        compliance_overview1 = compliance_overviews_fixture[0]
+        scan_id = str(compliance_overview1.scan.id)
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {
+                "filter[scan_id]": scan_id,
+                "sort": sort_field,
+            },
+        )
+        assert response.status_code == status.HTTP_200_OK
+
+    def test_compliance_overview_sort_invalid(
+        self, authenticated_client, compliance_overviews_fixture
+    ):
+        # Test handling of invalid sort parameters
+        compliance_overview1 = compliance_overviews_fixture[0]
+        scan_id = str(compliance_overview1.scan.id)
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {
+                "filter[scan_id]": scan_id,
+                "sort": "invalid_field",
+            },
+        )
+        assert response.status_code == status.HTTP_400_BAD_REQUEST
+        assert response.json()["errors"][0]["code"] == "invalid"
+        assert "invalid sort parameter" in response.json()["errors"][0]["detail"]
+
+    def test_compliance_overview_retrieve(
+        self, authenticated_client, compliance_overviews_fixture
+    ):
+        # Retrieve a specific compliance overview
+        compliance_overview1 = compliance_overviews_fixture[0]
+
+        response = authenticated_client.get(
+            reverse(
+                "complianceoverview-detail",
+                kwargs={"pk": compliance_overview1.id},
+            ),
+        )
+        assert response.status_code == status.HTTP_200_OK
+        data = response.json()["data"]
+        assert data["id"] == str(compliance_overview1.id)
+        attributes = data["attributes"]
+        assert attributes["compliance_id"] == compliance_overview1.compliance_id
+        assert attributes["framework"] == compliance_overview1.framework
+        assert attributes["version"] == compliance_overview1.version
+        assert attributes["region"] == compliance_overview1.region
+        assert attributes["description"] == compliance_overview1.description
+        assert "requirements" in attributes
+
+    def test_compliance_overview_invalid_retrieve(self, authenticated_client):
+        # Attempt to retrieve a compliance overview with an invalid ID
+        response = authenticated_client.get(
+            reverse(
+                "complianceoverview-detail",
+                kwargs={"pk": "invalid-id"},
+            ),
+        )
+        assert response.status_code == status.HTTP_404_NOT_FOUND
+
+    def test_compliance_overview_list_queryset(
+        self, authenticated_client, compliance_overviews_fixture
+    ):
+        compliance_overview1, compliance_overview2 = compliance_overviews_fixture
+        scan_id = str(compliance_overview1.scan.id)
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {"filter[scan_id]": scan_id},
+        )
+        # Without additional filters, the overview with the most failed requirements is returned
+        assert len(response.json()["data"]) == 1
+        assert response.json()["data"][0]["id"] == str(compliance_overview2.id)
+
+        compliance_overview1.requirements_failed = 5
+        compliance_overview1.save()
+
+        response = authenticated_client.get(
+            reverse("complianceoverview-list"),
+            {"filter[scan_id]": scan_id},
+        )
+        # Without additional filters, compliance_overview1 now has the most failed requirements and is returned
+        assert len(response.json()["data"]) == 1
+        assert response.json()["data"][0]["id"] == str(compliance_overview1.id)
+
+
+@pytest.mark.django_db
+class TestOverviewViewSet:
+    def test_overview_list_invalid_method(self, authenticated_client):
+        response = authenticated_client.put(reverse("overview-list"))
+        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
+
+    def test_overview_providers_list(
+        self, authenticated_client, findings_fixture, resources_fixture
+    ):
+        response = authenticated_client.get(reverse("overview-providers"))
+        assert response.status_code == status.HTTP_200_OK
+        # Only findings from one provider
+        assert len(response.json()["data"]) == 1
+        assert response.json()["data"][0]["attributes"]["findings"]["total"] == len(
+            findings_fixture
+        )
+        assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 0
+        assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 2
+        assert response.json()["data"][0]["attributes"]["findings"]["manual"] == 0
+        assert response.json()["data"][0]["attributes"]["resources"]["total"] == len(
+            resources_fixture
+        )
diff --git a/api/src/backend/api/utils.py b/api/src/backend/api/utils.py
new file mode 100644
index 0000000000..1f60669039
--- /dev/null
+++ b/api/src/backend/api/utils.py
@@ -0,0 +1,189 @@
+from datetime import datetime, timezone
+
+from prowler.providers.aws.aws_provider import AwsProvider
+from prowler.providers.azure.azure_provider import AzureProvider
+from prowler.providers.common.models import Connection
+from prowler.providers.gcp.gcp_provider import GcpProvider
+from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
+from rest_framework.exceptions import ValidationError, NotFound
+
+from api.db_router import MainRouter
+from api.exceptions import InvitationTokenExpiredException
+from api.models import Provider, Invitation
+
+
+def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:
+    """
+    Recursively merge two dictionaries, using `default_dict` as the base and `replacement_dict` for overriding values.
+
+    Args:
+        default_dict (dict): The base dictionary containing default key-value pairs.
+        replacement_dict (dict): The dictionary containing values that should override those in `default_dict`.
+
+    Returns:
+        dict: A new dictionary containing all keys from `default_dict` with values from `replacement_dict` replacing
+              any overlapping keys. If a key in both `default_dict` and `replacement_dict` contains dictionaries,
+              this function will merge them recursively.
+    """
+    result = default_dict.copy()
+
+    for key, value in replacement_dict.items():
+        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+            if value:
+                result[key] = merge_dicts(result[key], value)
+            else:
+                result[key] = value
+        else:
+            result[key] = value
+
+    return result
+
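+# Illustrative usage (hypothetical keys): nested dictionaries are merged
+# recursively, with values from the replacement dictionary winning on conflicts:
+#
+#   merge_dicts({"aws": {"regions": ["us-east-1"], "timeout": 30}},
+#               {"aws": {"timeout": 60}})
+#   # -> {"aws": {"regions": ["us-east-1"], "timeout": 60}}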
+
+def return_prowler_provider(
+    provider: Provider,
+) -> type[AwsProvider | AzureProvider | GcpProvider | KubernetesProvider]:
+    """Return the Prowler provider class based on the given provider type.
+
+    Args:
+        provider (Provider): The provider object containing the provider type and associated secrets.
+
+    Returns:
+        AwsProvider | AzureProvider | GcpProvider | KubernetesProvider: The corresponding provider class.
+
+    Raises:
+        ValueError: If the provider type specified in `provider.provider` is not supported.
+    """
+    match provider.provider:
+        case Provider.ProviderChoices.AWS.value:
+            prowler_provider = AwsProvider
+        case Provider.ProviderChoices.GCP.value:
+            prowler_provider = GcpProvider
+        case Provider.ProviderChoices.AZURE.value:
+            prowler_provider = AzureProvider
+        case Provider.ProviderChoices.KUBERNETES.value:
+            prowler_provider = KubernetesProvider
+        case _:
+            raise ValueError(f"Provider type {provider.provider} not supported")
+    return prowler_provider
+
+
+def get_prowler_provider_kwargs(provider: Provider) -> dict:
+    """Get the Prowler provider kwargs based on the given provider type.
+
+    Args:
+        provider (Provider): The provider object containing the provider type and associated secret.
+
+    Returns:
+        dict: The provider kwargs for the corresponding provider class.
+    """
+    prowler_provider_kwargs = provider.secret.secret
+    if provider.provider == Provider.ProviderChoices.AZURE.value:
+        prowler_provider_kwargs = {
+            **prowler_provider_kwargs,
+            "subscription_ids": [provider.uid],
+        }
+    elif provider.provider == Provider.ProviderChoices.GCP.value:
+        prowler_provider_kwargs = {
+            **prowler_provider_kwargs,
+            "project_ids": [provider.uid],
+        }
+    elif provider.provider == Provider.ProviderChoices.KUBERNETES.value:
+        prowler_provider_kwargs = {**prowler_provider_kwargs, "context": provider.uid}
+    return prowler_provider_kwargs
+
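+# Illustrative result (hypothetical secret): for an Azure provider whose
+# provider.secret.secret is {"client_id": "...", "client_secret": "..."} and
+# whose provider.uid is a subscription ID, the returned kwargs are
+# {"client_id": "...", "client_secret": "...", "subscription_ids": [provider.uid]}.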
+
+def initialize_prowler_provider(
+    provider: Provider,
+) -> AwsProvider | AzureProvider | GcpProvider | KubernetesProvider:
+    """Initialize a Prowler provider instance based on the given provider type.
+
+    Args:
+        provider (Provider): The provider object containing the provider type and associated secrets.
+
+    Returns:
+        AwsProvider | AzureProvider | GcpProvider | KubernetesProvider: An instance of the corresponding provider class
+            (`AwsProvider`, `AzureProvider`, `GcpProvider`, or `KubernetesProvider`) initialized with the
+            provider's secrets.
+    """
+    prowler_provider = return_prowler_provider(provider)
+    prowler_provider_kwargs = get_prowler_provider_kwargs(provider)
+    return prowler_provider(**prowler_provider_kwargs)
+
+
+def prowler_provider_connection_test(provider: Provider) -> Connection:
+    """Test the connection to a Prowler provider based on the given provider type.
+
+    Args:
+        provider (Provider): The provider object containing the provider type and associated secrets.
+
+    Returns:
+        Connection: A connection object representing the result of the connection test for the specified provider.
+    """
+    prowler_provider = return_prowler_provider(provider)
+    prowler_provider_kwargs = provider.secret.secret
+    return prowler_provider.test_connection(
+        **prowler_provider_kwargs, provider_id=provider.uid, raise_on_exception=False
+    )
+
+
+def validate_invitation(
+    invitation_token: str, email: str, raise_not_found=False
+) -> Invitation:
+    """
+    Validates an invitation based on the provided token and email.
+
+    This function attempts to retrieve an Invitation object using the given
+    `invitation_token` and `email`. It performs several checks to ensure that
+    the invitation is valid, not expired, and in the correct state for acceptance.
+
+    Args:
+        invitation_token (str): The token associated with the invitation.
+        email (str): The email address associated with the invitation.
+        raise_not_found (bool, optional): If True, raises a `NotFound` exception
+            when the invitation is not found. If False, raises a `ValidationError`.
+            Defaults to False.
+
+    Returns:
+        Invitation: The validated Invitation object.
+
+    Raises:
+        NotFound: If `raise_not_found` is True and the invitation does not exist.
+        ValidationError: If the invitation does not exist and `raise_not_found`
+            is False, or if the invitation is invalid or in an incorrect state.
+        InvitationTokenExpiredException: If the invitation has expired.
+
+    Notes:
+        - This function uses the admin database connector to bypass RLS protection
+          since the invitation may belong to a tenant the user is not a member of yet.
+        - If the invitation has expired, its state is updated to EXPIRED, and an
+          `InvitationTokenExpiredException` is raised.
+        - Only invitations in the PENDING state can be accepted.
+
+    Examples:
+        invitation = validate_invitation("TOKEN123", "user@example.com")
+    """
+    try:
+        # Admin DB connector is used to bypass RLS protection since the invitation belongs to a tenant the user
+        # is not a member of yet
+        invitation = Invitation.objects.using(MainRouter.admin_db).get(
+            token=invitation_token, email=email
+        )
+    except Invitation.DoesNotExist:
+        if raise_not_found:
+            raise NotFound(detail="Invitation is not valid.")
+        else:
+            raise ValidationError({"invitation_token": "Invalid invitation code."})
+
+    # Check if the invitation has expired
+    if invitation.expires_at < datetime.now(timezone.utc):
+        invitation.state = Invitation.State.EXPIRED
+        invitation.save(using=MainRouter.admin_db)
+        raise InvitationTokenExpiredException()
+
+    # Check the state of the invitation
+    if invitation.state != Invitation.State.PENDING:
+        raise ValidationError(
+            {"invitation_token": "This invitation is no longer valid."}
+        )
+
+    return invitation
diff --git a/api/src/backend/api/uuid_utils.py b/api/src/backend/api/uuid_utils.py
new file mode 100644
index 0000000000..8b5dcf7874
--- /dev/null
+++ b/api/src/backend/api/uuid_utils.py
@@ -0,0 +1,148 @@
+from datetime import datetime, timezone
+from random import getrandbits
+
+from dateutil.relativedelta import relativedelta
+from rest_framework_json_api.serializers import ValidationError
+from uuid6 import UUID
+
+
+def transform_into_uuid7(uuid_obj: UUID) -> UUID:
+    """
+    Validates that the given UUID object is a UUIDv7 and returns it.
+
+    This function checks if the provided UUID object is of version 7.
+    If it is, it returns a new UUID object constructed from the uppercase
+    hexadecimal representation of the input UUID. If not, it raises a ValidationError.
+
+    Args:
+        uuid_obj (UUID): The UUID object to validate and transform.
+
+    Returns:
+        UUID: A new UUIDv7 object constructed from the uppercase hexadecimal
+        representation of the input UUID.
+
+    Raises:
+        ValidationError: If the provided UUID is not a version 7 UUID.
+    """
+    try:
+        if uuid_obj.version != 7:
+            raise ValueError
+        return UUID(hex=uuid_obj.hex.upper())
+    except ValueError:
+        raise ValidationError("Invalid UUIDv7 value.")
+
+
+def datetime_to_uuid7(dt: datetime) -> UUID:
+    """
+    Generates a UUIDv7 from a given datetime object.
+
+    Constructs a UUIDv7 using the provided datetime timestamp.
+    Ensures that the version and variant bits are set correctly.
+
+    Args:
+        dt: A datetime object representing the desired timestamp for the UUIDv7.
+
+    Returns:
+        A UUIDv7 object corresponding to the given datetime.
+    """
+    timestamp_ms = int(dt.timestamp() * 1000) & 0xFFFFFFFFFFFF  # 48 bits
+
+    # Generate 12 bits of randomness for the sequence
+    rand_seq = getrandbits(12)
+    # Generate 62 bits of randomness for the node
+    rand_node = getrandbits(62)
+
+    # Build the UUID integer
+    uuid_int = timestamp_ms << 80  # Shift timestamp to bits 80-127
+
+    # Set the version to 7 in bits 76-79
+    uuid_int |= 0x7 << 76
+
+    # Set 12 bits of randomness in bits 64-75
+    uuid_int |= rand_seq << 64
+
+    # Set the variant to "10" in bits 62-63
+    uuid_int |= 0x2 << 62
+
+    # Set 62 bits of randomness in bits 0-61
+    uuid_int |= rand_node
+
+    return UUID(int=uuid_int)
+
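+# Round-trip sketch: for a millisecond-aligned datetime, the timestamp can be
+# recovered exactly with datetime_from_uuid7 (defined below):
+#
+#   dt = datetime(2024, 1, 15, 12, 0, tzinfo=timezone.utc)
+#   assert datetime_from_uuid7(datetime_to_uuid7(dt)) == dt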
+
+def datetime_from_uuid7(uuid7: UUID) -> datetime:
+    """
+    Extracts the timestamp from a UUIDv7 and returns it as a datetime object.
+
+    Args:
+        uuid7: A UUIDv7 object.
+
+    Returns:
+        A datetime object representing the timestamp encoded in the UUIDv7.
+    """
+    timestamp_ms = uuid7.time
+    return datetime.fromtimestamp(timestamp_ms / 1000, tz=timezone.utc)
+
+
+def uuid7_start(uuid_obj: UUID) -> UUID:
+    """
+    Returns a UUIDv7 that represents the start of the day for the given UUID.
+
+    Args:
+        uuid_obj: A UUIDv7 object.
+
+    Returns:
+        A UUIDv7 object representing the start of the day for the given UUID's timestamp.
+    """
+    start_of_day = datetime_from_uuid7(uuid_obj).replace(
+        hour=0, minute=0, second=0, microsecond=0
+    )
+    return datetime_to_uuid7(start_of_day)
+
+
+def uuid7_end(uuid_obj: UUID, offset_months: int = 1) -> UUID:
+    """
+    Returns a UUIDv7 that represents the end of the month for the given UUID.
+
+    Args:
+        uuid_obj: A UUIDv7 object.
+        offset_months: Number of months to offset from the given UUID's date. Defaults to 1 for when table
+            partitions are not in use; otherwise, the value configured in FINDINGS_TABLE_PARTITION_MONTHS is used.
+
+    Returns:
+        A UUIDv7 object representing the end of the month for the given UUID's date plus offset_months.
+    """
+    end_of_month = datetime_from_uuid7(uuid_obj).replace(
+        day=1, hour=0, minute=0, second=0, microsecond=0
+    )
+    end_of_month += relativedelta(months=offset_months, microseconds=-1)
+    return datetime_to_uuid7(end_of_month)
+
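+# Illustrative behavior: for a UUIDv7 generated on 2024-01-15 and the default
+# offset_months=1, uuid7_end returns a UUIDv7 whose (millisecond-precision)
+# timestamp is 2024-01-31 23:59:59.999 UTC, i.e. just before the next month.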
+
+def uuid7_range(uuid_list: list[UUID]) -> list[UUID]:
+    """
+    For the given list of UUIDv7s, returns the start and end UUIDv7 values that bound
+    the time range covered by the UUIDs.
+
+    Args:
+        uuid_list: A list of UUIDv7 objects.
+
+    Returns:
+        A list containing two UUIDv7 objects: the start of the earliest day and the end of the latest month.
+
+    Raises:
+        ValidationError: If the list is empty or contains invalid UUIDv7 objects.
+    """
+    if not uuid_list:
+        raise ValidationError("UUID list is empty.")
+
+    try:
+        start_uuid = min(uuid_list, key=lambda u: u.time)
+        end_uuid = max(uuid_list, key=lambda u: u.time)
+    except AttributeError:
+        raise ValidationError("Invalid UUIDv7 objects in the list.")
+
+    start_range = uuid7_start(start_uuid)
+    end_range = uuid7_end(end_uuid)
+
+    return [start_range, end_range]
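+
+
+# Illustrative usage: for UUIDv7s generated on 2024-01-15 and 2024-03-02,
+# uuid7_range returns [UUIDv7(2024-01-15 00:00:00.000), UUIDv7(2024-03-31 23:59:59.999)],
+# i.e. the start of the earliest day through the end of the latest month.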
diff --git a/api/src/backend/api/v1/__init__.py b/api/src/backend/api/v1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/api/v1/serializers.py b/api/src/backend/api/v1/serializers.py
new file mode 100644
index 0000000000..9b0895adcb
--- /dev/null
+++ b/api/src/backend/api/v1/serializers.py
@@ -0,0 +1,1272 @@
+import json
+from datetime import datetime, timezone, timedelta
+
+from django.conf import settings
+from django.contrib.auth import authenticate
+from django.contrib.auth.models import update_last_login
+from django.contrib.auth.password_validation import validate_password
+from drf_spectacular.utils import extend_schema_field
+from jwt.exceptions import InvalidKeyError
+from rest_framework_json_api import serializers
+from rest_framework_json_api.serializers import ValidationError
+from rest_framework_simplejwt.exceptions import TokenError
+from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
+from rest_framework_simplejwt.tokens import RefreshToken
+
+from api.models import (
+    StateChoices,
+    User,
+    Membership,
+    Provider,
+    ProviderGroup,
+    ProviderGroupMembership,
+    Scan,
+    Task,
+    Resource,
+    ResourceTag,
+    Finding,
+    ProviderSecret,
+    Invitation,
+    ComplianceOverview,
+)
+from api.rls import Tenant
+
+
+# Tokens
+
+
+class TokenSerializer(TokenObtainPairSerializer):
+    email = serializers.EmailField(write_only=True)
+    password = serializers.CharField(write_only=True)
+    tenant_id = serializers.UUIDField(
+        write_only=True,
+        required=False,
+        help_text="If not provided, the tenant ID of the first membership that was added"
+        " to the user will be used.",
+    )
+
+    # Output tokens
+    refresh = serializers.CharField(read_only=True)
+    access = serializers.CharField(read_only=True)
+
+    class JSONAPIMeta:
+        resource_name = "tokens"
+
+    def validate(self, attrs):
+        email = attrs.get("email")
+        password = attrs.get("password")
+        tenant_id = str(attrs.get("tenant_id", ""))
+
+        # Authenticate user
+        user = authenticate(username=email, password=password)
+        if user is None:
+            raise ValidationError("Invalid credentials")
+
+        if tenant_id:
+            if not user.is_member_of_tenant(tenant_id):
+                raise ValidationError("Tenant does not exist or user is not a member.")
+        else:
+            first_membership = user.memberships.order_by("date_joined").first()
+            if first_membership is None:
+                raise ValidationError("User has no memberships.")
+            tenant_id = str(first_membership.tenant_id)
+
+        # Generate tokens
+        try:
+            refresh = RefreshToken.for_user(user)
+        except InvalidKeyError:
+            # Handle invalid key error
+            raise ValidationError(
+                {
+                    "detail": "Token generation failed due to invalid key configuration. Provide valid "
+                    "DJANGO_TOKEN_SIGNING_KEY and DJANGO_TOKEN_VERIFYING_KEY in the environment."
+                }
+            )
+        except Exception as e:
+            raise ValidationError({"detail": str(e)})
+
+        # Post-process the tokens
+        # Set the tenant_id
+        refresh["tenant_id"] = tenant_id
+
+        # Set the nbf (not before) claim to the iat (issued at) claim. At the moment, simplejwt does not
+        # provide a built-in way to set the nbf claim.
+        refresh.payload["nbf"] = refresh["iat"]
+
+        # Get the access token
+        access = refresh.access_token
+
+        if settings.SIMPLE_JWT["UPDATE_LAST_LOGIN"]:
+            update_last_login(None, user)
+
+        return {"access": str(access), "refresh": str(refresh)}
+
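+# Illustrative access-token payload produced by TokenSerializer (hypothetical
+# values; the claim set mirrors what the token tests in this diff assert):
+#
+#   {"typ": "access", "iss": "https://api.prowler.com",
+#    "aud": "https://api.prowler.com", "iat": 1700000000, "nbf": 1700000000,
+#    "exp": 1700001800, "jti": "...", "sub": "<user uuid>",
+#    "tenant_id": "<tenant uuid>"}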
+
+# TODO: Check if we can change the parent class to TokenRefreshSerializer from rest_framework_simplejwt.serializers
+class TokenRefreshSerializer(serializers.Serializer):
+    refresh = serializers.CharField()
+
+    # Output token
+    access = serializers.CharField(read_only=True)
+
+    class JSONAPIMeta:
+        resource_name = "tokens-refresh"
+
+    def validate(self, attrs):
+        refresh_token = attrs.get("refresh")
+
+        try:
+            # Validate the refresh token
+            refresh = RefreshToken(refresh_token)
+            # Generate new access token
+            access_token = refresh.access_token
+
+            if settings.SIMPLE_JWT["ROTATE_REFRESH_TOKENS"]:
+                if settings.SIMPLE_JWT["BLACKLIST_AFTER_ROTATION"]:
+                    try:
+                        refresh.blacklist()
+                    except AttributeError:
+                        pass
+
+                refresh.set_jti()
+                refresh.set_exp()
+                refresh.set_iat()
+
+            return {"access": str(access_token), "refresh": str(refresh)}
+        except TokenError:
+            raise ValidationError({"refresh": "Invalid or expired token"})
+
+
+# Base
+
+
+class BaseSerializerV1(serializers.ModelSerializer):
+    def get_root_meta(self, _resource, _many):
+        return {"version": "v1"}
+
+
+class BaseWriteSerializer(BaseSerializerV1):
+    def validate(self, data):
+        if hasattr(self, "initial_data"):
+            initial_data = set(self.initial_data.keys()) - {"id", "type"}
+            unknown_keys = initial_data - set(self.fields.keys())
+            if unknown_keys:
+                raise ValidationError(f"Invalid fields: {unknown_keys}")
+        return data
+
+
+class RLSSerializer(BaseSerializerV1):
+    def create(self, validated_data):
+        tenant_id = self.context.get("tenant_id")
+        validated_data["tenant_id"] = tenant_id
+        return super().create(validated_data)
+
+
+class StateEnumSerializerField(serializers.ChoiceField):
+    def __init__(self, **kwargs):
+        kwargs["choices"] = StateChoices.choices
+        super().__init__(**kwargs)
+
+
+# Users
+
+
+class UserSerializer(BaseSerializerV1):
+    """
+    Serializer for the User model.
+    """
+
+    memberships = serializers.ResourceRelatedField(many=True, read_only=True)
+
+    class Meta:
+        model = User
+        fields = ["id", "name", "email", "company_name", "date_joined", "memberships"]
+
+
+class UserCreateSerializer(BaseWriteSerializer):
+    password = serializers.CharField(write_only=True)
+    company_name = serializers.CharField(required=False)
+
+    class Meta:
+        model = User
+        fields = ["name", "password", "email", "company_name"]
+
+    def validate_password(self, value):
+        user = User(**{k: v for k, v in self.initial_data.items() if k != "type"})
+        validate_password(value, user=user)
+        return value
+
+    def validate_email(self, value):
+        normalized_email = value.strip().lower()
+        if User.objects.filter(email__iexact=normalized_email).exists():
+            raise ValidationError(
+                User._meta.get_field("email").error_messages["unique"], code="unique"
+            )
+        return value
+
+    def create(self, validated_data):
+        password = validated_data.pop("password")
+        user = User(**validated_data)
+
+        validate_password(password, user=user)
+        user.set_password(password)
+        user.save()
+        return user
+
+
+class UserUpdateSerializer(BaseWriteSerializer):
+    password = serializers.CharField(write_only=True, required=False)
+
+    class Meta:
+        model = User
+        fields = ["id", "name", "password", "email", "company_name"]
+        extra_kwargs = {
+            "id": {"read_only": True},
+        }
+
+    def validate_password(self, value):
+        validate_password(value, user=self.instance)
+        return value
+
+    def update(self, instance, validated_data):
+        password = validated_data.pop("password", None)
+        if password:
+            validate_password(password, user=instance)
+            instance.set_password(password)
+        return super().update(instance, validated_data)
+
+
+# Tasks
+class TaskBase(serializers.ModelSerializer):
+    state_mapping = {
+        "PENDING": StateChoices.AVAILABLE,
+        "STARTED": StateChoices.EXECUTING,
+        "PROGRESS": StateChoices.EXECUTING,
+        "SUCCESS": StateChoices.COMPLETED,
+        "FAILURE": StateChoices.FAILED,
+        "REVOKED": StateChoices.CANCELLED,
+    }
+
+    class Meta:
+        fields = ["id"]
+        model = Task
+
+    def map_state(self, task_result_state):
+        return self.state_mapping.get(task_result_state, StateChoices.AVAILABLE)
+
+    @extend_schema_field(
+        {
+            "type": "string",
+            "enum": StateChoices.values,
+        }
+    )
+    def get_state(self, obj):
+        task_result_state = (
+            obj.task_runner_task.status if obj.task_runner_task else None
+        )
+        return self.map_state(task_result_state)
+
+
+class TaskSerializer(RLSSerializer, TaskBase):
+    state = serializers.SerializerMethodField(read_only=True)
+    metadata = serializers.SerializerMethodField(read_only=True)
+    result = serializers.SerializerMethodField(read_only=True)
+    task_args = serializers.SerializerMethodField(read_only=True)
+
+    completed_at = serializers.DateTimeField(
+        source="task_runner_task.date_done", read_only=True
+    )
+    name = serializers.CharField(source="task_runner_task.task_name", read_only=True)
+
+    class Meta:
+        model = Task
+        fields = [
+            "id",
+            "inserted_at",
+            "completed_at",
+            "name",
+            "state",
+            "result",
+            "task_args",
+            "metadata",
+        ]
+
+    @extend_schema_field(serializers.JSONField())
+    def get_metadata(self, obj):
+        return self.get_json_field(obj, "metadata")
+
+    @extend_schema_field(serializers.JSONField())
+    def get_result(self, obj):
+        return self.get_json_field(obj, "result")
+
+    @extend_schema_field(serializers.JSONField())
+    def get_task_args(self, obj):
+        task_args = self.get_json_field(obj, "task_kwargs")
+        # When not empty, Celery stores task_kwargs as a Python-repr string (single quotes, None) rather than valid JSON
+        if isinstance(task_args, str):
+            task_args = json.loads(task_args.replace("'", '"').replace("None", "null"))
+        # Remove tenant_id from task_kwargs if present
+        task_args.pop("tenant_id", None)
+
+        return task_args
+
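+    # Note on get_task_args above (hypothetical values): a stored task_kwargs
+    # string such as "{'tenant_id': '123', 'provider_id': None}" is normalized
+    # to '{"tenant_id": "123", "provider_id": null}' before json.loads, and
+    # tenant_id is then stripped from the parsed result.
+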
+    @staticmethod
+    def get_json_field(obj, field_name):
+        """Helper method to DRY the logic for loading JSON fields from task_runner_task."""
+        task_result_field = (
+            getattr(obj.task_runner_task, field_name, None)
+            if obj.task_runner_task
+            else None
+        )
+        return json.loads(task_result_field) if task_result_field else {}
+
+
+# Tenants
+
+
+class TenantSerializer(BaseSerializerV1):
+    """
+    Serializer for the Tenant model.
+    """
+
+    memberships = serializers.ResourceRelatedField(many=True, read_only=True)
+
+    class Meta:
+        model = Tenant
+        fields = ["id", "name", "memberships"]
+
+
+# Memberships
+
+
+class MemberRoleEnumSerializerField(serializers.ChoiceField):
+    def __init__(self, **kwargs):
+        kwargs["choices"] = Membership.RoleChoices.choices
+        super().__init__(**kwargs)
+
+
+class MembershipSerializer(serializers.ModelSerializer):
+    role = MemberRoleEnumSerializerField()
+    user = serializers.HyperlinkedRelatedField(view_name="user-detail", read_only=True)
+    tenant = serializers.HyperlinkedRelatedField(
+        view_name="tenant-detail", read_only=True
+    )
+
+    class Meta:
+        model = Membership
+        fields = ["id", "user", "tenant", "role", "date_joined"]
+
+
+# Provider Groups
+class ProviderGroupSerializer(RLSSerializer, BaseWriteSerializer):
+    providers = serializers.ResourceRelatedField(many=True, read_only=True)
+
+    def validate(self, attrs):
+        tenant = self.context["tenant_id"]
+        name = attrs.get("name", self.instance.name if self.instance else None)
+
+        # Exclude the current instance when checking for uniqueness during updates
+        queryset = ProviderGroup.objects.filter(tenant=tenant, name=name)
+        if self.instance:
+            queryset = queryset.exclude(pk=self.instance.pk)
+
+        if queryset.exists():
+            raise serializers.ValidationError(
+                {
+                    "name": "A provider group with this name already exists for this tenant."
+                }
+            )
+
+        return super().validate(attrs)
+
+    class Meta:
+        model = ProviderGroup
+        fields = ["id", "name", "inserted_at", "updated_at", "providers", "url"]
+        read_only_fields = ["id", "inserted_at", "updated_at"]
+        extra_kwargs = {
+            "id": {"read_only": True},
+            "inserted_at": {"read_only": True},
+            "updated_at": {"read_only": True},
+        }
+
+
+class ProviderGroupUpdateSerializer(RLSSerializer, BaseWriteSerializer):
+    """
+    Serializer for updating the ProviderGroup model.
+    Only allows "name" field to be updated.
+    """
+
+    class Meta:
+        model = ProviderGroup
+        fields = ["id", "name"]
+
+
+class ProviderGroupMembershipUpdateSerializer(RLSSerializer, BaseWriteSerializer):
+    """
+    Serializer for modifying provider group memberships
+    """
+
+    provider_ids = serializers.ListField(
+        child=serializers.UUIDField(),
+        help_text="List of provider UUIDs to add to the group",
+    )
+
+    def validate(self, attrs):
+        tenant_id = self.context["tenant_id"]
+        provider_ids = attrs.get("provider_ids", [])
+
+        existing_provider_ids = set(
+            Provider.objects.filter(
+                id__in=provider_ids, tenant_id=tenant_id
+            ).values_list("id", flat=True)
+        )
+        provided_provider_ids = set(provider_ids)
+
+        missing_provider_ids = provided_provider_ids - existing_provider_ids
+
+        if missing_provider_ids:
+            raise serializers.ValidationError(
+                {
+                    "provider_ids": f"The following provider IDs do not exist: {', '.join(str(id) for id in missing_provider_ids)}"
+                }
+            )
+
+        return super().validate(attrs)
+
+    class Meta:
+        model = ProviderGroupMembership
+        fields = ["id", "provider_ids"]
+
+
+# Providers
+class ProviderEnumSerializerField(serializers.ChoiceField):
+    def __init__(self, **kwargs):
+        kwargs["choices"] = Provider.ProviderChoices.choices
+        super().__init__(**kwargs)
+
+
+class ProviderSerializer(RLSSerializer):
+    """
+    Serializer for the Provider model.
+    """
+
+    provider = ProviderEnumSerializerField()
+    connection = serializers.SerializerMethodField(read_only=True)
+
+    class Meta:
+        model = Provider
+        fields = [
+            "id",
+            "inserted_at",
+            "updated_at",
+            "provider",
+            "uid",
+            "alias",
+            "connection",
+            # "scanner_args",
+            "secret",
+            "url",
+        ]
+
+    @extend_schema_field(
+        {
+            "type": "object",
+            "properties": {
+                "connected": {"type": "boolean"},
+                "last_checked_at": {"type": "string", "format": "date-time"},
+            },
+        }
+    )
+    def get_connection(self, obj):
+        return {
+            "connected": obj.connected,
+            "last_checked_at": obj.connection_last_checked_at,
+        }
+
+
+class ProviderCreateSerializer(RLSSerializer, BaseWriteSerializer):
+    class Meta:
+        model = Provider
+        fields = [
+            "alias",
+            "provider",
+            "uid",
+            # "scanner_args"
+        ]
+
+
+class ProviderUpdateSerializer(BaseWriteSerializer):
+    """
+    Serializer for updating the Provider model.
+    Only allows "alias" and "scanner_args" fields to be updated.
+    """
+
+    class Meta:
+        model = Provider
+        fields = [
+            "alias",
+            # "scanner_args"
+        ]
+
+
+# Scans
+
+
+class ScanTriggerEnumSerializerField(serializers.ChoiceField):
+    def __init__(self, **kwargs):
+        kwargs["choices"] = Scan.TriggerChoices.choices
+        super().__init__(**kwargs)
+
+
+class ScanSerializer(RLSSerializer):
+    trigger = serializers.ChoiceField(
+        choices=Scan.TriggerChoices.choices, read_only=True
+    )
+    state = StateEnumSerializerField(read_only=True)
+
+    class Meta:
+        model = Scan
+        fields = [
+            "id",
+            "name",
+            "trigger",
+            "state",
+            "unique_resource_count",
+            "progress",
+            # "scanner_args",
+            "duration",
+            "provider",
+            "task",
+            "started_at",
+            "completed_at",
+            "scheduled_at",
+            "url",
+        ]
+
+
+class ScanCreateSerializer(RLSSerializer, BaseWriteSerializer):
+    class Meta:
+        model = Scan
+        # TODO: add mutelist when implemented
+        fields = [
+            "id",
+            "provider",
+            # "scanner_args",
+            "name",
+        ]
+
+    def create(self, validated_data):
+        # provider = validated_data.get("provider")
+
+        # scanner_args will be disabled for the user in the first release
+        # if not validated_data.get("scanner_args"):
+        #     validated_data["scanner_args"] = provider.scanner_args
+        # else:
+        #     validated_data["scanner_args"] = merge_dicts(
+        #         provider.scanner_args, validated_data["scanner_args"]
+        #     )
+
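+        # API-created scans default to a manual trigger when none is provided.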
+        if not validated_data.get("trigger"):
+            validated_data["trigger"] = Scan.TriggerChoices.MANUAL.value
+
+        return RLSSerializer.create(self, validated_data)
+
+
+class ScanUpdateSerializer(BaseWriteSerializer):
+    """
+    Serializer for updating the Scan model.
+    Only allows the "name" field to be updated.
+    """
+
+    class Meta:
+        model = Scan
+        # TODO: add mutelist when implemented
+        fields = ["id", "name"]
+        extra_kwargs = {
+            "id": {"read_only": True},
+        }
+
+
+class ScanTaskSerializer(RLSSerializer):
+    trigger = serializers.ChoiceField(
+        choices=Scan.TriggerChoices.choices, read_only=True
+    )
+    state = StateEnumSerializerField(read_only=True)
+
+    class Meta:
+        model = Scan
+        fields = [
+            "id",
+            "name",
+            "trigger",
+            "state",
+            "unique_resource_count",
+            "progress",
+            # "scanner_args",
+            "duration",
+            "started_at",
+            "completed_at",
+            "scheduled_at",
+        ]
+
+
+class ResourceTagSerializer(RLSSerializer):
+    """
+    Serializer for the ResourceTag model
+    """
+
+    class Meta:
+        model = ResourceTag
+        fields = ["key", "value"]
+
+
+class ResourceSerializer(RLSSerializer):
+    """
+    Serializer for the Resource model.
+    """
+
+    tags = serializers.SerializerMethodField()
+    type_ = serializers.CharField(read_only=True)
+
+    findings = serializers.ResourceRelatedField(many=True, read_only=True)
+
+    class Meta:
+        model = Resource
+        fields = [
+            "id",
+            "inserted_at",
+            "updated_at",
+            "uid",
+            "name",
+            "region",
+            "service",
+            "type_",
+            "tags",
+            "provider",
+            "findings",
+            "url",
+        ]
+        extra_kwargs = {
+            "id": {"read_only": True},
+            "inserted_at": {"read_only": True},
+            "updated_at": {"read_only": True},
+        }
+
+    included_serializers = {
+        "findings": "api.v1.serializers.FindingSerializer",
+        "provider": "api.v1.serializers.ProviderSerializer",
+    }
+
+    @extend_schema_field(
+        {
+            "type": "object",
+            "description": "Tags associated with the resource",
+            "example": {"env": "prod", "owner": "johndoe"},
+        }
+    )
+    def get_tags(self, obj):
+        return obj.get_tags()
+
+    def get_fields(self):
+        """`type` is a Python reserved keyword."""
+        fields = super().get_fields()
+        type_ = fields.pop("type_")
+        fields["type"] = type_
+        return fields
+
+
+class FindingSerializer(RLSSerializer):
+    """
+    Serializer for the Finding model.
+    """
+
+    resources = serializers.ResourceRelatedField(many=True, read_only=True)
+
+    class Meta:
+        model = Finding
+        fields = [
+            "id",
+            "uid",
+            "delta",
+            "status",
+            "status_extended",
+            "severity",
+            "check_id",
+            "check_metadata",
+            "raw_result",
+            "inserted_at",
+            "updated_at",
+            "url",
+            # Relationships
+            "scan",
+            "resources",
+        ]
+
+    included_serializers = {
+        "scan": ScanSerializer,
+        "resources": ResourceSerializer,
+    }
+
+
+# Provider secrets
+class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
+    @staticmethod
+    def validate_secret_based_on_provider(
+        provider_type: str, secret_type: ProviderSecret.TypeChoices, secret: dict
+    ):
+        if secret_type == ProviderSecret.TypeChoices.STATIC:
+            if provider_type == Provider.ProviderChoices.AWS.value:
+                serializer = AwsProviderSecret(data=secret)
+            elif provider_type == Provider.ProviderChoices.AZURE.value:
+                serializer = AzureProviderSecret(data=secret)
+            elif provider_type == Provider.ProviderChoices.GCP.value:
+                serializer = GCPProviderSecret(data=secret)
+            elif provider_type == Provider.ProviderChoices.KUBERNETES.value:
+                serializer = KubernetesProviderSecret(data=secret)
+            else:
+                raise serializers.ValidationError(
+                    {"provider": f"Provider type not supported {provider_type}"}
+                )
+        elif secret_type == ProviderSecret.TypeChoices.ROLE:
+            serializer = AWSRoleAssumptionProviderSecret(data=secret)
+        else:
+            raise serializers.ValidationError(
+                {"secret_type": f"Secret type not supported: {secret_type}"}
+            )
+        try:
+            serializer.is_valid(raise_exception=True)
+        except ValidationError as validation_error:
+            # Re-key field errors as "secret/<field>" so they point at the nested secret attribute
+            details = validation_error.detail.copy()
+            for key, value in details.items():
+                validation_error.detail[f"secret/{key}"] = value
+                del validation_error.detail[key]
+            raise validation_error
+
+
+class AwsProviderSecret(serializers.Serializer):
+    aws_access_key_id = serializers.CharField()
+    aws_secret_access_key = serializers.CharField()
+    aws_session_token = serializers.CharField(required=False)
+
+    class Meta:
+        resource_name = "provider-secrets"
+
+
+class AzureProviderSecret(serializers.Serializer):
+    client_id = serializers.CharField()
+    client_secret = serializers.CharField()
+    tenant_id = serializers.CharField()
+
+    class Meta:
+        resource_name = "provider-secrets"
+
+
+class GCPProviderSecret(serializers.Serializer):
+    client_id = serializers.CharField()
+    client_secret = serializers.CharField()
+    refresh_token = serializers.CharField()
+
+    class Meta:
+        resource_name = "provider-secrets"
+
+
+class KubernetesProviderSecret(serializers.Serializer):
+    kubeconfig_content = serializers.CharField()
+
+    class Meta:
+        resource_name = "provider-secrets"
+
+
+class AWSRoleAssumptionProviderSecret(serializers.Serializer):
+    role_arn = serializers.CharField()
+    external_id = serializers.CharField(required=False)
+    role_session_name = serializers.CharField(required=False)
+    session_duration = serializers.IntegerField(
+        required=False, min_value=900, max_value=43200
+    )
+    aws_access_key_id = serializers.CharField(required=False)
+    aws_secret_access_key = serializers.CharField(required=False)
+    aws_session_token = serializers.CharField(required=False)
+
+    class Meta:
+        resource_name = "provider-secrets"
+
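+# Illustrative role-assumption payload (placeholder values, not a real account or role):
+#   {"role_arn": "arn:aws:iam::123456789012:role/example-role", "external_id": "example-external-id"}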
+
+@extend_schema_field(
+    {
+        "oneOf": [
+            {
+                "type": "object",
+                "title": "AWS Static Credentials",
+                "properties": {
+                    "aws_access_key_id": {
+                        "type": "string",
+                        "description": "The AWS access key ID. Required for environments where no IAM role is being "
+                        "assumed and direct AWS access is needed.",
+                    },
+                    "aws_secret_access_key": {
+                        "type": "string",
+                        "description": "The AWS secret access key. Must accompany 'aws_access_key_id' to authorize "
+                        "access to AWS resources.",
+                    },
+                    "aws_session_token": {
+                        "type": "string",
+                        "description": "The session token associated with temporary credentials. Only needed for "
+                        "session-based or temporary AWS access.",
+                    },
+                },
+                "required": ["aws_access_key_id", "aws_secret_access_key"],
+            },
+            {
+                "type": "object",
+                "title": "AWS Assume Role",
+                "properties": {
+                    "role_arn": {
+                        "type": "string",
+                        "description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
+                        "assumption.",
+                    },
+                    "aws_access_key_id": {
+                        "type": "string",
+                        "description": "The AWS access key ID. Only required if the environment lacks pre-configured "
+                        "AWS credentials.",
+                    },
+                    "aws_secret_access_key": {
+                        "type": "string",
+                        "description": "The AWS secret access key. Required if 'aws_access_key_id' is provided or if "
+                        "no AWS credentials are pre-configured.",
+                    },
+                    "aws_session_token": {
+                        "type": "string",
+                        "description": "The session token for temporary credentials, if applicable.",
+                    },
+                    "session_duration": {
+                        "type": "integer",
+                        "minimum": 900,
+                        "maximum": 43200,
+                        "default": 3600,
+                        "description": "The duration (in seconds) for the role session.",
+                    },
+                    "external_id": {
+                        "type": "string",
+                        "description": "An optional identifier to enhance security for role assumption; may be "
+                        "required by the role administrator.",
+                    },
+                    "role_session_name": {
+                        "type": "string",
+                        "description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
+                        "The regex used to validate this parameter is a string of characters consisting of "
+                        "upper- and lower-case alphanumeric characters with no spaces. You can also include "
+                        "underscores or any of the following characters: =,.@-\n\n"
+                        "Examples:\n"
+                        "- MySession123\n"
+                        "- User_Session-1\n"
+                        "- Test.Session@2",
+                        "pattern": "^[a-zA-Z0-9=,.@_-]+$",
+                    },
+                },
+                "required": ["role_arn"],
+            },
+            {
+                "type": "object",
+                "title": "Azure Static Credentials",
+                "properties": {
+                    "client_id": {
+                        "type": "string",
+                        "description": "The Azure application (client) ID for authentication in Azure AD.",
+                    },
+                    "client_secret": {
+                        "type": "string",
+                        "description": "The client secret associated with the application (client) ID, providing "
+                        "secure access.",
+                    },
+                    "tenant_id": {
+                        "type": "string",
+                        "description": "The Azure tenant ID, representing the directory where the application is "
+                        "registered.",
+                    },
+                },
+                "required": ["client_id", "client_secret", "tenant_id"],
+            },
+            {
+                "type": "object",
+                "title": "GCP Static Credentials",
+                "properties": {
+                    "client_id": {
+                        "type": "string",
+                        "description": "The client ID from Google Cloud, used to identify the application for GCP "
+                        "access.",
+                    },
+                    "client_secret": {
+                        "type": "string",
+                        "description": "The client secret associated with the GCP client ID, required for secure "
+                        "access.",
+                    },
+                    "refresh_token": {
+                        "type": "string",
+                        "description": "A refresh token that allows the application to obtain new access tokens for "
+                        "extended use.",
+                    },
+                },
+                "required": ["client_id", "client_secret", "refresh_token"],
+            },
+            {
+                "type": "object",
+                "title": "Kubernetes Static Credentials",
+                "properties": {
+                    "kubeconfig_content": {
+                        "type": "string",
+                        "description": "The content of the Kubernetes kubeconfig file, encoded as a string.",
+                    }
+                },
+                "required": ["kubeconfig_content"],
+            },
+        ]
+    }
+)
+class ProviderSecretField(serializers.JSONField):
+    """JSON field whose OpenAPI representation is the oneOf schema declared above."""
+
+
+class ProviderSecretSerializer(RLSSerializer):
+    """
+    Serializer for the ProviderSecret model.
+    """
+
+    class Meta:
+        model = ProviderSecret
+        fields = [
+            "id",
+            "inserted_at",
+            "updated_at",
+            "name",
+            "secret_type",
+            "provider",
+            "url",
+        ]
+
+
+class ProviderSecretCreateSerializer(RLSSerializer, BaseWriteProviderSecretSerializer):
+    secret = ProviderSecretField(write_only=True)
+
+    class Meta:
+        model = ProviderSecret
+        fields = [
+            "inserted_at",
+            "updated_at",
+            "name",
+            "secret_type",
+            "secret",
+            "provider",
+        ]
+        extra_kwargs = {
+            "inserted_at": {"read_only": True},
+            "updated_at": {"read_only": True},
+        }
+
+    def validate(self, attrs):
+        provider = attrs.get("provider")
+        secret_type = attrs.get("secret_type")
+        secret = attrs.get("secret")
+
+        validated_attrs = super().validate(attrs)
+        self.validate_secret_based_on_provider(provider.provider, secret_type, secret)
+        return validated_attrs
+
+
+class ProviderSecretUpdateSerializer(BaseWriteProviderSecretSerializer):
+    secret = ProviderSecretField(write_only=True)
+
+    class Meta:
+        model = ProviderSecret
+        fields = [
+            "id",
+            "inserted_at",
+            "updated_at",
+            "name",
+            "secret_type",
+            "secret",
+            "provider",
+        ]
+        extra_kwargs = {
+            "inserted_at": {"read_only": True},
+            "updated_at": {"read_only": True},
+            "provider": {"read_only": True},
+            "secret_type": {"read_only": True},
+        }
+
+    def validate(self, attrs):
+        provider = self.instance.provider
+        secret_type = self.instance.secret_type
+        secret = attrs.get("secret")
+
+        validated_attrs = super().validate(attrs)
+        self.validate_secret_based_on_provider(provider.provider, secret_type, secret)
+        return validated_attrs
+
+
+# Invitations
+
+
+class InvitationSerializer(RLSSerializer):
+    """
+    Serializer for the Invitation model.
+    """
+
+    class Meta:
+        model = Invitation
+        fields = [
+            "id",
+            "inserted_at",
+            "updated_at",
+            "email",
+            "state",
+            "token",
+            "expires_at",
+            "inviter",
+            "url",
+        ]
+
+
+class InvitationBaseWriteSerializer(BaseWriteSerializer):
+    def validate_email(self, value):
+        user = User.objects.filter(email=value).first()
+        tenant_id = self.context["tenant_id"]
+        if user and Membership.objects.filter(user=user, tenant=tenant_id).exists():
+            raise ValidationError(
+                "The user may already be a member of the tenant or there was an issue with the "
+                "email provided."
+            )
+        if Invitation.objects.filter(
+            email=value, state=Invitation.State.PENDING
+        ).exists():
+            raise ValidationError(
+                "Unable to process your request. Please check the information provided and "
+                "try again."
+            )
+        return value
+
+    def validate_expires_at(self, value):
+        now = datetime.now(timezone.utc)
+        if value and value < now + timedelta(hours=24):
+            raise ValidationError(
+                "Expiry date must be at least 24 hours in the future."
+            )
+        return value
+
+
+class InvitationCreateSerializer(InvitationBaseWriteSerializer, RLSSerializer):
+    expires_at = serializers.DateTimeField(
+        required=False,
+        help_text="UTC. Default 7 days. If this attribute is "
+        "provided, it must be at least 24 hours in the "
+        "future.",
+    )
+
+    class Meta:
+        model = Invitation
+        fields = ["email", "expires_at", "state", "token", "inviter"]
+        extra_kwargs = {
+            "token": {"read_only": True},
+            "state": {"read_only": True},
+            "inviter": {"read_only": True},
+            "expires_at": {"required": False},
+        }
+
+    def create(self, validated_data):
+        inviter = self.context.get("request").user
+        validated_data["inviter"] = inviter
+        return super().create(validated_data)
+
+
+class InvitationUpdateSerializer(InvitationBaseWriteSerializer):
+    class Meta:
+        model = Invitation
+        fields = ["id", "email", "expires_at", "state", "token"]
+        extra_kwargs = {
+            "token": {"read_only": True},
+            "state": {"read_only": True},
+            "expires_at": {"required": False},
+            "email": {"required": False},
+        }
+
+
+class InvitationAcceptSerializer(RLSSerializer):
+    """Serializer for accepting an invitation."""
+
+    invitation_token = serializers.CharField(write_only=True)
+
+    class Meta:
+        model = Invitation
+        fields = ["invitation_token"]
+
+
+# Compliance overview
+
+
+class ComplianceOverviewSerializer(RLSSerializer):
+    """
+    Serializer for the ComplianceOverview model.
+    """
+
+    requirements_status = serializers.SerializerMethodField(
+        read_only=True, method_name="get_requirements_status"
+    )
+    provider_type = serializers.SerializerMethodField(read_only=True)
+
+    class Meta:
+        model = ComplianceOverview
+        fields = [
+            "id",
+            "inserted_at",
+            "compliance_id",
+            "framework",
+            "version",
+            "requirements_status",
+            "region",
+            "provider_type",
+            "scan",
+            "url",
+        ]
+
+    @extend_schema_field(
+        {
+            "type": "object",
+            "properties": {
+                "passed": {"type": "integer"},
+                "failed": {"type": "integer"},
+                "manual": {"type": "integer"},
+                "total": {"type": "integer"},
+            },
+        }
+    )
+    def get_requirements_status(self, obj):
+        return {
+            "passed": obj.requirements_passed,
+            "failed": obj.requirements_failed,
+            "manual": obj.requirements_manual,
+            "total": obj.total_requirements,
+        }
+
+    @extend_schema_field(serializers.CharField(allow_null=True))
+    def get_provider_type(self, obj):
+        """
+        Retrieves the provider type from scan.provider.provider.
+        """
+        try:
+            return obj.scan.provider.provider
+        except AttributeError:
+            return None
+
+
+class ComplianceOverviewFullSerializer(ComplianceOverviewSerializer):
+    requirements = serializers.SerializerMethodField(read_only=True)
+
+    class Meta(ComplianceOverviewSerializer.Meta):
+        fields = ComplianceOverviewSerializer.Meta.fields + [
+            "description",
+            "requirements",
+        ]
+
+    @extend_schema_field(
+        {
+            "type": "object",
+            "properties": {
+                "requirement_id": {
+                    "type": "object",
+                    "properties": {
+                        "name": {"type": "string"},
+                        "checks": {
+                            "type": "object",
+                            "properties": {
+                                "check_name": {
+                                    "type": "object",
+                                    "properties": {
+                                        "status": {
+                                            "type": "string",
+                                            "enum": ["PASS", "FAIL", None],
+                                        },
+                                    },
+                                }
+                            },
+                            "description": "Each key in the 'checks' object is a check name, with values as "
+                            "'PASS', 'FAIL', or null.",
+                        },
+                        "status": {
+                            "type": "string",
+                            "enum": ["PASS", "FAIL", "MANUAL"],
+                        },
+                        "attributes": {
+                            "type": "array",
+                            "items": {
+                                "type": "object",
+                            },
+                        },
+                        "description": {"type": "string"},
+                        "checks_status": {
+                            "type": "object",
+                            "properties": {
+                                "total": {"type": "integer"},
+                                "pass": {"type": "integer"},
+                                "fail": {"type": "integer"},
+                                "manual": {"type": "integer"},
+                            },
+                        },
+                    },
+                }
+            },
+        }
+    )
+    def get_requirements(self, obj):
+        """
+        Returns the detailed structure of requirements.
+        """
+        return obj.requirements
+
+
+# Overviews
+
+
+class OverviewProviderSerializer(serializers.Serializer):
+    id = serializers.CharField(source="provider")
+    findings = serializers.SerializerMethodField(read_only=True)
+    resources = serializers.SerializerMethodField(read_only=True)
+
+    class JSONAPIMeta:
+        resource_name = "provider-overviews"
+
+    def get_root_meta(self, _resource, _many):
+        return {"version": "v1"}
+
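+    # Rows are expected to be dict-like aggregates exposing "findings_passed",
+    # "findings_failed", "findings_manual", "total_findings" and "total_resources";
+    # e.g. an annotated .values() queryset would fit (an assumption from the key access).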
+    @extend_schema_field(
+        {
+            "type": "object",
+            "properties": {
+                "pass": {"type": "integer"},
+                "fail": {"type": "integer"},
+                "manual": {"type": "integer"},
+                "total": {"type": "integer"},
+            },
+        }
+    )
+    def get_findings(self, obj):
+        return {
+            "pass": obj["findings_passed"],
+            "fail": obj["findings_failed"],
+            "manual": obj["findings_manual"],
+            "total": obj["total_findings"],
+        }
+
+    @extend_schema_field(
+        {
+            "type": "object",
+            "properties": {
+                "total": {"type": "integer"},
+            },
+        }
+    )
+    def get_resources(self, obj):
+        return {
+            "total": obj["total_resources"],
+        }
diff --git a/api/src/backend/api/v1/urls.py b/api/src/backend/api/v1/urls.py
new file mode 100644
index 0000000000..c212c95e06
--- /dev/null
+++ b/api/src/backend/api/v1/urls.py
@@ -0,0 +1,86 @@
+from django.urls import path, include
+from drf_spectacular.views import SpectacularRedocView
+from rest_framework_nested import routers
+
+from api.v1.views import (
+    CustomTokenObtainView,
+    CustomTokenRefreshView,
+    SchemaView,
+    UserViewSet,
+    TenantViewSet,
+    TenantMembersViewSet,
+    MembershipViewSet,
+    ProviderViewSet,
+    ScanViewSet,
+    TaskViewSet,
+    ResourceViewSet,
+    FindingViewSet,
+    ProviderGroupViewSet,
+    ProviderSecretViewSet,
+    InvitationViewSet,
+    InvitationAcceptViewSet,
+    OverviewViewSet,
+    ComplianceOverviewViewSet,
+)
+
+router = routers.DefaultRouter(trailing_slash=False)
+
+router.register(r"users", UserViewSet, basename="user")
+router.register(r"tenants", TenantViewSet, basename="tenant")
+router.register(r"providers", ProviderViewSet, basename="provider")
+router.register(r"provider_groups", ProviderGroupViewSet, basename="providergroup")
+router.register(r"scans", ScanViewSet, basename="scan")
+router.register(r"tasks", TaskViewSet, basename="task")
+router.register(r"resources", ResourceViewSet, basename="resource")
+router.register(r"findings", FindingViewSet, basename="finding")
+router.register(
+    r"compliance-overviews", ComplianceOverviewViewSet, basename="complianceoverview"
+)
+router.register(r"overviews", OverviewViewSet, basename="overview")
+
+tenants_router = routers.NestedSimpleRouter(router, r"tenants", lookup="tenant")
+tenants_router.register(
+    r"memberships", TenantMembersViewSet, basename="tenant-membership"
+)
+
+users_router = routers.NestedSimpleRouter(router, r"users", lookup="user")
+users_router.register(r"memberships", MembershipViewSet, basename="user-membership")
+
+urlpatterns = [
+    path("tokens", CustomTokenObtainView.as_view(), name="token-obtain"),
+    path("tokens/refresh", CustomTokenRefreshView.as_view(), name="token-refresh"),
+    path(
+        "providers/secrets",
+        ProviderSecretViewSet.as_view({"get": "list", "post": "create"}),
+        name="providersecret-list",
+    ),
+    path(
+        "providers/secrets/",
+        ProviderSecretViewSet.as_view(
+            {"get": "retrieve", "patch": "partial_update", "delete": "destroy"}
+        ),
+        name="providersecret-detail",
+    ),
+    path(
+        "tenants/invitations",
+        InvitationViewSet.as_view({"get": "list", "post": "create"}),
+        name="invitation-list",
+    ),
+    path(
+        "tenants/invitations/",
+        InvitationViewSet.as_view(
+            {"get": "retrieve", "patch": "partial_update", "delete": "destroy"}
+        ),
+        name="invitation-detail",
+    ),
+    path(
+        "invitations/accept",
+        InvitationAcceptViewSet.as_view({"post": "accept"}),
+        name="invitation-accept",
+    ),
+    path("", include(router.urls)),
+    path("", include(tenants_router.urls)),
+    path("", include(users_router.urls)),
+    path("schema", SchemaView.as_view(), name="schema"),
+    path("docs", SpectacularRedocView.as_view(url_name="schema"), name="docs"),
+]
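+
+# With trailing_slash=False, the routers above expose paths such as GET /providers,
+# POST /scans and GET /tenants/<tenant_pk>/memberships; the final prefix depends on
+# where this module is included in the project URL configuration.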
diff --git a/api/src/backend/api/v1/views.py b/api/src/backend/api/v1/views.py
new file mode 100644
index 0000000000..70af1069ab
--- /dev/null
+++ b/api/src/backend/api/v1/views.py
@@ -0,0 +1,1384 @@
+from celery.result import AsyncResult
+from django.conf import settings as django_settings
+from django.contrib.postgres.search import SearchQuery
+from django.db import transaction
+from django.db.models import Prefetch, Subquery, OuterRef, Count, Q, F
+from django.urls import reverse
+from django.utils.decorators import method_decorator
+from django.views.decorators.cache import cache_control
+from drf_spectacular.settings import spectacular_settings
+from drf_spectacular.utils import (
+    extend_schema,
+    extend_schema_view,
+    OpenApiParameter,
+    OpenApiResponse,
+    OpenApiTypes,
+)
+from drf_spectacular.views import SpectacularAPIView
+from rest_framework import status, permissions
+from rest_framework.decorators import action
+from rest_framework.exceptions import (
+    MethodNotAllowed,
+    NotFound,
+    PermissionDenied,
+    ValidationError,
+)
+from rest_framework.generics import get_object_or_404, GenericAPIView
+from rest_framework_json_api.views import Response
+from rest_framework_simplejwt.exceptions import InvalidToken
+from rest_framework_simplejwt.exceptions import TokenError
+
+from api.base_views import BaseTenantViewset, BaseRLSViewSet, BaseUserViewset
+from api.db_router import MainRouter
+from api.filters import (
+    ProviderFilter,
+    ProviderGroupFilter,
+    TenantFilter,
+    MembershipFilter,
+    ScanFilter,
+    TaskFilter,
+    ResourceFilter,
+    FindingFilter,
+    ProviderSecretFilter,
+    InvitationFilter,
+    UserFilter,
+    ComplianceOverviewFilter,
+)
+from api.models import (
+    StatusChoices,
+    User,
+    Membership,
+    Provider,
+    ProviderGroup,
+    ProviderGroupMembership,
+    Scan,
+    Task,
+    Resource,
+    Finding,
+    ProviderSecret,
+    Invitation,
+    ComplianceOverview,
+)
+from api.pagination import ComplianceOverviewPagination
+from api.rls import Tenant
+from api.utils import validate_invitation
+from api.uuid_utils import datetime_to_uuid7
+from api.v1.serializers import (
+    TokenSerializer,
+    TokenRefreshSerializer,
+    UserSerializer,
+    UserCreateSerializer,
+    UserUpdateSerializer,
+    MembershipSerializer,
+    ProviderGroupSerializer,
+    ProviderGroupUpdateSerializer,
+    ProviderGroupMembershipUpdateSerializer,
+    ProviderSerializer,
+    ProviderCreateSerializer,
+    ProviderUpdateSerializer,
+    TenantSerializer,
+    TaskSerializer,
+    ScanSerializer,
+    ScanCreateSerializer,
+    ScanUpdateSerializer,
+    ResourceSerializer,
+    FindingSerializer,
+    ProviderSecretSerializer,
+    ProviderSecretUpdateSerializer,
+    ProviderSecretCreateSerializer,
+    InvitationSerializer,
+    InvitationCreateSerializer,
+    InvitationUpdateSerializer,
+    InvitationAcceptSerializer,
+    ComplianceOverviewSerializer,
+    ComplianceOverviewFullSerializer,
+    OverviewProviderSerializer,
+)
+from tasks.beat import schedule_provider_scan
+from tasks.tasks import (
+    check_provider_connection_task,
+    delete_provider_task,
+    perform_scan_task,
+)
+
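+# Shared Cache-Control policy (max-age plus stale-while-revalidate) applied to the
+# cacheable list/retrieve endpoints below; both values come from Django settings.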
+CACHE_DECORATOR = cache_control(
+    max_age=django_settings.CACHE_MAX_AGE,
+    stale_while_revalidate=django_settings.CACHE_STALE_WHILE_REVALIDATE,
+)
+
+
+@extend_schema(
+    tags=["Token"],
+    summary="Obtain a token",
+    description="Obtain a token by providing valid credentials and an optional tenant ID.",
+)
+class CustomTokenObtainView(GenericAPIView):
+    resource_name = "tokens"
+    serializer_class = TokenSerializer
+    http_method_names = ["post"]
+
+    def post(self, request):
+        serializer = TokenSerializer(data=request.data)
+
+        try:
+            serializer.is_valid(raise_exception=True)
+        except TokenError as e:
+            raise InvalidToken(e.args[0])
+
+        return Response(
+            data={"type": "tokens", "attributes": serializer.validated_data},
+            status=status.HTTP_200_OK,
+        )
+
+
+@extend_schema(
+    tags=["Token"],
+    summary="Refresh a token",
+    description="Refresh an access token by providing a valid refresh token. Former refresh tokens are invalidated "
+    "when a new one is issued.",
+)
+class CustomTokenRefreshView(GenericAPIView):
+    resource_name = "tokens-refresh"
+    serializer_class = TokenRefreshSerializer
+    http_method_names = ["post"]
+
+    def post(self, request):
+        serializer = TokenRefreshSerializer(data=request.data)
+
+        try:
+            serializer.is_valid(raise_exception=True)
+        except TokenError as e:
+            raise InvalidToken(e.args[0])
+
+        return Response(
+            data={"type": "tokens-refresh", "attributes": serializer.validated_data},
+            status=status.HTTP_200_OK,
+        )
+
+
+@extend_schema(exclude=True)
+class SchemaView(SpectacularAPIView):
+    serializer_class = None
+
+    def get(self, request, *args, **kwargs):
+        spectacular_settings.TITLE = "Prowler API"
+        spectacular_settings.VERSION = "1.0.0"
+        spectacular_settings.DESCRIPTION = (
+            "Prowler API specification.\n\nThis file is auto-generated."
+        )
+        spectacular_settings.TAGS = [
+            {"name": "User", "description": "Endpoints for managing user accounts."},
+            {
+                "name": "Token",
+                "description": "Endpoints for token management, including obtaining a new token and "
+                "refreshing an existing token for authentication purposes.",
+            },
+            {
+                "name": "Tenant",
+                "description": "Endpoints for managing tenants, along with their memberships.",
+            },
+            {
+                "name": "Invitation",
+                "description": "Endpoints for tenant invitations management, allowing retrieval and filtering of "
+                "invitations, creating new invitations, accepting and revoking them.",
+            },
+            {
+                "name": "Provider",
+                "description": "Endpoints for managing providers (AWS, GCP, Azure, etc...).",
+            },
+            {
+                "name": "Provider Group",
+                "description": "Endpoints for managing provider groups.",
+            },
+            {
+                "name": "Scan",
+                "description": "Endpoints for triggering manual scans and viewing scan results.",
+            },
+            {
+                "name": "Resource",
+                "description": "Endpoints for managing resources discovered by scans, allowing "
+                "retrieval and filtering of resource information.",
+            },
+            {
+                "name": "Finding",
+                "description": "Endpoints for managing findings, allowing retrieval and filtering of "
+                "findings that result from scans.",
+            },
+            {
+                "name": "Overview",
+                "description": "Endpoints for retrieving aggregated summaries of resources from the system.",
+            },
+            {
+                "name": "Compliance Overview",
+                "description": "Endpoints for checking the compliance overview, allowing filtering by scan, provider or"
+                " compliance framework ID.",
+            },
+            {
+                "name": "Task",
+                "description": "Endpoints for task management, allowing retrieval of task status and "
+                "revoking tasks that have not started.",
+            },
+        ]
+        return super().get(request, *args, **kwargs)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["User"],
+        summary="List all users",
+        description="Retrieve a list of all users with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["User"],
+        summary="Retrieve a user's information",
+        description="Fetch detailed information about an authenticated user.",
+    ),
+    create=extend_schema(
+        tags=["User"],
+        summary="Register a new user",
+        description="Create a new user account by providing the necessary registration details.",
+    ),
+    partial_update=extend_schema(
+        tags=["User"],
+        summary="Update user information",
+        description="Partially update information about a user.",
+    ),
+    destroy=extend_schema(
+        tags=["User"],
+        summary="Delete a user account",
+        description="Remove a user account from the system.",
+    ),
+    me=extend_schema(
+        tags=["User"],
+        summary="Retrieve the current user's information",
+        description="Fetch detailed information about the authenticated user.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+class UserViewSet(BaseUserViewset):
+    serializer_class = UserSerializer
+    http_method_names = ["get", "post", "patch", "delete"]
+    filterset_class = UserFilter
+    ordering = ["-date_joined"]
+    ordering_fields = ["name", "email", "company_name", "date_joined", "is_active"]
+
+    def get_queryset(self):
+        # If called during schema generation, return an empty queryset
+        if getattr(self, "swagger_fake_view", False):
+            return User.objects.none()
+        return User.objects.filter(membership__tenant__id=self.request.tenant_id)
+
+    def get_permissions(self):
+        if self.action == "create":
+            permission_classes = [permissions.AllowAny]
+        else:
+            permission_classes = self.permission_classes
+        return [permission() for permission in permission_classes]
+
+    def get_serializer_class(self):
+        if self.action == "create":
+            return UserCreateSerializer
+        elif self.action == "partial_update":
+            return UserUpdateSerializer
+        else:
+            return UserSerializer
+
+    @action(detail=False, methods=["get"], url_name="me")
+    def me(self, request):
+        user = self.get_queryset().first()
+        serializer = UserSerializer(user, context=self.get_serializer_context())
+        return Response(
+            data=serializer.data,
+            status=status.HTTP_200_OK,
+        )
+
+    @extend_schema(
+        parameters=[
+            OpenApiParameter(
+                name="invitation_token",
+                description="Optional invitation code for joining an existing tenant.",
+                required=False,
+                type={"type": "string", "example": "F3NMFPNDZHR4Z9"},
+                location=OpenApiParameter.QUERY,
+            ),
+        ]
+    )
+    def create(self, request, *args, **kwargs):
+        invitation_token = request.query_params.get("invitation_token", None)
+        invitation = None
+
+        serializer = self.get_serializer(
+            data=request.data, context=self.get_serializer_context()
+        )
+        serializer.is_valid(raise_exception=True)
+
+        if invitation_token:
+            invitation = validate_invitation(
+                invitation_token, serializer.validated_data["email"]
+            )
+
+        # Proceed with creating the user and membership
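+        # (writes use the admin DB alias, presumably so signup inserts are not blocked
+        # by tenant row-level security before a membership exists)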
+        user = User.objects.db_manager(MainRouter.admin_db).create_user(
+            **serializer.validated_data
+        )
+        tenant = (
+            invitation.tenant
+            if invitation_token
+            else Tenant.objects.using(MainRouter.admin_db).create(
+                name=f"{user.email.split('@')[0]} default tenant"
+            )
+        )
+        role = (
+            Membership.RoleChoices.MEMBER
+            if invitation_token
+            else Membership.RoleChoices.OWNER
+        )
+        Membership.objects.using(MainRouter.admin_db).create(
+            user=user, tenant=tenant, role=role
+        )
+        if invitation:
+            invitation.state = Invitation.State.ACCEPTED
+            invitation.save(using=MainRouter.admin_db)
+        return Response(data=UserSerializer(user).data, status=status.HTTP_201_CREATED)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Tenant"],
+        summary="List all tenants",
+        description="Retrieve a list of all tenants with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Tenant"],
+        summary="Retrieve data from a tenant",
+        description="Fetch detailed information about a specific tenant by their ID.",
+    ),
+    create=extend_schema(
+        tags=["Tenant"],
+        summary="Create a new tenant",
+        description="Add a new tenant to the system by providing the required tenant details.",
+    ),
+    partial_update=extend_schema(
+        tags=["Tenant"],
+        summary="Partially update a tenant",
+        description="Update certain fields of an existing tenant's information without affecting other fields.",
+    ),
+    destroy=extend_schema(
+        tags=["Tenant"],
+        summary="Delete a tenant",
+        description="Remove a tenant from the system by their ID.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class TenantViewSet(BaseTenantViewset):
+    queryset = Tenant.objects.all()
+    serializer_class = TenantSerializer
+    http_method_names = ["get", "post", "patch", "delete"]
+    filterset_class = TenantFilter
+    search_fields = ["name"]
+    ordering = ["-inserted_at"]
+    ordering_fields = ["name", "inserted_at", "updated_at"]
+
+    def get_queryset(self):
+        return Tenant.objects.all()
+
+    def create(self, request, *args, **kwargs):
+        serializer = self.get_serializer(data=request.data)
+        serializer.is_valid(raise_exception=True)
+        tenant = serializer.save()
+        Membership.objects.create(
+            user=self.request.user, tenant=tenant, role=Membership.RoleChoices.OWNER
+        )
+        return Response(data=serializer.data, status=status.HTTP_201_CREATED)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["User"],
+        summary="List user memberships",
+        description="Retrieve a list of all user memberships with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["User"],
+        summary="Retrieve membership data from the user",
+        description="Fetch detailed information about a specific user membership by their ID.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+class MembershipViewSet(BaseTenantViewset):
+    http_method_names = ["get"]
+    serializer_class = MembershipSerializer
+    queryset = Membership.objects.all()
+    filterset_class = MembershipFilter
+    ordering = ["date_joined"]
+    ordering_fields = [
+        "tenant",
+        "role",
+        "date_joined",
+    ]
+
+    def get_queryset(self):
+        user = self.request.user
+        return Membership.objects.filter(user_id=user.id)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        summary="List tenant memberships",
+        description="List the membership details of users in a tenant you are a part of.",
+        tags=["Tenant"],
+        parameters=[
+            OpenApiParameter(
+                name="tenant_pk",
+                type=OpenApiTypes.UUID,
+                location=OpenApiParameter.PATH,
+                description="Tenant ID",
+            ),
+        ],
+    ),
+    destroy=extend_schema(
+        summary="Delete tenant memberships",
+        description="Delete the membership details of users in a tenant. You need to be one of the owners to delete a "
+        "membership that is not yours. If you are the last owner of a tenant, you cannot delete your own "
+        "membership.",
+        tags=["Tenant"],
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+class TenantMembersViewSet(BaseTenantViewset):
+    http_method_names = ["get", "delete"]
+    serializer_class = MembershipSerializer
+    queryset = Membership.objects.none()
+
+    def get_queryset(self):
+        tenant = self.get_tenant()
+        requesting_membership = self.get_requesting_membership(tenant)
+
+        if requesting_membership.role == Membership.RoleChoices.OWNER:
+            return Membership.objects.filter(tenant=tenant)
+        else:
+            return Membership.objects.filter(tenant=tenant, user=self.request.user)
+
+    def get_tenant(self):
+        tenant_id = self.kwargs.get("tenant_pk")
+        tenant = get_object_or_404(Tenant, id=tenant_id)
+        return tenant
+
+    def get_requesting_membership(self, tenant):
+        try:
+            membership = Membership.objects.get(user=self.request.user, tenant=tenant)
+        except Membership.DoesNotExist:
+            raise NotFound("Membership does not exist.")
+        return membership
+
+    @extend_schema(exclude=True)
+    def retrieve(self, request, *args, **kwargs):
+        raise MethodNotAllowed(method="GET")
+
+    def destroy(self, request, *args, **kwargs):
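+        # Owners may delete any membership except their own when they are the last
+        # owner; non-owners may only delete their own membership.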
+        tenant = self.get_tenant()
+        membership_to_delete = get_object_or_404(
+            Membership, tenant=tenant, id=kwargs.get("pk")
+        )
+        requesting_membership = self.get_requesting_membership(tenant)
+
+        if requesting_membership.role == Membership.RoleChoices.OWNER:
+            if membership_to_delete.user == request.user:
+                # Check if the user is the last owner
+                other_owners = Membership.objects.filter(
+                    tenant=tenant, role=Membership.RoleChoices.OWNER
+                ).exclude(user=request.user)
+                if not other_owners.exists():
+                    raise PermissionDenied(
+                        "You cannot delete your own membership as the last owner."
+                    )
+        else:
+            if membership_to_delete.user != request.user:
+                raise PermissionDenied(
+                    "You do not have permission to delete this membership."
+                )
+
+        membership_to_delete.delete()
+        return Response(status=status.HTTP_204_NO_CONTENT)
+
+
+@extend_schema(tags=["Provider Group"])
+@extend_schema_view(
+    list=extend_schema(
+        summary="List all provider groups",
+        description="Retrieve a list of all provider groups with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        summary="Retrieve data from a provider group",
+        description="Fetch detailed information about a specific provider group by their ID.",
+    ),
+    create=extend_schema(
+        summary="Create a new provider group",
+        description="Add a new provider group to the system by providing the required provider group details.",
+    ),
+    partial_update=extend_schema(
+        summary="Partially update a provider group",
+        description="Update certain fields of an existing provider group's information without affecting other fields.",
+        request=ProviderGroupUpdateSerializer,
+        responses={200: ProviderGroupSerializer},
+    ),
+    destroy=extend_schema(
+        summary="Delete a provider group",
+        description="Remove a provider group from the system by their ID.",
+    ),
+    update=extend_schema(exclude=True),
+)
+class ProviderGroupViewSet(BaseRLSViewSet):
+    queryset = ProviderGroup.objects.all()
+    serializer_class = ProviderGroupSerializer
+    filterset_class = ProviderGroupFilter
+    http_method_names = ["get", "post", "patch", "put", "delete"]
+    ordering = ["inserted_at"]
+
+    def get_queryset(self):
+        return ProviderGroup.objects.prefetch_related("providers")
+
+    def get_serializer_class(self):
+        if self.action == "partial_update":
+            return ProviderGroupUpdateSerializer
+        elif self.action == "providers":
+            if hasattr(self, "response_serializer_class"):
+                return self.response_serializer_class
+            return ProviderGroupMembershipUpdateSerializer
+        return super().get_serializer_class()
+
+    @extend_schema(
+        tags=["Provider Group"],
+        summary="Add providers to a provider group",
+        description="Add one or more providers to an existing provider group.",
+        request=ProviderGroupMembershipUpdateSerializer,
+        responses={200: OpenApiResponse(response=ProviderGroupSerializer)},
+    )
+    @action(detail=True, methods=["put"], url_name="providers")
+    def providers(self, request, pk=None):
+        provider_group = self.get_object()
+
+        # Validate input data
+        serializer = self.get_serializer_class()(
+            data=request.data,
+            context=self.get_serializer_context(),
+        )
+        serializer.is_valid(raise_exception=True)
+
+        provider_ids = serializer.validated_data["provider_ids"]
+
+        # Replace the group's memberships: delete the existing rows for this tenant,
+        # then bulk-create the new set
+        ProviderGroupMembership.objects.filter(
+            provider_group=provider_group, tenant_id=request.tenant_id
+        ).delete()
+
+        provider_group_memberships = [
+            ProviderGroupMembership(
+                tenant_id=self.request.tenant_id,
+                provider_group=provider_group,
+                provider_id=provider_id,
+            )
+            for provider_id in provider_ids
+        ]
+
+        ProviderGroupMembership.objects.bulk_create(
+            provider_group_memberships, ignore_conflicts=True
+        )
+
+        # Return the updated provider group with providers
+        provider_group.refresh_from_db()
+        self.response_serializer_class = ProviderGroupSerializer
+        response_serializer = ProviderGroupSerializer(
+            provider_group, context=self.get_serializer_context()
+        )
+        return Response(data=response_serializer.data, status=status.HTTP_200_OK)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Provider"],
+        summary="List all providers",
+        description="Retrieve a list of all providers with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Provider"],
+        summary="Retrieve data from a provider",
+        description="Fetch detailed information about a specific provider by their ID.",
+    ),
+    create=extend_schema(
+        tags=["Provider"],
+        summary="Create a new provider",
+        description="Add a new provider to the system by providing the required provider details.",
+    ),
+    partial_update=extend_schema(
+        tags=["Provider"],
+        summary="Partially update a provider",
+        description="Update certain fields of an existing provider's information without affecting other fields.",
+        request=ProviderUpdateSerializer,
+        responses={200: ProviderSerializer},
+    ),
+    destroy=extend_schema(
+        tags=["Provider"],
+        summary="Delete a provider",
+        description="Remove a provider from the system by their ID.",
+        responses={202: OpenApiResponse(response=TaskSerializer)},
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class ProviderViewSet(BaseRLSViewSet):
+    queryset = Provider.objects.all()
+    serializer_class = ProviderSerializer
+    http_method_names = ["get", "post", "patch", "delete"]
+    filterset_class = ProviderFilter
+    search_fields = ["provider", "uid", "alias"]
+    ordering = ["-inserted_at"]
+    ordering_fields = [
+        "provider",
+        "uid",
+        "alias",
+        "connected",
+        "inserted_at",
+        "updated_at",
+    ]
+
+    def get_queryset(self):
+        return Provider.objects.all()
+
+    def get_serializer_class(self):
+        if self.action == "create":
+            return ProviderCreateSerializer
+        elif self.action == "partial_update":
+            return ProviderUpdateSerializer
+        elif self.action in ["connection", "destroy"]:
+            return TaskSerializer
+        return super().get_serializer_class()
+
+    def partial_update(self, request, *args, **kwargs):
+        instance = self.get_object()
+        serializer = self.get_serializer(
+            instance,
+            data=request.data,
+            partial=True,
+            context=self.get_serializer_context(),
+        )
+        serializer.is_valid(raise_exception=True)
+        serializer.save()
+        read_serializer = ProviderSerializer(
+            instance, context=self.get_serializer_context()
+        )
+        return Response(data=read_serializer.data, status=status.HTTP_200_OK)
+
+    @extend_schema(
+        tags=["Provider"],
+        summary="Check connection",
+        description="Try to verify connection. For instance, Role & Credentials are set correctly",
+        request=None,
+        responses={202: OpenApiResponse(response=TaskSerializer)},
+    )
+    @action(detail=True, methods=["post"], url_name="connection")
+    def connection(self, request, pk=None):
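+        # The check runs asynchronously: queue a Celery task and reply 202 with a
+        # Content-Location header pointing at the task resource.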
+        get_object_or_404(Provider, pk=pk)
+        with transaction.atomic():
+            task = check_provider_connection_task.delay(
+                provider_id=pk, tenant_id=request.tenant_id
+            )
+        prowler_task = Task.objects.get(id=task.id)
+        serializer = TaskSerializer(prowler_task)
+        return Response(
+            data=serializer.data,
+            status=status.HTTP_202_ACCEPTED,
+            headers={
+                "Content-Location": reverse(
+                    "task-detail", kwargs={"pk": prowler_task.id}
+                )
+            },
+        )
+
+    def destroy(self, request, *args, pk=None, **kwargs):
+        get_object_or_404(Provider, pk=pk)
+        with transaction.atomic():
+            task = delete_provider_task.delay(
+                provider_id=pk, tenant_id=request.tenant_id
+            )
+        prowler_task = Task.objects.get(id=task.id)
+        serializer = TaskSerializer(prowler_task)
+        return Response(
+            data=serializer.data,
+            status=status.HTTP_202_ACCEPTED,
+            headers={
+                "Content-Location": reverse(
+                    "task-detail", kwargs={"pk": prowler_task.id}
+                )
+            },
+        )
+
+    def create(self, request, *args, **kwargs):
+        serializer = self.get_serializer(data=request.data)
+        serializer.is_valid(raise_exception=True)
+        provider = serializer.save()
+        # Schedule a daily scan for the new provider
+        schedule_provider_scan(provider)
+        return Response(data=serializer.data, status=status.HTTP_201_CREATED)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Scan"],
+        summary="List all scans",
+        description="Retrieve a list of all scans with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Scan"],
+        summary="Retrieve data from a specific scan",
+        description="Fetch detailed information about a specific scan by its ID.",
+    ),
+    partial_update=extend_schema(
+        tags=["Scan"],
+        summary="Partially update a scan",
+        description="Update certain fields of an existing scan without affecting other fields.",
+    ),
+    create=extend_schema(
+        tags=["Scan"],
+        summary="Trigger a manual scan",
+        description=(
+            "Trigger a manual scan by providing the required scan details. "
+            "If `scanner_args` are not provided, the system will automatically use the default settings "
+            "from the associated provider. If you do provide `scanner_args`, these settings will be "
+            "merged with the provider's defaults. This means that your provided settings will override "
+            "the defaults only where they conflict, while the rest of the default settings will remain intact."
+        ),
+        request=ScanCreateSerializer,
+        responses={202: OpenApiResponse(response=TaskSerializer)},
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class ScanViewSet(BaseRLSViewSet):
+    queryset = Scan.objects.all()
+    serializer_class = ScanSerializer
+    http_method_names = ["get", "post", "patch"]
+    filterset_class = ScanFilter
+    ordering = ["-inserted_at"]
+    ordering_fields = [
+        "name",
+        "trigger",
+        "attempted_at",
+        "scheduled_at",
+        "inserted_at",
+        "updated_at",
+    ]
+
+    def get_queryset(self):
+        return Scan.objects.all()
+
+    def get_serializer_class(self):
+        if self.action == "create":
+            if hasattr(self, "response_serializer_class"):
+                return self.response_serializer_class
+            return ScanCreateSerializer
+        elif self.action == "partial_update":
+            return ScanUpdateSerializer
+        return super().get_serializer_class()
+
+    def partial_update(self, request, *args, **kwargs):
+        instance = self.get_object()
+        serializer = self.get_serializer(
+            instance,
+            data=request.data,
+            partial=True,
+            context=self.get_serializer_context(),
+        )
+        serializer.is_valid(raise_exception=True)
+        serializer.save()
+        read_serializer = ScanSerializer(
+            instance, context=self.get_serializer_context()
+        )
+        return Response(data=read_serializer.data, status=status.HTTP_200_OK)
+
+    def create(self, request, *args, **kwargs):
+        input_serializer = self.get_serializer(data=request.data)
+        input_serializer.is_valid(raise_exception=True)
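+        # Save the scan in its own transaction so it is committed before the Celery task is dispatched.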
+        with transaction.atomic():
+            scan = input_serializer.save()
+        with transaction.atomic():
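+            # The API Task record fetched below is created during dispatch
+            # (see RLSTask.apply_async in config/celery.py).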
+            task = perform_scan_task.delay(
+                tenant_id=request.tenant_id,
+                scan_id=str(scan.id),
+                provider_id=str(scan.provider_id),
+                # Disabled for now
+                # checks_to_execute=scan.scanner_args.get("checks_to_execute"),
+            )
+
+        scan.task_id = task.id
+        scan.save(update_fields=["task_id"])
+
+        prowler_task = Task.objects.get(id=task.id)
+        self.response_serializer_class = TaskSerializer
+        output_serializer = self.get_serializer(prowler_task)
+
+        return Response(
+            data=output_serializer.data,
+            status=status.HTTP_202_ACCEPTED,
+            headers={
+                "Content-Location": reverse(
+                    "task-detail", kwargs={"pk": prowler_task.id}
+                )
+            },
+        )
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Task"],
+        summary="List all tasks",
+        description="Retrieve a list of all tasks with options for filtering by name, state, and other criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Task"],
+        summary="Retrieve data from a specific task",
+        description="Fetch detailed information about a specific task by its ID.",
+    ),
+    destroy=extend_schema(
+        tags=["Task"],
+        summary="Revoke a task",
+        description="Try to revoke a task using its ID. Only tasks that are not yet in progress can be revoked.",
+        responses={202: OpenApiResponse(response=TaskSerializer)},
+    ),
+)
+class TaskViewSet(BaseRLSViewSet):
+    queryset = Task.objects.all()
+    serializer_class = TaskSerializer
+    http_method_names = ["get", "delete"]
+    filterset_class = TaskFilter
+    search_fields = ["name"]
+    ordering = ["-inserted_at"]
+    ordering_fields = ["inserted_at", "completed_at", "name", "state"]
+
+    def get_queryset(self):
+        return Task.objects.annotate(
+            name=F("task_runner_task__task_name"), state=F("task_runner_task__status")
+        )
+
+    def destroy(self, request, *args, pk=None, **kwargs):
+        task = get_object_or_404(Task, pk=pk)
+        if task.task_runner_task.status not in ["PENDING", "RECEIVED"]:
+            serializer = TaskSerializer(task)
+            return Response(
+                data={
+                    "detail": f"Task cannot be revoked. Status: '{serializer.data.get('state')}'"
+                },
+                status=status.HTTP_400_BAD_REQUEST,
+                headers={
+                    "Content-Location": reverse("task-detail", kwargs={"pk": task.id})
+                },
+            )
+
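+        # Celery's revoke() without terminate=True only prevents tasks that have not
+        # started yet, which matches the state check above.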
+        task_instance = AsyncResult(pk)
+        task_instance.revoke()
+        task.refresh_from_db()
+        serializer = TaskSerializer(task)
+        return Response(
+            data=serializer.data,
+            status=status.HTTP_202_ACCEPTED,
+            headers={
+                "Content-Location": reverse("task-detail", kwargs={"pk": task.id})
+            },
+        )
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Resource"],
+        summary="List all resources",
+        description="Retrieve a list of all resources with options for filtering by various criteria. Resources are "
+        "objects that are discovered by Prowler. They can be anything from a single host to a whole VPC.",
+    ),
+    retrieve=extend_schema(
+        tags=["Resource"],
+        summary="Retrieve data for a resource",
+        description="Fetch detailed information about a specific resource by its ID. A Resource is an object that "
+        "is discovered by Prowler. It can be anything from a single host to a whole VPC.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class ResourceViewSet(BaseRLSViewSet):
+    queryset = Resource.objects.all()
+    serializer_class = ResourceSerializer
+    http_method_names = ["get"]
+    filterset_class = ResourceFilter
+    ordering = ["-inserted_at"]
+    ordering_fields = [
+        "provider_uid",
+        "uid",
+        "name",
+        "region",
+        "service",
+        "type",
+        "inserted_at",
+        "updated_at",
+    ]
+
+    def get_queryset(self):
+        queryset = Resource.objects.all()
+        search_value = self.request.query_params.get("filter[search]", None)
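+        # e.g. GET /api/v1/resources?filter[search]=my-bucket (illustrative path; the parameter
+        # name comes from the SEARCH_PARAM setting in config/django/base.py)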
+
+        if search_value:
+            # Django's ORM builds a LEFT OUTER JOIN on the "through" table, producing duplicate rows
+            # The duplicates then require a `distinct` query
+            search_query = SearchQuery(
+                search_value, config="simple", search_type="plain"
+            )
+            queryset = queryset.filter(
+                Q(tags__key=search_value)
+                | Q(tags__value=search_value)
+                | Q(tags__text_search=search_query)
+                | Q(tags__key__contains=search_value)
+                | Q(tags__value__contains=search_value)
+                | Q(uid=search_value)
+                | Q(name=search_value)
+                | Q(region=search_value)
+                | Q(service=search_value)
+                | Q(type=search_value)
+                | Q(text_search=search_query)
+                | Q(uid__contains=search_value)
+                | Q(name__contains=search_value)
+                | Q(region__contains=search_value)
+                | Q(service__contains=search_value)
+                | Q(type__contains=search_value)
+            ).distinct()
+
+        return queryset
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Finding"],
+        summary="List all findings",
+        description="Retrieve a list of all findings with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Finding"],
+        summary="Retrieve data from a specific finding",
+        description="Fetch detailed information about a specific finding by its ID.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class FindingViewSet(BaseRLSViewSet):
+    queryset = Finding.objects.all()
+    serializer_class = FindingSerializer
+    prefetch_for_includes = {
+        "__all__": [],
+        "resources": [
+            # "findings" is a reverse relation, so it must be prefetched rather than select_related
+            Prefetch("resources", queryset=Resource.objects.prefetch_related("findings"))
+        ],
+        "scan": [Prefetch("scan", queryset=Scan.objects.prefetch_related("findings"))],
+    }
+    http_method_names = ["get"]
+    filterset_class = FindingFilter
+    ordering = ["-id"]
+    ordering_fields = [
+        "id",
+        "status",
+        "severity",
+        "check_id",
+        "inserted_at",
+        "updated_at",
+    ]
+
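+    # UUIDv7 ids embed a millisecond timestamp, so an inserted_at datetime can be mapped to a
+    # comparable id bound (findings are ordered by id, which is time-sortable for UUIDv7)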
+    def inserted_at_to_uuidv7(self, inserted_at):
+        if inserted_at is None:
+            return None
+        return datetime_to_uuid7(inserted_at)
+
+    def get_queryset(self):
+        queryset = Finding.objects.all()
+        search_value = self.request.query_params.get("filter[search]", None)
+
+        if search_value:
+            # Django's ORM builds a LEFT OUTER JOIN on any "through" tables, producing duplicate rows
+            # The duplicates then require a `distinct` query
+            search_query = SearchQuery(
+                search_value, config="simple", search_type="plain"
+            )
+            queryset = queryset.filter(
+                Q(impact_extended__contains=search_value)
+                | Q(status_extended__contains=search_value)
+                | Q(check_id=search_value)
+                | Q(check_id__icontains=search_value)
+                | Q(text_search=search_query)
+                | Q(resources__uid=search_value)
+                | Q(resources__name=search_value)
+                | Q(resources__region=search_value)
+                | Q(resources__service=search_value)
+                | Q(resources__type=search_value)
+                | Q(resources__uid__contains=search_value)
+                | Q(resources__name__contains=search_value)
+                | Q(resources__region__contains=search_value)
+                | Q(resources__service__contains=search_value)
+                | Q(resources__tags__text_search=search_query)
+                | Q(resources__text_search=search_query)
+            ).distinct()
+
+        return queryset
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Provider"],
+        summary="List all secrets",
+        description="Retrieve a list of all secrets with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Provider"],
+        summary="Retrieve data from a secret",
+        description="Fetch detailed information about a specific secret by its ID.",
+    ),
+    create=extend_schema(
+        tags=["Provider"],
+        summary="Create a new secret",
+        description="Add a new secret to the system by providing the required secret details.",
+    ),
+    partial_update=extend_schema(
+        tags=["Provider"],
+        summary="Partially update a secret",
+        description="Update certain fields of an existing secret's information without affecting other fields.",
+    ),
+    destroy=extend_schema(
+        tags=["Provider"],
+        summary="Delete a secret",
+        description="Remove a secret from the system by its ID.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class ProviderSecretViewSet(BaseRLSViewSet):
+    queryset = ProviderSecret.objects.all()
+    serializer_class = ProviderSecretSerializer
+    filterset_class = ProviderSecretFilter
+    http_method_names = ["get", "post", "patch", "delete"]
+    search_fields = ["name"]
+    ordering = ["-inserted_at"]
+    ordering_fields = [
+        "name",
+        "inserted_at",
+        "updated_at",
+    ]
+
+    def get_queryset(self):
+        return ProviderSecret.objects.all()
+
+    def get_serializer_class(self):
+        if self.action == "create":
+            return ProviderSecretCreateSerializer
+        elif self.action == "partial_update":
+            return ProviderSecretUpdateSerializer
+        return super().get_serializer_class()
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Invitation"],
+        summary="List all invitations",
+        description="Retrieve a list of all tenant invitations with options for filtering by various criteria.",
+    ),
+    retrieve=extend_schema(
+        tags=["Invitation"],
+        summary="Retrieve data from a tenant invitation",
+        description="Fetch detailed information about a specific invitation by its ID.",
+    ),
+    create=extend_schema(
+        tags=["Invitation"],
+        summary="Invite a user to a tenant",
+        description="Add a new tenant invitation to the system by providing the required invitation details. The "
+        "invited user will have to accept the invitation or create an account using the given code.",
+    ),
+    partial_update=extend_schema(
+        tags=["Invitation"],
+        summary="Partially update a tenant invitation",
+        description="Update certain fields of an existing tenant invitation's information without affecting other "
+        "fields.",
+    ),
+    destroy=extend_schema(
+        tags=["Invitation"],
+        summary="Revoke a tenant invitation",
+        description="Revoke a tenant invitation from the system by its ID.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class InvitationViewSet(BaseRLSViewSet):
+    queryset = Invitation.objects.all()
+    serializer_class = InvitationSerializer
+    filterset_class = InvitationFilter
+    http_method_names = ["get", "post", "patch", "delete"]
+    search_fields = ["email"]
+    ordering = ["-inserted_at"]
+    ordering_fields = [
+        "inserted_at",
+        "updated_at",
+        "expires_at",
+        "state",
+        "inviter",
+    ]
+
+    def get_queryset(self):
+        return Invitation.objects.all()
+
+    def get_serializer_class(self):
+        if self.action == "create":
+            return InvitationCreateSerializer
+        elif self.action == "partial_update":
+            return InvitationUpdateSerializer
+        return super().get_serializer_class()
+
+    def create(self, request, *args, **kwargs):
+        serializer = self.get_serializer(
+            data=request.data,
+            context={"tenant_id": self.request.tenant_id, "request": request},
+        )
+        serializer.is_valid(raise_exception=True)
+        serializer.save()
+        return Response(data=serializer.data, status=status.HTTP_201_CREATED)
+
+    def partial_update(self, request, *args, **kwargs):
+        instance = self.get_object()
+        if instance.state != Invitation.State.PENDING:
+            raise ValidationError(detail="This invitation cannot be updated.")
+        serializer = self.get_serializer(
+            instance,
+            data=request.data,
+            partial=True,
+            context={"tenant_id": self.request.tenant_id, "request": request},
+        )
+        serializer.is_valid(raise_exception=True)
+        serializer.save()
+        return Response(data=serializer.data, status=status.HTTP_200_OK)
+
+    def destroy(self, request, *args, **kwargs):
+        instance = self.get_object()
+        if instance.state != Invitation.State.PENDING:
+            raise ValidationError(detail="This invitation cannot be revoked.")
+        instance.state = Invitation.State.REVOKED
+        instance.save()
+        return Response(status=status.HTTP_204_NO_CONTENT)
+
+
+class InvitationAcceptViewSet(BaseRLSViewSet):
+    queryset = Invitation.objects.all()
+    serializer_class = InvitationAcceptSerializer
+    http_method_names = ["post"]
+
+    def get_queryset(self):
+        return Invitation.objects.all()
+
+    def get_serializer_class(self):
+        if hasattr(self, "response_serializer_class"):
+            return self.response_serializer_class
+        return InvitationAcceptSerializer
+
+    @extend_schema(exclude=True)
+    def create(self, request, *args, **kwargs):
+        raise MethodNotAllowed(method="POST")
+
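+    # The default POST route is disabled and hidden from the schema; only the /accept action below is exposed.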
+    @extend_schema(
+        tags=["Invitation"],
+        summary="Accept an invitation",
+        description="Accept an invitation to an existing tenant. The invitation must not be expired, and the "
+        "invited email must match the authenticated user's email.",
+        responses={201: OpenApiResponse(response=MembershipSerializer)},
+    )
+    @action(detail=False, methods=["post"], url_name="accept")
+    def accept(self, request):
+        serializer = self.get_serializer(
+            data=request.data,
+            context=self.get_serializer_context(),
+        )
+        serializer.is_valid(raise_exception=True)
+        invitation_token = serializer.validated_data["invitation_token"]
+        user_email = request.user.email
+
+        invitation = validate_invitation(
+            invitation_token, user_email, raise_not_found=True
+        )
+
+        # Proceed with accepting the invitation
+        user = User.objects.using(MainRouter.admin_db).get(email=user_email)
+        membership = Membership.objects.using(MainRouter.admin_db).create(
+            user=user,
+            tenant=invitation.tenant,
+        )
+        invitation.state = Invitation.State.ACCEPTED
+        invitation.save(using=MainRouter.admin_db)
+
+        self.response_serializer_class = MembershipSerializer
+        membership_serializer = self.get_serializer(membership)
+        return Response(data=membership_serializer.data, status=status.HTTP_201_CREATED)
+
+
+@extend_schema_view(
+    list=extend_schema(
+        tags=["Compliance Overview"],
+        summary="List compliance overviews for a scan",
+        description="Retrieve an overview of all compliance frameworks in a given scan. If no region filter is "
+        "provided, the region with the most failed requirements is returned by default.",
+        parameters=[
+            OpenApiParameter(
+                name="filter[scan_id]",
+                required=True,
+                type=OpenApiTypes.UUID,
+                location=OpenApiParameter.QUERY,
+                description="Related scan ID.",
+            ),
+        ],
+    ),
+    retrieve=extend_schema(
+        tags=["Compliance Overview"],
+        summary="Retrieve data from a specific compliance overview",
+        description="Fetch detailed information about a specific compliance overview by its ID, including detailed "
+        "requirement information and check statuses.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+@method_decorator(CACHE_DECORATOR, name="retrieve")
+class ComplianceOverviewViewSet(BaseRLSViewSet):
+    pagination_class = ComplianceOverviewPagination
+    queryset = ComplianceOverview.objects.all()
+    serializer_class = ComplianceOverviewSerializer
+    filterset_class = ComplianceOverviewFilter
+    http_method_names = ["get"]
+    search_fields = ["compliance_id"]
+    ordering = ["compliance_id"]
+    ordering_fields = ["inserted_at", "compliance_id", "framework", "region"]
+
+    def get_queryset(self):
+        if self.action == "retrieve":
+            return ComplianceOverview.objects.all()
+
+        base_queryset = self.filter_queryset(ComplianceOverview.objects.all())
+
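+        # For each compliance_id, keep only the overview row (region) with the most failed requirements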
+        max_failed_ids = (
+            base_queryset.filter(compliance_id=OuterRef("compliance_id"))
+            .order_by("-requirements_failed")
+            .values("id")[:1]
+        )
+
+        queryset = base_queryset.filter(id__in=Subquery(max_failed_ids)).order_by(
+            "compliance_id"
+        )
+
+        return queryset
+
+    def get_serializer_class(self):
+        if self.action == "retrieve":
+            return ComplianceOverviewFullSerializer
+        return super().get_serializer_class()
+
+    def list(self, request, *args, **kwargs):
+        if not request.query_params.get("filter[scan_id]"):
+            raise ValidationError(
+                [
+                    {
+                        "detail": "This query parameter is required.",
+                        "status": 400,
+                        "source": {"pointer": "filter[scan_id]"},
+                        "code": "required",
+                    }
+                ]
+            )
+        return super().list(request, *args, **kwargs)
+
+
+@extend_schema(tags=["Overview"])
+@extend_schema_view(
+    providers=extend_schema(
+        summary="List aggregated overview data for providers",
+        description="Fetch aggregated summaries of the latest findings and resources for each provider. "
+        "This includes counts of passed, failed, and manual findings, as well as the total number "
+        "of resources managed by each provider.",
+    ),
+)
+@method_decorator(CACHE_DECORATOR, name="list")
+class OverviewViewSet(BaseRLSViewSet):
+    queryset = ComplianceOverview.objects.all()
+    http_method_names = ["get"]
+    ordering = ["compliance_id"]
+
+    def get_queryset(self):
+        return Finding.objects.all()
+
+    def get_serializer_class(self):
+        if self.action == "providers":
+            return OverviewProviderSerializer
+        return super().get_serializer_class()
+
+    @extend_schema(exclude=True)
+    def list(self, request, *args, **kwargs):
+        raise MethodNotAllowed(method="GET")
+
+    @extend_schema(exclude=True)
+    def retrieve(self, request, *args, **kwargs):
+        raise MethodNotAllowed(method="GET")
+
+    @action(detail=False, methods=["get"], url_name="providers")
+    def providers(self, request):
+        # Subquery to get the most recent finding for each uid
+        latest_finding_ids = (
+            Finding.objects.filter(
+                uid=OuterRef("uid"), scan__provider=OuterRef("scan__provider")
+            )
+            .order_by("-id")  # Most recent by id
+            .values("id")[:1]
+        )
+
+        # Filter findings to only include the most recent for each uid
+        recent_findings = Finding.objects.filter(id__in=Subquery(latest_finding_ids))
+
+        # Aggregate findings by provider
+        findings_aggregated = (
+            recent_findings.values("scan__provider__provider")
+            .annotate(
+                findings_passed=Count("id", filter=Q(status=StatusChoices.PASS.value)),
+                findings_failed=Count("id", filter=Q(status=StatusChoices.FAIL.value)),
+                findings_manual=Count(
+                    "id", filter=Q(status=StatusChoices.MANUAL.value)
+                ),
+                total_findings=Count("id"),
+            )
+            .order_by("-findings_failed")
+        )
+
+        # Aggregate total resources by provider
+        resources_aggregated = Resource.objects.values("provider__provider").annotate(
+            total_resources=Count("id")
+        )
+
+        # Combine findings and resources data
+        overview = []
+        for findings in findings_aggregated:
+            provider = findings["scan__provider__provider"]
+            total_resources = next(
+                (
+                    res["total_resources"]
+                    for res in resources_aggregated
+                    if res["provider__provider"] == provider
+                ),
+                0,  # Default to 0 if no resources are found
+            )
+            overview.append(
+                {
+                    "provider": provider,
+                    "total_resources": total_resources,
+                    "total_findings": findings["total_findings"],
+                    "findings_passed": findings["findings_passed"],
+                    "findings_failed": findings["findings_failed"],
+                    "findings_manual": findings["findings_manual"],
+                }
+            )
+
+        serializer = OverviewProviderSerializer(overview, many=True)
+
+        return Response(serializer.data, status=status.HTTP_200_OK)
diff --git a/api/src/backend/api/validators.py b/api/src/backend/api/validators.py
new file mode 100644
index 0000000000..135a0fd6c0
--- /dev/null
+++ b/api/src/backend/api/validators.py
@@ -0,0 +1,22 @@
+from django.core.exceptions import ValidationError
+from django.utils.translation import gettext as _
+
+
+class MaximumLengthValidator:
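+    # Counterpart to Django's built-in MinimumLengthValidator; wired up via AUTH_PASSWORD_VALIDATORS
+    # in config/django/base.py with max_length=72, a common cap matching bcrypt's 72-byte input limit.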
+    def __init__(self, max_length=72):
+        self.max_length = max_length
+
+    def validate(self, password, user=None):
+        if len(password) > self.max_length:
+            raise ValidationError(
+                _(
+                    "This password is too long. It must contain no more than %(max_length)d characters."
+                ),
+                code="password_too_long",
+                params={"max_length": self.max_length},
+            )
+
+    def get_help_text(self):
+        return _(
+            "Your password must contain no more than %(max_length)d characters."
+        ) % {"max_length": self.max_length}
diff --git a/api/src/backend/config/__init__.py b/api/src/backend/config/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/config/asgi.py b/api/src/backend/config/asgi.py
new file mode 100644
index 0000000000..cfc064ba97
--- /dev/null
+++ b/api/src/backend/config/asgi.py
@@ -0,0 +1,16 @@
+"""
+ASGI config for backend project.
+
+It exposes the ASGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/5.0/howto/deployment/asgi/
+"""
+
+import os
+
+from django.core.asgi import get_asgi_application
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.django.production")
+
+application = get_asgi_application()
diff --git a/api/src/backend/config/celery.py b/api/src/backend/config/celery.py
new file mode 100644
index 0000000000..d94fb1aa1e
--- /dev/null
+++ b/api/src/backend/config/celery.py
@@ -0,0 +1,46 @@
+from celery import Celery, Task
+
+celery_app = Celery("tasks")
+
+celery_app.config_from_object("django.conf:settings", namespace="CELERY")
+celery_app.conf.update(result_extended=True)
+
+celery_app.autodiscover_tasks(["api"])
+
+
+class RLSTask(Task):
+    def apply_async(
+        self,
+        args=None,
+        kwargs=None,
+        task_id=None,
+        producer=None,
+        link=None,
+        link_error=None,
+        shadow=None,
+        **options,
+    ):
+        from api.models import Task as APITask
+        from django_celery_results.models import TaskResult
+
+        result = super().apply_async(
+            args=args,
+            kwargs=kwargs,
+            task_id=task_id,
+            producer=producer,
+            link=link,
+            link_error=link_error,
+            shadow=shadow,
+            **options,
+        )
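+        # Mirror the django-celery-results TaskResult row into the tenant-scoped API Task
+        # table so the dispatched task is visible through the API.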
+        task_result_instance = TaskResult.objects.get(task_id=result.task_id)
+        from api.db_utils import tenant_transaction
+
+        tenant_id = kwargs.get("tenant_id")
+        with tenant_transaction(tenant_id):
+            APITask.objects.create(
+                id=task_result_instance.task_id,
+                tenant_id=tenant_id,
+                task_runner_task=task_result_instance,
+            )
+        return result
diff --git a/api/src/backend/config/custom_logging.py b/api/src/backend/config/custom_logging.py
new file mode 100644
index 0000000000..04ef403b1d
--- /dev/null
+++ b/api/src/backend/config/custom_logging.py
@@ -0,0 +1,230 @@
+import json
+import logging
+from enum import StrEnum
+
+from django_guid.log_filters import CorrelationId
+
+from config.env import env
+
+
+class BackendLogger(StrEnum):
+    GUNICORN = "gunicorn"
+    GUNICORN_ACCESS = "gunicorn.access"
+    GUNICORN_ERROR = "gunicorn.error"
+    DJANGO = "django"
+    SECURITY = "django.security"
+    DB = "django.db"
+    API = "api"
+    TASKS = "tasks"
+
+
+# Formatters
+
+
+class NDJSONFormatter(logging.Formatter):
+    """NDJSON custom formatter for logging messages.
+
+    If available, it will include all kinds of API request metadata.
+    """
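+    # Example output (hypothetical values, one JSON object per line):
+    # {"timestamp": "2024-01-01 12:00:00", "level": "INFO", "message": "...",
+    #  "logger": "api", "module": "views", "transaction_id": "..."}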
+
+    def format(self, record):
+        log_record = {
+            "timestamp": self.formatTime(record, self.datefmt),
+            "level": record.levelname,
+            "message": record.getMessage(),
+            "logger": record.name,
+            "module": record.module,
+            "pathname": record.pathname,
+            "lineno": record.lineno,
+            "funcName": record.funcName,
+            "process": record.process,
+            "thread": record.thread,
+            "transaction_id": getattr(record, "transaction_id", None),
+        }
+
+        # Add REST API extra fields
+        if hasattr(record, "user_id"):
+            log_record["user_id"] = record.user_id
+        if hasattr(record, "tenant_id"):
+            log_record["tenant_id"] = record.tenant_id
+        if hasattr(record, "method"):
+            log_record["method"] = record.method
+        if hasattr(record, "path"):
+            log_record["path"] = record.path
+        if hasattr(record, "query_params"):
+            log_record["query_params"] = record.query_params
+        if hasattr(record, "duration"):
+            log_record["duration"] = record.duration
+        if hasattr(record, "status_code"):
+            log_record["status_code"] = record.status_code
+
+        if record.exc_info:
+            log_record["exc_info"] = self.formatException(record.exc_info)
+
+        return json.dumps(log_record)
+
+
+class HumanReadableFormatter(logging.Formatter):
+    """Human-readable custom formatter for logging messages.
+
+    If available, it will include all kinds of API request metadata.
+    """
+
+    def format(self, record):
+        log_components = [
+            f"{self.formatTime(record, self.datefmt)}",
+            f"[{record.name}]",
+            f"{record.levelname}:",
+            f"[module={record.module}",
+            f"path={record.pathname}",
+            f"line={record.lineno}",
+            f"function={record.funcName}",
+            f"process={record.process}",
+            f"thread={record.thread}",
+            f"transaction-id={getattr(record, 'transaction_id', None)}]",
+            f"{record.getMessage()}",
+        ]
+
+        # Add REST API extra fields
+        if hasattr(record, "user_id"):
+            log_components.append(f"({record.user_id})")
+        if hasattr(record, "tenant_id"):
+            log_components.append(f"[{record.tenant_id}]")
+        if hasattr(record, "method"):
+            log_components.append(f'"{record.method} {record.path}"')
+        if hasattr(record, "query_params"):
+            log_components.append(f"with parameters {record.query_params}")
+        if hasattr(record, "duration"):
+            log_components.append(f"done in {record.duration}s:")
+        if hasattr(record, "status_code"):
+            log_components.append(f"{record.status_code}")
+
+        if record.exc_info:
+            log_components.append(self.formatException(record.exc_info))
+
+        return " ".join(log_components)
+
+
+# Filters
+
+
+class TransactionIdFilter(CorrelationId):
+    """Logging filter class.
+
+    Overrides the `correlation_id_field` parameter of the parent class so the
+    field is emitted as `transaction_id`.
+    """
+
+    CORRELATION_ID_FIELD = "transaction_id"
+
+    def __init__(self):
+        super().__init__(correlation_id_field=self.CORRELATION_ID_FIELD)
+
+
+# Logging settings
+
+LEVEL = env("DJANGO_LOGGING_LEVEL", default="INFO")
+FORMATTER = env("DJANGO_LOGGING_FORMATTER", default="ndjson")
+
+LOGGING = {
+    "version": 1,
+    "disable_existing_loggers": True,
+    "filters": {"transaction_id": {"()": TransactionIdFilter}},
+    "formatters": {
+        "ndjson": {
+            "()": NDJSONFormatter,
+            "datefmt": "%Y-%m-%d %H:%M:%S",
+        },
+        "human_readable": {
+            "()": HumanReadableFormatter,
+            "datefmt": "%Y-%m-%d %H:%M:%S",
+        },
+    },
+    "handlers": {
+        "gunicorn_console": {
+            "level": LEVEL,
+            "class": "logging.StreamHandler",
+            "formatter": FORMATTER,
+            "filters": ["transaction_id"],
+        },
+        "django_console": {
+            "level": LEVEL,
+            "class": "logging.StreamHandler",
+            "formatter": FORMATTER,
+            "filters": ["transaction_id"],
+        },
+        "api_console": {
+            "level": LEVEL,
+            "class": "logging.StreamHandler",
+            "formatter": FORMATTER,
+            "filters": ["transaction_id"],
+        },
+        "db_console": {
+            "level": "DEBUG" if LEVEL == "DEBUG" else "INFO",
+            "class": "logging.StreamHandler",
+            "formatter": FORMATTER,
+            "filters": ["transaction_id"],
+        },
+        "security_console": {
+            "level": LEVEL,
+            "class": "logging.StreamHandler",
+            "formatter": FORMATTER,
+            "filters": ["transaction_id"],
+        },
+        "tasks_console": {
+            "level": LEVEL,
+            "class": "logging.StreamHandler",
+            "formatter": FORMATTER,
+            "filters": ["transaction_id"],
+        },
+    },
+    "loggers": {
+        BackendLogger.GUNICORN: {
+            "handlers": ["gunicorn_console"],
+            "level": LEVEL,
+            "propagate": False,
+        },
+        BackendLogger.GUNICORN_ACCESS: {
+            "handlers": ["gunicorn_console"],
+            "level": "CRITICAL",
+            "propagate": False,
+        },
+        BackendLogger.GUNICORN_ERROR: {
+            "handlers": ["gunicorn_console"],
+            "level": LEVEL,
+            "propagate": False,
+        },
+        BackendLogger.DJANGO: {
+            "handlers": ["django_console"],
+            "level": "WARNING",
+            "propagate": True,
+        },
+        BackendLogger.DB: {
+            "handlers": ["db_console"],
+            "level": LEVEL,
+            "propagate": False,
+        },
+        BackendLogger.SECURITY: {
+            "handlers": ["security_console"],
+            "level": LEVEL,
+            "propagate": False,
+        },
+        BackendLogger.API: {
+            "handlers": ["api_console"],
+            "level": LEVEL,
+            "propagate": False,
+        },
+        BackendLogger.TASKS: {
+            "handlers": ["tasks_console"],
+            "level": LEVEL,
+            "propagate": False,
+        },
+    },
+    # Root logger configuration, required by Gunicorn
+    "root": {
+        "level": "ERROR",
+        "handlers": ["gunicorn_console"],
+    },
+}
diff --git a/api/src/backend/config/django/__init__.py b/api/src/backend/config/django/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/config/django/base.py b/api/src/backend/config/django/base.py
new file mode 100644
index 0000000000..f78cc94ab8
--- /dev/null
+++ b/api/src/backend/config/django/base.py
@@ -0,0 +1,209 @@
+from datetime import timedelta
+
+from config.custom_logging import LOGGING  # noqa
+from config.env import BASE_DIR, env  # noqa
+from config.settings.celery import *  # noqa
+from config.settings.partitions import *  # noqa
+
+SECRET_KEY = env("SECRET_KEY", default="secret")
+DEBUG = env.bool("DJANGO_DEBUG", default=False)
+ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
+
+# Application definition
+
+INSTALLED_APPS = [
+    "django.contrib.admin",
+    "django.contrib.auth",
+    "django.contrib.contenttypes",
+    "django.contrib.sessions",
+    "django.contrib.messages",
+    "django.contrib.staticfiles",
+    "django.contrib.postgres",
+    "psqlextra",
+    "api",
+    "rest_framework",
+    "corsheaders",
+    "drf_spectacular",
+    "django_guid",
+    "rest_framework_json_api",
+    "django_celery_results",
+    "django_celery_beat",
+    "rest_framework_simplejwt.token_blacklist",
+]
+
+MIDDLEWARE = [
+    "django_guid.middleware.guid_middleware",
+    "django.middleware.security.SecurityMiddleware",
+    "django.contrib.sessions.middleware.SessionMiddleware",
+    "corsheaders.middleware.CorsMiddleware",
+    "django.middleware.common.CommonMiddleware",
+    "django.middleware.csrf.CsrfViewMiddleware",
+    "django.contrib.auth.middleware.AuthenticationMiddleware",
+    "django.contrib.messages.middleware.MessageMiddleware",
+    "django.middleware.clickjacking.XFrameOptionsMiddleware",
+    "api.middleware.APILoggingMiddleware",
+]
+
+CORS_ALLOWED_ORIGINS = ["http://localhost", "http://127.0.0.1"]
+
+ROOT_URLCONF = "config.urls"
+
+TEMPLATES = [
+    {
+        "BACKEND": "django.template.backends.django.DjangoTemplates",
+        "DIRS": [],
+        "APP_DIRS": True,
+        "OPTIONS": {
+            "context_processors": [
+                "django.template.context_processors.debug",
+                "django.template.context_processors.request",
+                "django.contrib.auth.context_processors.auth",
+                "django.contrib.messages.context_processors.messages",
+            ],
+        },
+    },
+]
+
+REST_FRAMEWORK = {
+    "DEFAULT_SCHEMA_CLASS": "drf_spectacular_jsonapi.schemas.openapi.JsonApiAutoSchema",
+    "DEFAULT_AUTHENTICATION_CLASSES": (
+        "rest_framework_simplejwt.authentication.JWTAuthentication",
+    ),
+    "PAGE_SIZE": 10,
+    "EXCEPTION_HANDLER": "api.exceptions.custom_exception_handler",
+    "DEFAULT_PAGINATION_CLASS": "drf_spectacular_jsonapi.schemas.pagination.JsonApiPageNumberPagination",
+    "DEFAULT_PARSER_CLASSES": (
+        "rest_framework_json_api.parsers.JSONParser",
+        "rest_framework.parsers.FormParser",
+        "rest_framework.parsers.MultiPartParser",
+    ),
+    "DEFAULT_RENDERER_CLASSES": ("api.renderers.APIJSONRenderer",),
+    "DEFAULT_METADATA_CLASS": "rest_framework_json_api.metadata.JSONAPIMetadata",
+    "DEFAULT_FILTER_BACKENDS": (
+        "rest_framework_json_api.filters.QueryParameterValidationFilter",
+        "rest_framework_json_api.filters.OrderingFilter",
+        "rest_framework_json_api.django_filters.backends.DjangoFilterBackend",
+        "rest_framework.filters.SearchFilter",
+    ),
+    "SEARCH_PARAM": "filter[search]",
+    "TEST_REQUEST_RENDERER_CLASSES": (
+        "rest_framework_json_api.renderers.JSONRenderer",
+    ),
+    "TEST_REQUEST_DEFAULT_FORMAT": "vnd.api+json",
+    "JSON_API_UNIFORM_EXCEPTIONS": True,
+}
+
+SPECTACULAR_SETTINGS = {
+    "SERVE_INCLUDE_SCHEMA": False,
+    "COMPONENT_SPLIT_REQUEST": True,
+    "PREPROCESSING_HOOKS": [
+        "drf_spectacular_jsonapi.hooks.fix_nested_path_parameters",
+    ],
+}
+
+WSGI_APPLICATION = "config.wsgi.application"
+
+DJANGO_GUID = {
+    "GUID_HEADER_NAME": "Transaction-ID",
+    "VALIDATE_GUID": True,
+    "RETURN_HEADER": True,
+    "EXPOSE_HEADER": True,
+    "INTEGRATIONS": [],
+    "IGNORE_URLS": [],
+    "UUID_LENGTH": 32,
+}
+
+DATABASE_ROUTERS = ["api.db_router.MainRouter"]
+
+
+# Password validation
+# https://docs.djangoproject.com/en/5.0/ref/settings/#auth-password-validators
+
+AUTH_USER_MODEL = "api.User"
+
+AUTH_PASSWORD_VALIDATORS = [
+    {
+        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
+    },
+    {
+        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
+        "OPTIONS": {"min_length": 12},
+    },
+    {
+        "NAME": "api.validators.MaximumLengthValidator",
+        "OPTIONS": {
+            "max_length": 72,
+        },
+    },
+    {
+        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
+    },
+    {
+        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
+    },
+]
+
+SIMPLE_JWT = {
+    # Token lifetime settings
+    "ACCESS_TOKEN_LIFETIME": timedelta(
+        minutes=env.int("DJANGO_ACCESS_TOKEN_LIFETIME", 30)
+    ),
+    "REFRESH_TOKEN_LIFETIME": timedelta(
+        minutes=env.int("DJANGO_REFRESH_TOKEN_LIFETIME", 60 * 24)
+    ),
+    "ROTATE_REFRESH_TOKENS": True,
+    "BLACKLIST_AFTER_ROTATION": True,
+    # Algorithm and keys
+    "ALGORITHM": "RS256",
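+    # PEM keys are often stored in environment variables with literal "\n" sequences;
+    # restore real newlines before handing them to the JWT backend.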
+    "SIGNING_KEY": env.str("DJANGO_TOKEN_SIGNING_KEY", "").replace("\\n", "\n"),
+    "VERIFYING_KEY": env.str("DJANGO_TOKEN_VERIFYING_KEY", "").replace("\\n", "\n"),
+    # Authorization header configuration
+    "AUTH_HEADER_TYPES": ("Bearer",),
+    "AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
+    # Custom serializers
+    "TOKEN_OBTAIN_SERIALIZER": "api.serializers.TokenSerializer",
+    "TOKEN_REFRESH_SERIALIZER": "api.serializers.TokenRefreshSerializer",
+    # Standard JWT claims
+    "TOKEN_TYPE_CLAIM": "typ",
+    "JTI_CLAIM": "jti",
+    "USER_ID_FIELD": "id",
+    "USER_ID_CLAIM": "sub",
+    # Issuer and Audience claims. For now these keep their default values; they may change in the future.
+    "AUDIENCE": env.str("DJANGO_JWT_AUDIENCE", "https://api.prowler.com"),
+    "ISSUER": env.str("DJANGO_JWT_ISSUER", "https://api.prowler.com"),
+    # Additional security settings
+    "UPDATE_LAST_LOGIN": True,
+}
+
+SECRETS_ENCRYPTION_KEY = env.str("DJANGO_SECRETS_ENCRYPTION_KEY", "")
+
+# Internationalization
+# https://docs.djangoproject.com/en/5.0/topics/i18n/
+
+LANGUAGE_CODE = "en-us"
+LANGUAGES = [
+    ("en", "English"),
+]
+
+TIME_ZONE = "UTC"
+
+USE_I18N = True
+
+USE_TZ = True
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/5.0/howto/static-files/
+
+STATIC_URL = "static/"
+
+# Default primary key field type
+# https://docs.djangoproject.com/en/5.0/ref/settings/#default-auto-field
+
+DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
+
+# Cache settings
+CACHE_MAX_AGE = env.int("DJANGO_CACHE_MAX_AGE", 3600)
+CACHE_STALE_WHILE_REVALIDATE = env.int("DJANGO_STALE_WHILE_REVALIDATE", 60)
+
+
+TESTING = False
diff --git a/api/src/backend/config/django/devel.py b/api/src/backend/config/django/devel.py
new file mode 100644
index 0000000000..573d913478
--- /dev/null
+++ b/api/src/backend/config/django/devel.py
@@ -0,0 +1,40 @@
+from config.django.base import *  # noqa
+from config.env import env
+
+
+DEBUG = env.bool("DJANGO_DEBUG", default=True)
+ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])
+
+# Database
+DATABASES = {
+    "prowler_user": {
+        "ENGINE": "psqlextra.backend",
+        "NAME": env("POSTGRES_DB", default="prowler_db"),
+        "USER": env("POSTGRES_USER", default="prowler_user"),
+        "PASSWORD": env("POSTGRES_PASSWORD", default="prowler"),
+        "HOST": env("POSTGRES_HOST", default="postgres-db"),
+        "PORT": env("POSTGRES_PORT", default="5432"),
+    },
+    "admin": {
+        "ENGINE": "psqlextra.backend",
+        "NAME": env("POSTGRES_DB", default="prowler_db"),
+        "USER": env("POSTGRES_ADMIN_USER", default="prowler"),
+        "PASSWORD": env("POSTGRES_ADMIN_PASSWORD", default="S3cret"),
+        "HOST": env("POSTGRES_HOST", default="postgres-db"),
+        "PORT": env("POSTGRES_PORT", default="5432"),
+    },
+}
+DATABASES["default"] = DATABASES["prowler_user"]
+
+REST_FRAMEWORK["DEFAULT_RENDERER_CLASSES"] = tuple(  # noqa: F405
+    render_class
+    for render_class in REST_FRAMEWORK["DEFAULT_RENDERER_CLASSES"]  # noqa: F405
+) + ("rest_framework_json_api.renderers.BrowsableAPIRenderer",)
+
+REST_FRAMEWORK["DEFAULT_FILTER_BACKENDS"] = tuple(  # noqa: F405
+    filter_backend
+    for filter_backend in REST_FRAMEWORK["DEFAULT_FILTER_BACKENDS"]  # noqa: F405
+    if "DjangoFilterBackend" not in filter_backend
+) + ("api.filters.CustomDjangoFilterBackend",)
+
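+# Hardcoded development-only key; production reads DJANGO_SECRETS_ENCRYPTION_KEY from the environment (see base.py).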
+SECRETS_ENCRYPTION_KEY = "ZMiYVo7m4Fbe2eXXPyrwxdJss2WSalXSv3xHBcJkPl0="
diff --git a/api/src/backend/config/django/production.py b/api/src/backend/config/django/production.py
new file mode 100644
index 0000000000..99e9bc69bc
--- /dev/null
+++ b/api/src/backend/config/django/production.py
@@ -0,0 +1,28 @@
+from config.django.base import *  # noqa
+from config.env import env
+
+
+DEBUG = env.bool("DJANGO_DEBUG", default=False)
+ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
+
+# Database
+# TODO Use Django database routers https://docs.djangoproject.com/en/5.0/topics/db/multi-db/#automatic-database-routing
+DATABASES = {
+    "prowler_user": {
+        "ENGINE": "django.db.backends.postgresql",
+        "NAME": env("POSTGRES_DB"),
+        "USER": env("POSTGRES_USER"),
+        "PASSWORD": env("POSTGRES_PASSWORD"),
+        "HOST": env("POSTGRES_HOST"),
+        "PORT": env("POSTGRES_PORT"),
+    },
+    "admin": {
+        "ENGINE": "psqlextra.backend",
+        "NAME": env("POSTGRES_DB"),
+        "USER": env("POSTGRES_ADMIN_USER"),
+        "PASSWORD": env("POSTGRES_ADMIN_PASSWORD"),
+        "HOST": env("POSTGRES_HOST"),
+        "PORT": env("POSTGRES_PORT"),
+    },
+}
+DATABASES["default"] = DATABASES["prowler_user"]
diff --git a/api/src/backend/config/django/testing.py b/api/src/backend/config/django/testing.py
new file mode 100644
index 0000000000..c62f0fc3a9
--- /dev/null
+++ b/api/src/backend/config/django/testing.py
@@ -0,0 +1,26 @@
+from config.django.base import *  # noqa
+from config.env import env
+
+
+DEBUG = env.bool("DJANGO_DEBUG", default=False)
+ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
+
+
+DATABASES = {
+    "default": {
+        "ENGINE": "psqlextra.backend",
+        "NAME": "prowler_db_test",
+        "USER": env("POSTGRES_USER", default="prowler"),
+        "PASSWORD": env("POSTGRES_PASSWORD", default="S3cret"),
+        "HOST": env("POSTGRES_HOST", default="localhost"),
+        "PORT": env("POSTGRES_PORT", default="5432"),
+    },
+}
+
+DATABASE_ROUTERS = []
+TESTING = True
+SECRETS_ENCRYPTION_KEY = "ZMiYVo7m4Fbe2eXXPyrwxdJss2WSalXSv3xHBcJkPl0="
+
+# JWT
+
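+# Symmetric HS256 avoids the RSA key pair that RS256 requires in the other environments.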
+SIMPLE_JWT["ALGORITHM"] = "HS256"  # noqa: F405
diff --git a/api/src/backend/config/env.py b/api/src/backend/config/env.py
new file mode 100644
index 0000000000..55ec0b9718
--- /dev/null
+++ b/api/src/backend/config/env.py
@@ -0,0 +1,7 @@
+from pathlib import Path
+
+import environ
+
+env = environ.Env()
+
+BASE_DIR = Path(__file__).resolve().parent.parent
diff --git a/api/src/backend/config/guniconf.py b/api/src/backend/config/guniconf.py
new file mode 100644
index 0000000000..a5b625874b
--- /dev/null
+++ b/api/src/backend/config/guniconf.py
@@ -0,0 +1,43 @@
+import logging
+import multiprocessing
+import os
+
+from config.env import env
+
+# Ensure the environment variable for Django settings is set
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.django.production")
+
+# Import Django and set it up before accessing settings
+import django  # noqa: E402
+
+django.setup()
+from config.django.production import LOGGING as DJANGO_LOGGERS, DEBUG  # noqa: E402
+from config.custom_logging import BackendLogger  # noqa: E402
+
+BIND_ADDRESS = env("DJANGO_BIND_ADDRESS", default="127.0.0.1")
+PORT = env("DJANGO_PORT", default=8000)
+
+# Server settings
+bind = f"{BIND_ADDRESS}:{PORT}"
+
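+# The default worker count follows the common Gunicorn heuristic of (2 x CPU cores) + 1.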
+workers = env.int("DJANGO_WORKERS", default=multiprocessing.cpu_count() * 2 + 1)
+reload = DEBUG
+
+# Logging
+logconfig_dict = DJANGO_LOGGERS
+gunicorn_logger = logging.getLogger(BackendLogger.GUNICORN)
+
+
+# Hooks
+def on_starting(_):
+    gunicorn_logger.info(f"Starting gunicorn server with {workers} workers")
+    if reload:
+        gunicorn_logger.warning("Reload settings enabled (dev mode)")
+
+
+def on_reload(_):
+    gunicorn_logger.warning("Gunicorn server has reloaded")
+
+
+def when_ready(_):
+    gunicorn_logger.info("Gunicorn server is ready")
diff --git a/api/src/backend/config/settings/__init__.py b/api/src/backend/config/settings/__init__.py
new file mode 100644
index 0000000000..3d8ce919d4
--- /dev/null
+++ b/api/src/backend/config/settings/__init__.py
@@ -0,0 +1,3 @@
+from config.celery import celery_app
+
+__all__ = ("celery_app",)
diff --git a/api/src/backend/config/settings/celery.py b/api/src/backend/config/settings/celery.py
new file mode 100644
index 0000000000..c7e55140ea
--- /dev/null
+++ b/api/src/backend/config/settings/celery.py
@@ -0,0 +1,11 @@
+from config.env import env
+
+VALKEY_HOST = env("VALKEY_HOST", default="valkey")
+VALKEY_PORT = env("VALKEY_PORT", default="6379")
+VALKEY_DB = env("VALKEY_DB", default="0")
+
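+# Valkey speaks the Redis protocol, so the standard redis:// scheme is used for the broker URL.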
+CELERY_BROKER_URL = f"redis://{VALKEY_HOST}:{VALKEY_PORT}/{VALKEY_DB}"
+CELERY_RESULT_BACKEND = "django-db"
+CELERY_TASK_TRACK_STARTED = True
+
+CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True
diff --git a/api/src/backend/config/settings/partitions.py b/api/src/backend/config/settings/partitions.py
new file mode 100644
index 0000000000..caceee70c3
--- /dev/null
+++ b/api/src/backend/config/settings/partitions.py
@@ -0,0 +1,16 @@
+from config.env import env
+
+# Partitioning
+PSQLEXTRA_PARTITIONING_MANAGER = "api.partitions.manager"
+
+# Number of months covered by each partition. A value of 1 creates partitions spanning one calendar month each.
+FINDINGS_TABLE_PARTITION_MONTHS = env.int("FINDINGS_TABLE_PARTITION_MONTHS", 1)
+
+# Set the number of partitions to create
+FINDINGS_TABLE_PARTITION_COUNT = env.int("FINDINGS_TABLE_PARTITION_COUNT", 7)
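+# e.g. with the defaults above, seven partitions of one calendar month each are created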
+
+# Set the number of months to keep partitions before deleting them
+# Setting this to None will keep partitions indefinitely
+FINDINGS_TABLE_PARTITION_MAX_AGE_MONTHS = env.int(
+    "FINDINGS_TABLE_PARTITION_MAX_AGE_MONTHS", None
+)
diff --git a/api/src/backend/config/urls.py b/api/src/backend/config/urls.py
new file mode 100644
index 0000000000..195329d171
--- /dev/null
+++ b/api/src/backend/config/urls.py
@@ -0,0 +1,7 @@
+from django.contrib import admin
+from django.urls import include, path
+
+urlpatterns = [
+    path("admin/", admin.site.urls),
+    path("api/v1/", include("api.v1.urls")),
+]
diff --git a/api/src/backend/config/wsgi.py b/api/src/backend/config/wsgi.py
new file mode 100644
index 0000000000..6c770033c4
--- /dev/null
+++ b/api/src/backend/config/wsgi.py
@@ -0,0 +1,16 @@
+"""
+WSGI config for backend project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/5.0/howto/deployment/wsgi/
+"""
+
+import os
+
+from django.core.wsgi import get_wsgi_application
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.django.production")
+
+application = get_wsgi_application()
diff --git a/api/src/backend/conftest.py b/api/src/backend/conftest.py
new file mode 100644
index 0000000000..f69a7a6b93
--- /dev/null
+++ b/api/src/backend/conftest.py
@@ -0,0 +1,546 @@
+import logging
+
+import pytest
+from django.conf import settings
+from datetime import datetime, timezone, timedelta
+from django.db import connections as django_connections, connection as django_connection
+from django.urls import reverse
+from django_celery_results.models import TaskResult
+from prowler.lib.check.models import Severity
+from prowler.lib.outputs.finding import Status
+from rest_framework import status
+from rest_framework.test import APIClient
+
+from api.models import (
+    ComplianceOverview,
+    Finding,
+    Invitation,
+    Membership,
+    Provider,
+    ProviderGroup,
+    ProviderSecret,
+    Resource,
+    ResourceTag,
+    Scan,
+    StateChoices,
+    Task,
+    User,
+)
+from api.rls import Tenant
+from api.v1.serializers import TokenSerializer
+
+API_JSON_CONTENT_TYPE = "application/vnd.api+json"
+NO_TENANT_HTTP_STATUS = status.HTTP_401_UNAUTHORIZED
+TEST_USER = "dev@prowler.com"
+TEST_PASSWORD = "testing_psswd"
+
+
+@pytest.fixture(scope="module")
+def enforce_test_user_db_connection(django_db_setup, django_db_blocker):
+    """Ensure tests use the test user for database connections."""
+    test_user = "test"
+    test_password = "test"
+
+    with django_db_blocker.unblock():
+        with django_connection.cursor() as cursor:
+            # Required for testing purposes using APIClient
+            cursor.execute(f"GRANT ALL PRIVILEGES ON django_session TO {test_user};")
+
+        original_user = settings.DATABASES["default"]["USER"]
+        original_password = settings.DATABASES["default"]["PASSWORD"]
+
+        django_connections["default"].settings_dict["USER"] = test_user
+        django_connections["default"].settings_dict["PASSWORD"] = test_password
+
+        django_connections["default"].close()
+        django_connections["default"].connect()
+
+    yield
+
+    with django_db_blocker.unblock():
+        django_connections["default"].settings_dict["USER"] = original_user
+        django_connections["default"].settings_dict["PASSWORD"] = original_password
+
+        django_connections["default"].close()
+        django_connections["default"].connect()
+
+
+@pytest.fixture(autouse=True)
+def disable_logging():
+    logging.disable(logging.CRITICAL)
+
+
+@pytest.fixture(scope="session", autouse=True)
+def create_test_user(django_db_setup, django_db_blocker):
+    with django_db_blocker.unblock():
+        user = User.objects.create_user(
+            name="testing",
+            email=TEST_USER,
+            password=TEST_PASSWORD,
+        )
+    return user
+
+
+@pytest.fixture
+def authenticated_client(create_test_user, tenants_fixture, client):
+    client.user = create_test_user
+    serializer = TokenSerializer(
+        data={"type": "tokens", "email": TEST_USER, "password": TEST_PASSWORD}
+    )
+    serializer.is_valid()
+    access_token = serializer.validated_data["access"]
+    client.defaults["HTTP_AUTHORIZATION"] = f"Bearer {access_token}"
+    return client
+
+
+@pytest.fixture
+def authenticated_api_client(create_test_user, tenants_fixture):
+    client = APIClient()
+    serializer = TokenSerializer(
+        data={"type": "tokens", "email": TEST_USER, "password": TEST_PASSWORD}
+    )
+    serializer.is_valid()
+    access_token = serializer.validated_data["access"]
+    client.defaults["HTTP_AUTHORIZATION"] = f"Bearer {access_token}"
+    return client
+
+
+@pytest.fixture
+def tenants_fixture(create_test_user):
+    user = create_test_user
+    tenant1 = Tenant.objects.create(
+        name="Tenant One",
+    )
+    Membership.objects.create(
+        user=user,
+        tenant=tenant1,
+    )
+    tenant2 = Tenant.objects.create(
+        name="Tenant Two",
+    )
+    Membership.objects.create(
+        user=user,
+        tenant=tenant2,
+        role=Membership.RoleChoices.OWNER,
+    )
+    tenant3 = Tenant.objects.create(
+        name="Tenant Three",
+    )
+    return tenant1, tenant2, tenant3
+
+
+@pytest.fixture
+def invitations_fixture(create_test_user, tenants_fixture):
+    user = create_test_user
+    *_, tenant = tenants_fixture
+    valid_invitation = Invitation.objects.create(
+        email="testing@prowler.com",
+        state=Invitation.State.PENDING,
+        token="TESTING1234567",
+        inviter=user,
+        tenant=tenant,
+    )
+    expired_invitation = Invitation.objects.create(
+        email="testing@prowler.com",
+        state=Invitation.State.EXPIRED,
+        token="TESTING1234568",
+        expires_at=datetime.now(timezone.utc) - timedelta(days=1),
+        inviter=user,
+        tenant=tenant,
+    )
+    return valid_invitation, expired_invitation
+
+
+@pytest.fixture
+def providers_fixture(tenants_fixture):
+    tenant, *_ = tenants_fixture
+    provider1 = Provider.objects.create(
+        provider="aws",
+        uid="123456789012",
+        alias="aws_testing_1",
+        tenant_id=tenant.id,
+    )
+    provider2 = Provider.objects.create(
+        provider="aws",
+        uid="123456789013",
+        alias="aws_testing_2",
+        tenant_id=tenant.id,
+    )
+    provider3 = Provider.objects.create(
+        provider="gcp",
+        uid="a12322-test321",
+        alias="gcp_testing",
+        tenant_id=tenant.id,
+    )
+    provider4 = Provider.objects.create(
+        provider="kubernetes",
+        uid="kubernetes-test-12345",
+        alias="k8s_testing",
+        tenant_id=tenant.id,
+    )
+    provider5 = Provider.objects.create(
+        provider="azure",
+        uid="37b065f8-26b0-4218-a665-0b23d07b27d9",
+        alias="azure_testing",
+        tenant_id=tenant.id,
+        scanner_args={"key1": "value1", "key2": {"key21": "value21"}},
+    )
+
+    return provider1, provider2, provider3, provider4, provider5
+
+
+@pytest.fixture
+def provider_groups_fixture(tenants_fixture):
+    tenant, *_ = tenants_fixture
+    pgroup1 = ProviderGroup.objects.create(
+        name="Group One",
+        tenant_id=tenant.id,
+    )
+    pgroup2 = ProviderGroup.objects.create(
+        name="Group Two",
+        tenant_id=tenant.id,
+    )
+    pgroup3 = ProviderGroup.objects.create(
+        name="Group Three",
+        tenant_id=tenant.id,
+    )
+
+    return pgroup1, pgroup2, pgroup3
+
+
+@pytest.fixture
+def provider_secret_fixture(providers_fixture):
+    return tuple(
+        ProviderSecret.objects.create(
+            tenant_id=provider.tenant_id,
+            provider=provider,
+            secret_type=ProviderSecret.TypeChoices.STATIC,
+            secret={"key": "value"},
+            name=provider.alias,
+        )
+        for provider in providers_fixture
+    )
+
+
+@pytest.fixture
+def scans_fixture(tenants_fixture, providers_fixture):
+    tenant, *_ = tenants_fixture
+    provider, provider2, *_ = providers_fixture
+
+    scan1 = Scan.objects.create(
+        name="Scan 1",
+        provider=provider,
+        trigger=Scan.TriggerChoices.MANUAL,
+        state=StateChoices.AVAILABLE,
+        tenant_id=tenant.id,
+        started_at="2024-01-02T00:00:00Z",
+    )
+    scan2 = Scan.objects.create(
+        name="Scan 2",
+        provider=provider,
+        trigger=Scan.TriggerChoices.SCHEDULED,
+        state=StateChoices.FAILED,
+        tenant_id=tenant.id,
+        started_at="2024-01-02T00:00:00Z",
+    )
+    scan3 = Scan.objects.create(
+        name="Scan 3",
+        provider=provider2,
+        trigger=Scan.TriggerChoices.SCHEDULED,
+        state=StateChoices.AVAILABLE,
+        tenant_id=tenant.id,
+        started_at="2024-01-02T00:00:00Z",
+    )
+    return scan1, scan2, scan3
+
+
+@pytest.fixture
+def tasks_fixture(tenants_fixture):
+    tenant, *_ = tenants_fixture
+
+    task_runner_task1 = TaskResult.objects.create(
+        task_id="81a1b34b-ff6e-498e-979c-d6a83260167f",
+        task_name="task_runner_task1",
+        task_kwargs='{"kwarg1": "value1"}',
+        status="SUCCESS",
+    )
+    task_runner_task2 = TaskResult.objects.create(
+        task_id="4d0260a5-2e1f-4a34-a976-8c5acb9f5499",
+        task_name="task_runner_task1",
+        status="PENDING",
+    )
+    task1 = Task.objects.create(
+        id=task_runner_task1.task_id,
+        task_runner_task=task_runner_task1,
+        tenant_id=tenant.id,
+    )
+    task2 = Task.objects.create(
+        id=task_runner_task2.task_id,
+        task_runner_task=task_runner_task2,
+        tenant_id=tenant.id,
+    )
+
+    return task1, task2
+
+
+@pytest.fixture
+def resources_fixture(providers_fixture):
+    provider, *_ = providers_fixture
+
+    tags = [
+        ResourceTag.objects.create(
+            tenant_id=provider.tenant_id,
+            key="key",
+            value="value",
+        ),
+        ResourceTag.objects.create(
+            tenant_id=provider.tenant_id,
+            key="key2",
+            value="value2",
+        ),
+    ]
+
+    resource1 = Resource.objects.create(
+        tenant_id=provider.tenant_id,
+        provider=provider,
+        uid="arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0",
+        name="My Instance 1",
+        region="us-east-1",
+        service="ec2",
+        type="prowler-test",
+    )
+
+    resource1.upsert_or_delete_tags(tags)
+
+    resource2 = Resource.objects.create(
+        tenant_id=provider.tenant_id,
+        provider=provider,
+        uid="arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef1",
+        name="My Instance 2",
+        region="eu-west-1",
+        service="s3",
+        type="prowler-test",
+    )
+    resource2.upsert_or_delete_tags(tags)
+
+    resource3 = Resource.objects.create(
+        tenant_id=providers_fixture[1].tenant_id,
+        provider=providers_fixture[1],
+        uid="arn:aws:ec2:us-east-1:123456789012:bucket/i-1234567890abcdef2",
+        name="My Bucket 3",
+        region="us-east-1",
+        service="ec2",
+        type="test",
+    )
+
+    tags = [
+        ResourceTag.objects.create(
+            tenant_id=provider.tenant_id,
+            key="key3",
+            value="multi word value3",
+        ),
+    ]
+    resource3.upsert_or_delete_tags(tags)
+
+    return resource1, resource2, resource3
+
+
+@pytest.fixture
+def findings_fixture(scans_fixture, resources_fixture):
+    scan, *_ = scans_fixture
+    resource1, resource2, *_ = resources_fixture
+
+    finding1 = Finding.objects.create(
+        tenant_id=scan.tenant_id,
+        uid="test_finding_uid_1",
+        scan=scan,
+        delta=None,
+        status=Status.FAIL,
+        status_extended="test status extended ",
+        impact=Severity.critical,
+        impact_extended="test impact extended one",
+        severity=Severity.critical,
+        raw_result={
+            "status": Status.FAIL,
+            "impact": Severity.critical,
+            "severity": Severity.critical,
+        },
+        tags={"test": "dev-qa"},
+        check_id="test_check_id",
+        check_metadata={
+            "CheckId": "test_check_id",
+            "Description": "test description apple sauce",
+        },
+    )
+
+    finding1.add_resources([resource1])
+
+    finding2 = Finding.objects.create(
+        tenant_id=scan.tenant_id,
+        uid="test_finding_uid_2",
+        scan=scan,
+        delta="new",
+        status=Status.FAIL,
+        status_extended="Load Balancer exposed to internet",
+        impact=Severity.medium,
+        impact_extended="test impact extended two",
+        severity=Severity.medium,
+        raw_result={
+            "status": Status.FAIL,
+            "impact": Severity.medium,
+            "severity": Severity.medium,
+        },
+        tags={"test": "test"},
+        check_id="test_check_id",
+        check_metadata={
+            "CheckId": "test_check_id",
+            "Description": "test description orange juice",
+        },
+    )
+
+    finding2.add_resources([resource2])
+
+    return finding1, finding2
+
+
+@pytest.fixture
+def compliance_overviews_fixture(scans_fixture, tenants_fixture):
+    tenant = tenants_fixture[0]
+    scan1, scan2, scan3 = scans_fixture
+
+    compliance_overview1 = ComplianceOverview.objects.create(
+        tenant=tenant,
+        scan=scan1,
+        compliance_id="aws_account_security_onboarding_aws",
+        framework="AWS-Account-Security-Onboarding",
+        version="1.0",
+        description="Description for AWS Account Security Onboarding",
+        region="eu-west-1",
+        requirements={
+            "requirement1": {
+                "name": "Requirement 1",
+                "checks": {"check1.1": "PASS", "check1.2": None},
+                "status": "PASS",
+                "attributes": [],
+                "description": "Description of requirement 1",
+                "checks_status": {
+                    "total": 2,
+                    "failed": 0,
+                    "passed": 2,
+                },
+            },
+            "requirement2": {
+                "name": "Requirement 2",
+                "checks": {"check2.1": "PASS", "check2.2": "PASS"},
+                "status": "PASS",
+                "attributes": [],
+                "description": "Description of requirement 2",
+                "checks_status": {
+                    "total": 2,
+                    "failed": 0,
+                    "passed": 2,
+                },
+            },
+            "requirement3": {
+                "name": "Requirement 3 - manual",
+                "checks": {},
+                "status": "PASS",
+                "attributes": [],
+                "description": "Description of requirement 2",
+                "checks_status": {
+                    "total": 0,
+                    "failed": 0,
+                    "passed": 0,
+                },
+            },
+        },
+        requirements_passed=2,
+        requirements_failed=0,
+        requirements_manual=1,
+        total_requirements=3,
+    )
+
+    compliance_overview2 = ComplianceOverview.objects.create(
+        tenant=tenant,
+        scan=scan1,
+        compliance_id="aws_account_security_onboarding_aws",
+        framework="AWS-Account-Security-Onboarding",
+        version="1.0",
+        description="Description for AWS Account Security Onboarding",
+        region="eu-west-2",
+        requirements={
+            "requirement1": {
+                "name": "Requirement 1",
+                "checks": {"check1.1": "PASS", "check1.2": None},
+                "status": "PASS",
+                "attributes": [],
+                "description": "Description of requirement 1",
+                "checks_status": {
+                    "total": 2,
+                    "failed": 0,
+                    "passed": 2,
+                },
+            },
+            "requirement2": {
+                "name": "Requirement 2",
+                "checks": {"check2.1": "PASS", "check2.2": "FAIL"},
+                "status": "FAIL",
+                "attributes": [],
+                "description": "Description of requirement 2",
+                "checks_status": {
+                    "total": 2,
+                    "failed": 1,
+                    "passed": 1,
+                },
+            },
+            "requirement3": {
+                "name": "Requirement 3 - manual",
+                "checks": {},
+                "status": "PASS",
+                "attributes": [],
+                "description": "Description of requirement 2",
+                "checks_status": {
+                    "total": 0,
+                    "failed": 0,
+                    "passed": 0,
+                },
+            },
+        },
+        requirements_passed=1,
+        requirements_failed=1,
+        requirements_manual=1,
+        total_requirements=3,
+    )
+
+    # Return the created compliance overviews
+    return compliance_overview1, compliance_overview2
+
+
+def get_api_tokens(
+    api_client, user_email: str, user_password: str, tenant_id: str | None = None
+) -> tuple[str, str]:
+    json_body = {
+        "data": {
+            "type": "tokens",
+            "attributes": {
+                "email": user_email,
+                "password": user_password,
+            },
+        }
+    }
+    if tenant_id is not None:
+        json_body["data"]["attributes"]["tenant_id"] = tenant_id
+    response = api_client.post(
+        reverse("token-obtain"),
+        data=json_body,
+        format="vnd.api+json",
+    )
+    return response.json()["data"]["attributes"]["access"], response.json()["data"][
+        "attributes"
+    ]["refresh"]
+
+
+def get_authorization_header(access_token: str) -> dict:
+    return {"Authorization": f"Bearer {access_token}"}
diff --git a/api/src/backend/manage.py b/api/src/backend/manage.py
new file mode 100755
index 0000000000..590fbeb713
--- /dev/null
+++ b/api/src/backend/manage.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""Django's command-line utility for administrative tasks."""
+
+import os
+import sys
+
+
+def main():
+    """Run administrative tasks."""
+
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.django.production")
+    try:
+        from django.core.management import execute_from_command_line
+    except ImportError as exc:
+        raise ImportError(
+            "Couldn't import Django. Are you sure it's installed and "
+            "available on your PYTHONPATH environment variable? Did you "
+            "forget to activate a virtual environment?"
+        ) from exc
+    execute_from_command_line(sys.argv)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/api/src/backend/pytest.ini b/api/src/backend/pytest.ini
new file mode 100644
index 0000000000..ebce5cf212
--- /dev/null
+++ b/api/src/backend/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+DJANGO_SETTINGS_MODULE = config.django.testing
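+# -rP: add a summary section with the captured output of passed tests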
+addopts = -rP
diff --git a/api/src/backend/tasks/__init__.py b/api/src/backend/tasks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/tasks/beat.py b/api/src/backend/tasks/beat.py
new file mode 100644
index 0000000000..b53fb700c9
--- /dev/null
+++ b/api/src/backend/tasks/beat.py
@@ -0,0 +1,31 @@
+import json
+from datetime import timedelta
+
+from django_celery_beat.models import IntervalSchedule, PeriodicTask
+
+from api.models import Provider
+
+
+def schedule_provider_scan(provider_instance: Provider):
+    """Create a daily (24h) scheduled scan task for the given provider."""
+    schedule, _ = IntervalSchedule.objects.get_or_create(
+        every=24,
+        period=IntervalSchedule.HOURS,
+    )
+
+    # Create a unique name for the periodic task
+    task_name = f"scan-perform-scheduled-{provider_instance.id}"
+
+    # Schedule the task
+    PeriodicTask.objects.create(
+        interval=schedule,
+        name=task_name,
+        task="scan-perform-scheduled",
+        kwargs=json.dumps(
+            {
+                "tenant_id": str(provider_instance.tenant_id),
+                "provider_id": str(provider_instance.id),
+            }
+        ),
+        start_time=provider_instance.inserted_at + timedelta(hours=24),
+        one_off=False,
+    )
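+
+
+# Illustrative usage sketch (hypothetical; the real call site lives in the API
+# layer): scheduling a provider's daily scan and inspecting the resulting task.
+def _example_schedule(provider: Provider) -> None:
+    schedule_provider_scan(provider)
+    task = PeriodicTask.objects.get(name=f"scan-perform-scheduled-{provider.id}")
+    assert json.loads(task.kwargs)["provider_id"] == str(provider.id)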
diff --git a/api/src/backend/tasks/jobs/connection.py b/api/src/backend/tasks/jobs/connection.py
new file mode 100644
index 0000000000..1583f8a75f
--- /dev/null
+++ b/api/src/backend/tasks/jobs/connection.py
@@ -0,0 +1,41 @@
+from datetime import datetime, timezone
+
+from celery.utils.log import get_task_logger
+
+from api.models import Provider
+from api.utils import prowler_provider_connection_test
+
+logger = get_task_logger(__name__)
+
+
+def check_provider_connection(provider_id: str):
+    """
+    Business logic to check the connection status of a provider.
+
+    Args:
+        provider_id (str): The primary key of the Provider instance to check.
+
+    Returns:
+        dict: A dictionary containing:
+            - 'connected' (bool): Indicates whether the provider is successfully connected.
+            - 'error' (str or None): The error message if the connection failed, otherwise `None`.
+
+    Raises:
+        ValueError: If the provider type is not supported.
+        Provider.DoesNotExist: If the provider does not exist.
+    """
+    provider_instance = Provider.objects.get(pk=provider_id)
+    try:
+        connection_result = prowler_provider_connection_test(provider_instance)
+    except Exception as e:
+        logger.warning(
+            f"Unexpected exception checking {provider_instance.provider} provider connection: {e}"
+        )
+        raise
+
+    provider_instance.connected = connection_result.is_connected
+    provider_instance.connection_last_checked_at = datetime.now(tz=timezone.utc)
+    provider_instance.save()
+
+    connection_error = f"{connection_result.error}" if connection_result.error else None
+    return {"connected": connection_result.is_connected, "error": connection_error}
diff --git a/api/src/backend/tasks/jobs/deletion.py b/api/src/backend/tasks/jobs/deletion.py
new file mode 100644
index 0000000000..b203cf113e
--- /dev/null
+++ b/api/src/backend/tasks/jobs/deletion.py
@@ -0,0 +1,25 @@
+from celery.utils.log import get_task_logger
+
+logger = get_task_logger(__name__)
+
+
+def delete_instance(model, pk: str):
+    """
+    Deletes an instance of the specified model.
+
+    This function retrieves an instance of the provided model using its primary key
+    and deletes it from the database.
+
+    Args:
+        model (Model): The Django model class from which to delete an instance.
+        pk (str): The primary key of the instance to delete.
+
+    Returns:
+        tuple: A tuple containing the number of objects deleted and a dictionary
+               with the count of deleted objects per model,
+               including related models if applicable.
+
+    Raises:
+        model.DoesNotExist: If no instance with the provided primary key exists.
+    """
+    return model.objects.get(pk=pk).delete()
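+
+
+# Illustrative usage sketch (hypothetical caller): Django's delete() returns the
+# total number of rows removed plus a per-model breakdown, so cascading deletes
+# (e.g. a provider's scans and findings) are visible in the result.
+def _example_delete_provider(provider_pk: str) -> None:
+    from api.models import Provider
+
+    deleted_count, deleted_by_model = delete_instance(Provider, provider_pk)
+    logger.info(f"Deleted {deleted_count} rows: {deleted_by_model}")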
diff --git a/api/src/backend/tasks/jobs/scan.py b/api/src/backend/tasks/jobs/scan.py
new file mode 100644
index 0000000000..2b2fea8f17
--- /dev/null
+++ b/api/src/backend/tasks/jobs/scan.py
@@ -0,0 +1,323 @@
+import time
+from copy import deepcopy
+from datetime import datetime, timezone
+
+from celery.utils.log import get_task_logger
+from prowler.lib.outputs.finding import Finding as ProwlerFinding
+from prowler.lib.scan.scan import Scan as ProwlerScan
+
+from api.compliance import (
+    PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE,
+    generate_scan_compliance,
+)
+from api.db_utils import tenant_transaction
+from api.models import (
+    Provider,
+    Scan,
+    Finding,
+    Resource,
+    ResourceTag,
+    StatusChoices as FindingStatus,
+    StateChoices,
+    ComplianceOverview,
+)
+from api.utils import initialize_prowler_provider
+from api.v1.serializers import ScanTaskSerializer
+
+logger = get_task_logger(__name__)
+
+
+def _create_finding_delta(
+    last_status: FindingStatus | str | None, new_status: FindingStatus | None
+) -> Finding.DeltaChoices | None:
+    """
+    Determine the delta status of a finding based on its previous and current status.
+
+    Args:
+        last_status (FindingStatus | str | None): The previous status of the finding; may be None or a string representation.
+        new_status (FindingStatus | None): The current status of the finding.
+
+    Returns:
+        Finding.DeltaChoices: The delta status indicating if the finding is new, changed, or unchanged.
+            - Returns `Finding.DeltaChoices.NEW` if `last_status` is None.
+            - Returns `Finding.DeltaChoices.CHANGED` if `last_status` and `new_status` are different.
+            - Returns `None` if the status hasn't changed.
+    """
+    if last_status is None:
+        return Finding.DeltaChoices.NEW
+    return Finding.DeltaChoices.CHANGED if last_status != new_status else None
+
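+# Illustrative examples (hypothetical values) of the delta rules above:
+#   _create_finding_delta(None, FindingStatus.PASS)               -> DeltaChoices.NEW
+#   _create_finding_delta(FindingStatus.PASS, FindingStatus.FAIL) -> DeltaChoices.CHANGED
+#   _create_finding_delta(FindingStatus.PASS, FindingStatus.PASS) -> None
+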
+
+def _store_resources(
+    finding: ProwlerFinding, tenant_id: str, provider_instance: Provider
+) -> tuple[Resource, tuple[str, str]]:
+    """
+    Store resource information from a finding, including tags, in the database.
+
+    Args:
+        finding (ProwlerFinding): The finding object containing resource information.
+        tenant_id (str): The ID of the tenant owning the resource.
+        provider_instance (Provider): The provider instance associated with the resource.
+
+    Returns:
+        tuple:
+            - Resource: The resource instance created or retrieved from the database.
+            - tuple[str, str]: A tuple containing the resource UID and region.
+
+    """
+    with tenant_transaction(tenant_id):
+        resource_instance, created = Resource.objects.get_or_create(
+            tenant_id=tenant_id,
+            provider=provider_instance,
+            uid=finding.resource_uid,
+            defaults={
+                "region": finding.region,
+                "service": finding.service_name,
+                "type": finding.resource_type,
+            },
+        )
+
+        if not created:
+            resource_instance.region = finding.region
+            resource_instance.service = finding.service_name
+            resource_instance.type = finding.resource_type
+            resource_instance.save()
+    with tenant_transaction(tenant_id):
+        tags = [
+            ResourceTag.objects.get_or_create(
+                tenant_id=tenant_id, key=key, value=value
+            )[0]
+            for key, value in finding.resource_tags.items()
+        ]
+        resource_instance.upsert_or_delete_tags(tags=tags)
+    return resource_instance, (resource_instance.uid, resource_instance.region)
+
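+# Illustrative usage sketch (hypothetical caller): the (uid, region) tuple
+# returned alongside the resource feeds the unique-resource count of a scan.
+#
+#     resource, uid_region = _store_resources(finding, tenant_id, provider)
+#     unique_resources.add(uid_region)
+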
+
+def perform_prowler_scan(
+    tenant_id: str, scan_id: str, provider_id: str, checks_to_execute: list[str] | None = None
+):
+    """
+    Perform a scan using Prowler and store the findings and resources in the database.
+
+    Args:
+        tenant_id (str): The ID of the tenant for which the scan is performed.
+        scan_id (str): The ID of the scan instance.
+        provider_id (str): The ID of the provider to scan.
+        checks_to_execute (list[str], optional): A list of specific checks to execute. Defaults to None.
+
+    Returns:
+        dict: Serialized data of the completed scan instance.
+
+    Raises:
+        ValueError: If the provider cannot be connected.
+
+    """
+    generate_compliance = False
+    check_status_by_region = {}
+    exception = None
+    unique_resources = set()
+    start_time = time.time()
+
+    with tenant_transaction(tenant_id):
+        provider_instance = Provider.objects.get(pk=provider_id)
+        scan_instance = Scan.objects.get(pk=scan_id)
+        scan_instance.state = StateChoices.EXECUTING
+        scan_instance.started_at = datetime.now(tz=timezone.utc)
+        scan_instance.save()
+
+    try:
+        with tenant_transaction(tenant_id):
+            try:
+                prowler_provider = initialize_prowler_provider(provider_instance)
+                provider_instance.connected = True
+            except Exception as e:
+                provider_instance.connected = False
+                raise ValueError(
+                    f"Provider {provider_instance.provider} is not connected: {e}"
+                ) from e
+            finally:
+                provider_instance.connection_last_checked_at = datetime.now(
+                    tz=timezone.utc
+                )
+                provider_instance.save()
+
+        generate_compliance = provider_instance.provider != Provider.ProviderChoices.GCP
+        prowler_scan = ProwlerScan(provider=prowler_provider, checks=checks_to_execute)
+
+        resource_cache = {}
+        tag_cache = {}
+        last_status_cache = {}
+
+        for progress, findings in prowler_scan.scan():
+            with tenant_transaction(tenant_id):
+                for finding in findings:
+                    # Process resource
+                    resource_uid = finding.resource_uid
+                    if resource_uid not in resource_cache:
+                        # Get or create the resource
+                        resource_instance, _ = Resource.objects.get_or_create(
+                            tenant_id=tenant_id,
+                            provider=provider_instance,
+                            uid=resource_uid,
+                            defaults={
+                                "region": finding.region,
+                                "service": finding.service_name,
+                                "type": finding.resource_type,
+                                "name": finding.resource_name,
+                            },
+                        )
+                        resource_cache[resource_uid] = resource_instance
+                    else:
+                        resource_instance = resource_cache[resource_uid]
+
+                    # Update resource fields if necessary
+                    updated_fields = []
+                    if resource_instance.region != finding.region:
+                        resource_instance.region = finding.region
+                        updated_fields.append("region")
+                    if resource_instance.service != finding.service_name:
+                        resource_instance.service = finding.service_name
+                        updated_fields.append("service")
+                    if resource_instance.type != finding.resource_type:
+                        resource_instance.type = finding.resource_type
+                        updated_fields.append("type")
+                    if updated_fields:
+                        resource_instance.save(update_fields=updated_fields)
+
+                    # Update tags
+                    tags = []
+                    for key, value in finding.resource_tags.items():
+                        tag_key = (key, value)
+                        if tag_key not in tag_cache:
+                            tag_instance, _ = ResourceTag.objects.get_or_create(
+                                tenant_id=tenant_id, key=key, value=value
+                            )
+                            tag_cache[tag_key] = tag_instance
+                        else:
+                            tag_instance = tag_cache[tag_key]
+                        tags.append(tag_instance)
+                    resource_instance.upsert_or_delete_tags(tags=tags)
+
+                    unique_resources.add(
+                        (resource_instance.uid, resource_instance.region)
+                    )
+
+                    # Process finding
+                    finding_uid = finding.uid
+                    if finding_uid not in last_status_cache:
+                        most_recent_finding = (
+                            Finding.objects.filter(uid=finding_uid)
+                            .order_by("-id")
+                            .values("status")
+                            .first()
+                        )
+                        last_status = (
+                            most_recent_finding["status"]
+                            if most_recent_finding
+                            else None
+                        )
+                        last_status_cache[finding_uid] = last_status
+                    else:
+                        last_status = last_status_cache[finding_uid]
+
+                    status = FindingStatus[finding.status]
+                    delta = _create_finding_delta(last_status, status)
+
+                    # Create the finding
+                    finding_instance = Finding.objects.create(
+                        tenant_id=tenant_id,
+                        uid=finding_uid,
+                        delta=delta,
+                        check_metadata=finding.get_metadata(),
+                        status=status,
+                        status_extended=finding.status_extended,
+                        severity=finding.severity,
+                        impact=finding.severity,
+                        raw_result=finding.raw,
+                        check_id=finding.check_id,
+                        scan=scan_instance,
+                    )
+                    finding_instance.add_resources([resource_instance])
+
+                    # Update compliance data if applicable
+                    if not generate_compliance or finding.status.value == "MUTED":
+                        continue
+
+                    region_dict = check_status_by_region.setdefault(finding.region, {})
+                    current_status = region_dict.get(finding.check_id)
+                    if current_status == "FAIL":
+                        continue
+                    region_dict[finding.check_id] = finding.status.value
+
+            # Update scan progress
+            with tenant_transaction(tenant_id):
+                scan_instance.progress = progress
+                scan_instance.save()
+
+        scan_instance.state = StateChoices.COMPLETED
+
+    except Exception as e:
+        logger.error(f"Error performing scan {scan_id}: {e}")
+        exception = e
+        scan_instance.state = StateChoices.FAILED
+
+    finally:
+        with tenant_transaction(tenant_id):
+            scan_instance.duration = time.time() - start_time
+            scan_instance.completed_at = datetime.now(tz=timezone.utc)
+            scan_instance.unique_resource_count = len(unique_resources)
+            scan_instance.save()
+
+    if generate_compliance:
+        try:
+            regions = prowler_provider.get_regions()
+        except AttributeError:
+            regions = set()
+
+        compliance_template = PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE[
+            provider_instance.provider
+        ]
+        compliance_overview_by_region = {
+            region: deepcopy(compliance_template) for region in regions
+        }
+
+        for region, check_status in check_status_by_region.items():
+            compliance_data = compliance_overview_by_region.setdefault(
+                region, deepcopy(compliance_template)
+            )
+            for check_name, status in check_status.items():
+                generate_scan_compliance(
+                    compliance_data,
+                    provider_instance.provider,
+                    check_name,
+                    status,
+                )
+
+        # Prepare compliance overview objects
+        compliance_overview_objects = []
+        for region, compliance_data in compliance_overview_by_region.items():
+            for compliance_id, compliance in compliance_data.items():
+                compliance_overview_objects.append(
+                    ComplianceOverview(
+                        tenant_id=tenant_id,
+                        scan=scan_instance,
+                        region=region,
+                        compliance_id=compliance_id,
+                        framework=compliance["framework"],
+                        version=compliance["version"],
+                        description=compliance["description"],
+                        requirements=compliance["requirements"],
+                        requirements_passed=compliance["requirements_status"]["passed"],
+                        requirements_failed=compliance["requirements_status"]["failed"],
+                        requirements_manual=compliance["requirements_status"]["manual"],
+                        total_requirements=compliance["total_requirements"],
+                    )
+                )
+        with tenant_transaction(tenant_id):
+            ComplianceOverview.objects.bulk_create(compliance_overview_objects)
+
+    if exception is not None:
+        raise exception
+
+    serializer = ScanTaskSerializer(instance=scan_instance)
+    return serializer.data
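+
+
+# Illustrative sketch (hypothetical data) of the regional aggregation above: the
+# first FAIL recorded for a check in a region wins and is never overwritten by a
+# later PASS.
+#
+#     check_status_by_region = {
+#         "eu-west-1": {"iam_check": "FAIL", "s3_check": "PASS"},
+#         "us-east-1": {"iam_check": "PASS"},
+#     }
+#
+# Each region then receives a deep copy of the provider's compliance template,
+# and generate_scan_compliance() folds every (check, status) pair into it before
+# the ComplianceOverview rows are bulk-created.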
diff --git a/api/src/backend/tasks/tasks.py b/api/src/backend/tasks/tasks.py
new file mode 100644
index 0000000000..4fc933127e
--- /dev/null
+++ b/api/src/backend/tasks/tasks.py
@@ -0,0 +1,112 @@
+from celery import shared_task
+
+from api.db_utils import tenant_transaction
+from api.decorators import set_tenant
+from api.models import Provider, Scan
+from config.celery import RLSTask
+from tasks.jobs.connection import check_provider_connection
+from tasks.jobs.deletion import delete_instance
+from tasks.jobs.scan import perform_prowler_scan
+
+
+@shared_task(base=RLSTask, name="provider-connection-check")
+@set_tenant
+def check_provider_connection_task(provider_id: str):
+    """
+    Task to check the connection status of a provider.
+
+    Args:
+        provider_id (str): The primary key of the Provider instance to check.
+
+    Returns:
+        dict: A dictionary containing:
+            - 'connected' (bool): Indicates whether the provider is successfully connected.
+            - 'error' (str or None): The error message if the connection failed, otherwise `None`.
+    """
+    return check_provider_connection(provider_id=provider_id)
+
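+# Illustrative dispatch sketch (hypothetical; assumes the @set_tenant decorator
+# pops tenant_id from the task kwargs before the body runs):
+#
+#     check_provider_connection_task.delay(
+#         provider_id=str(provider.id), tenant_id=str(tenant.id)
+#     )
+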
+
+@shared_task(base=RLSTask, name="provider-deletion")
+@set_tenant
+def delete_provider_task(provider_id: str):
+    """
+    Task to delete a specific Provider instance.
+
+    Args:
+        provider_id (str): The primary key of the `Provider` instance to be deleted.
+
+    Returns:
+        tuple: A tuple containing:
+            - The number of instances deleted.
+            - A dictionary with the count of deleted instances per model,
+              including related models if cascading deletes were triggered.
+    """
+    return delete_instance(model=Provider, pk=provider_id)
+
+
+@shared_task(base=RLSTask, name="scan-perform", queue="scans")
+def perform_scan_task(
+    tenant_id: str, scan_id: str, provider_id: str, checks_to_execute: list[str] | None = None
+):
+    """
+    Task to perform a Prowler scan on a given provider.
+
+    This task runs a Prowler scan on the provider identified by `provider_id`
+    under the tenant identified by `tenant_id`. The scan will use the `scan_id`
+    for tracking purposes.
+
+    Args:
+        tenant_id (str): The tenant ID under which the scan is being performed.
+        scan_id (str): The ID of the scan to be performed.
+        provider_id (str): The primary key of the Provider instance to scan.
+        checks_to_execute (list[str], optional): A list of specific checks to perform during the scan. Defaults to None.
+
+    Returns:
+        dict: The result of the scan execution, typically including the status and results of the performed checks.
+    """
+    return perform_prowler_scan(
+        tenant_id=tenant_id,
+        scan_id=scan_id,
+        provider_id=provider_id,
+        checks_to_execute=checks_to_execute,
+    )
+
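+# Illustrative dispatch sketch (hypothetical caller): the task is routed to the
+# dedicated "scans" queue declared in the decorator above.
+#
+#     perform_scan_task.apply_async(
+#         kwargs={
+#             "tenant_id": str(tenant.id),
+#             "scan_id": str(scan.id),
+#             "provider_id": str(provider.id),
+#         }
+#     )
+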
+
+@shared_task(base=RLSTask, bind=True, name="scan-perform-scheduled", queue="scans")
+def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
+    """
+    Task to perform a scheduled Prowler scan on a given provider.
+
+    This task creates and executes a Prowler scan for the provider identified by `provider_id`
+    under the tenant identified by `tenant_id`. It initiates a new scan instance with the task ID
+    for tracking purposes. This task is intended to be run on a schedule (e.g., daily) to
+    automatically perform scans without manual intervention.
+
+    Args:
+        self: The task instance (automatically passed when bind=True).
+        tenant_id (str): The tenant ID under which the scan is being performed.
+        provider_id (str): The primary key of the Provider instance to scan.
+
+    Returns:
+        dict: The result of the scan execution, typically including the status and results
+        of the performed checks.
+
+    """
+    task_id = self.request.id
+
+    with tenant_transaction(tenant_id):
+        provider_instance = Provider.objects.get(pk=provider_id)
+
+        scan_instance = Scan.objects.create(
+            tenant_id=tenant_id,
+            name="Daily scheduled scan",
+            provider=provider_instance,
+            trigger=Scan.TriggerChoices.SCHEDULED,
+            task_id=task_id,
+        )
+
+    return perform_prowler_scan(
+        tenant_id=tenant_id,
+        scan_id=str(scan_instance.id),
+        provider_id=provider_id,
+    )
diff --git a/api/src/backend/tasks/tests/__init__.py b/api/src/backend/tasks/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/src/backend/tasks/tests/test_connection.py b/api/src/backend/tasks/tests/test_connection.py
new file mode 100644
index 0000000000..75ba6dc2eb
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_connection.py
@@ -0,0 +1,72 @@
+from datetime import datetime, timezone
+from unittest.mock import patch, MagicMock
+
+import pytest
+
+from api.models import Provider
+from tasks.jobs.connection import check_provider_connection
+
+
+@pytest.mark.parametrize(
+    "provider_data",
+    [
+        {"provider": "aws", "uid": "123456789012", "alias": "aws"},
+    ],
+)
+@patch("tasks.jobs.connection.prowler_provider_connection_test")
+@pytest.mark.django_db
+def test_check_provider_connection(
+    mock_provider_connection_test, tenants_fixture, provider_data
+):
+    provider = Provider.objects.create(**provider_data, tenant_id=tenants_fixture[0].id)
+
+    mock_test_connection_result = MagicMock()
+    mock_test_connection_result.is_connected = True
+
+    mock_provider_connection_test.return_value = mock_test_connection_result
+
+    check_provider_connection(
+        provider_id=str(provider.id),
+    )
+    provider.refresh_from_db()
+
+    mock_provider_connection_test.assert_called_once()
+    assert provider.connected is True
+    assert provider.connection_last_checked_at is not None
+    assert provider.connection_last_checked_at <= datetime.now(tz=timezone.utc)
+
+
+@patch("tasks.jobs.connection.Provider.objects.get")
+@pytest.mark.django_db
+def test_check_provider_connection_unsupported_provider(mock_provider_get):
+    mock_provider_instance = MagicMock()
+    mock_provider_instance.provider = "UNSUPPORTED_PROVIDER"
+    mock_provider_get.return_value = mock_provider_instance
+
+    with pytest.raises(
+        ValueError, match="Provider type UNSUPPORTED_PROVIDER not supported"
+    ):
+        check_provider_connection("provider_id")
+
+
+@patch("tasks.jobs.connection.Provider.objects.get")
+@patch("tasks.jobs.connection.prowler_provider_connection_test")
+@pytest.mark.django_db
+def test_check_provider_connection_exception(
+    mock_provider_connection_test, mock_provider_get
+):
+    mock_provider_instance = MagicMock()
+    mock_provider_instance.provider = Provider.ProviderChoices.AWS.value
+    mock_provider_get.return_value = mock_provider_instance
+
+    mock_provider_connection_test.return_value = MagicMock()
+    mock_provider_connection_test.return_value.is_connected = False
+    mock_provider_connection_test.return_value.error = Exception()
+
+    result = check_provider_connection(provider_id="provider_id")
+
+    assert result["connected"] is False
+    assert result["error"] is not None
+
+    mock_provider_instance.save.assert_called_once()
+    assert mock_provider_instance.connected is False
diff --git a/api/src/backend/tasks/tests/test_deletion.py b/api/src/backend/tasks/tests/test_deletion.py
new file mode 100644
index 0000000000..630d1d1fa1
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_deletion.py
@@ -0,0 +1,22 @@
+import pytest
+from django.core.exceptions import ObjectDoesNotExist
+
+from api.models import Provider
+from tasks.jobs.deletion import delete_instance
+
+
+@pytest.mark.django_db
+class TestDeleteInstance:
+    def test_delete_instance_success(self, providers_fixture):
+        instance = providers_fixture[0]
+        result = delete_instance(Provider, instance.id)
+
+        assert result
+        with pytest.raises(ObjectDoesNotExist):
+            Provider.objects.get(pk=instance.id)
+
+    def test_delete_instance_does_not_exist(self):
+        non_existent_pk = "babf6796-cfcc-4fd3-9dcf-88d012247645"
+
+        with pytest.raises(ObjectDoesNotExist):
+            delete_instance(Provider, non_existent_pk)
diff --git a/api/src/backend/tasks/tests/test_scan.py b/api/src/backend/tasks/tests/test_scan.py
new file mode 100644
index 0000000000..798ebcb354
--- /dev/null
+++ b/api/src/backend/tasks/tests/test_scan.py
@@ -0,0 +1,360 @@
+from unittest.mock import patch, MagicMock
+
+import pytest
+
+from api.models import (
+    StateChoices,
+    Severity,
+    Finding,
+    Resource,
+    StatusChoices,
+    Provider,
+)
+from tasks.jobs.scan import (
+    perform_prowler_scan,
+    _create_finding_delta,
+    _store_resources,
+)
+
+
+@pytest.mark.django_db
+class TestPerformScan:
+    def test_perform_prowler_scan_success(
+        self,
+        tenants_fixture,
+        scans_fixture,
+        providers_fixture,
+    ):
+        with (
+            patch("api.db_utils.tenant_transaction"),
+            patch(
+                "tasks.jobs.scan.initialize_prowler_provider"
+            ) as mock_initialize_prowler_provider,
+            patch("tasks.jobs.scan.ProwlerScan") as mock_prowler_scan_class,
+            patch(
+                "tasks.jobs.scan.PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE",
+                new_callable=dict,
+            ) as mock_prowler_compliance_overview_template,
+            patch(
+                "api.compliance.PROWLER_CHECKS", new_callable=dict
+            ) as mock_prowler_checks,
+        ):
+            # Set up the mock PROWLER_CHECKS
+            mock_prowler_checks["aws"] = {
+                "check1": {"compliance1"},
+                "check2": {"compliance1", "compliance2"},
+            }
+
+            # Set up the mock PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE
+            mock_prowler_compliance_overview_template["aws"] = {
+                "compliance1": {
+                    "framework": "Framework 1",
+                    "version": "1.0",
+                    "provider": "aws",
+                    "description": "Description of compliance1",
+                    "requirements": {
+                        "requirement1": {
+                            "name": "Requirement 1",
+                            "description": "Description of requirement 1",
+                            "attributes": [],
+                            "checks": {"check1": None, "check2": None},
+                            "checks_status": {
+                                "pass": 0,
+                                "fail": 0,
+                                "total": 2,
+                            },
+                            "status": "PASS",
+                        }
+                    },
+                    "requirements_status": {
+                        "passed": 1,
+                        "failed": 0,
+                        "manual": 0,
+                    },
+                    "total_requirements": 1,
+                }
+            }
+
+            # Ensure the database is empty
+            assert Finding.objects.count() == 0
+            assert Resource.objects.count() == 0
+
+            tenant = tenants_fixture[0]
+            scan = scans_fixture[0]
+            provider = providers_fixture[0]
+
+            # Ensure the provider type is 'aws' to match our mocks
+            provider.provider = Provider.ProviderChoices.AWS
+            provider.save()
+
+            tenant_id = str(tenant.id)
+            scan_id = str(scan.id)
+            provider_id = str(provider.id)
+            checks_to_execute = ["check1", "check2"]
+
+            # Mock the findings returned by the prowler scan
+            finding = MagicMock()
+            finding.uid = "this_is_a_test_finding_id"
+            finding.status = StatusChoices.PASS
+            finding.status_extended = "test status extended"
+            finding.severity = Severity.medium
+            finding.check_id = "check1"
+            finding.get_metadata.return_value = {"key": "value"}
+            finding.resource_uid = "resource_uid"
+            finding.resource_name = "resource_name"
+            finding.region = "region"
+            finding.service_name = "service_name"
+            finding.resource_type = "resource_type"
+            finding.resource_tags = {"tag1": "value1", "tag2": "value2"}
+            finding.raw = {}
+
+            # Mock the ProwlerScan instance
+            mock_prowler_scan_instance = MagicMock()
+            mock_prowler_scan_instance.scan.return_value = [(100, [finding])]
+            mock_prowler_scan_class.return_value = mock_prowler_scan_instance
+
+            # Mock prowler_provider.get_regions()
+            mock_prowler_provider_instance = MagicMock()
+            mock_prowler_provider_instance.get_regions.return_value = ["region"]
+            mock_initialize_prowler_provider.return_value = (
+                mock_prowler_provider_instance
+            )
+
+            # Call the function under test
+            perform_prowler_scan(tenant_id, scan_id, provider_id, checks_to_execute)
+
+        # Refresh instances from the database
+        scan.refresh_from_db()
+        scan_finding = Finding.objects.get(scan=scan)
+        scan_resource = Resource.objects.get(provider=provider)
+
+        # Assertions
+        assert scan.tenant == tenant
+        assert scan.provider == provider
+        assert scan.state == StateChoices.COMPLETED
+        assert scan.completed_at is not None
+        assert scan.duration is not None
+        assert scan.started_at is not None
+        assert scan.unique_resource_count == 1
+        assert scan.progress == 100
+
+        assert scan_finding.uid == finding.uid
+        assert scan_finding.status == finding.status
+        assert scan_finding.status_extended == finding.status_extended
+        assert scan_finding.severity == finding.severity
+        assert scan_finding.check_id == finding.check_id
+        assert scan_finding.raw_result == finding.raw
+
+        assert scan_resource.tenant == tenant
+        assert scan_resource.uid == finding.resource_uid
+        assert scan_resource.region == finding.region
+        assert scan_resource.service == finding.service_name
+        assert scan_resource.type == finding.resource_type
+        assert scan_resource.name == finding.resource_name
+
+        # Assert that the resource tags have been created and associated
+        tags = scan_resource.tags.all()
+        assert tags.count() == 2
+        tag_keys = {tag.key for tag in tags}
+        tag_values = {tag.value for tag in tags}
+        assert tag_keys == set(finding.resource_tags.keys())
+        assert tag_values == set(finding.resource_tags.values())
+
+    @patch("tasks.jobs.scan.ProwlerScan")
+    @patch(
+        "tasks.jobs.scan.initialize_prowler_provider",
+        side_effect=Exception("Connection error"),
+    )
+    @patch("api.db_utils.tenant_transaction")
+    def test_perform_prowler_scan_no_connection(
+        self,
+        mock_tenant_transaction,
+        mock_initialize_prowler_provider,
+        mock_prowler_scan_class,
+        tenants_fixture,
+        scans_fixture,
+        providers_fixture,
+    ):
+        tenant = tenants_fixture[0]
+        scan = scans_fixture[0]
+        provider = providers_fixture[0]
+
+        tenant_id = str(tenant.id)
+        scan_id = str(scan.id)
+        provider_id = str(provider.id)
+        checks_to_execute = ["check1", "check2"]
+
+        with pytest.raises(ValueError):
+            perform_prowler_scan(tenant_id, scan_id, provider_id, checks_to_execute)
+
+        scan.refresh_from_db()
+        assert scan.state == StateChoices.FAILED
+
+    @pytest.mark.parametrize(
+        "last_status, new_status, expected_delta",
+        [
+            (None, None, Finding.DeltaChoices.NEW),
+            (None, StatusChoices.PASS, Finding.DeltaChoices.NEW),
+            (StatusChoices.PASS, StatusChoices.PASS, None),
+            (StatusChoices.PASS, StatusChoices.FAIL, Finding.DeltaChoices.CHANGED),
+            (StatusChoices.FAIL, StatusChoices.PASS, Finding.DeltaChoices.CHANGED),
+        ],
+    )
+    def test_create_finding_delta(self, last_status, new_status, expected_delta):
+        assert _create_finding_delta(last_status, new_status) == expected_delta
+
+    @patch("api.models.ResourceTag.objects.get_or_create")
+    @patch("api.models.Resource.objects.get_or_create")
+    @patch("api.db_utils.tenant_transaction")
+    def test_store_resources_new_resource(
+        self,
+        mock_tenant_transaction,
+        mock_get_or_create_resource,
+        mock_get_or_create_tag,
+    ):
+        tenant_id = "tenant123"
+        provider_instance = MagicMock()
+        provider_instance.id = "provider456"
+
+        finding = MagicMock()
+        finding.resource_uid = "resource_uid_123"
+        finding.resource_name = "resource_name"
+        finding.region = "us-west-1"
+        finding.service_name = "service_name"
+        finding.resource_type = "resource_type"
+        finding.resource_tags = {"tag1": "value1", "tag2": "value2"}
+
+        resource_instance = MagicMock()
+        resource_instance.uid = finding.resource_uid
+        resource_instance.region = finding.region
+
+        mock_get_or_create_resource.return_value = (resource_instance, True)
+        tag_instance = MagicMock()
+        mock_get_or_create_tag.return_value = (tag_instance, True)
+
+        resource, resource_uid_tuple = _store_resources(
+            finding, tenant_id, provider_instance
+        )
+
+        mock_get_or_create_resource.assert_called_once_with(
+            tenant_id=tenant_id,
+            provider=provider_instance,
+            uid=finding.resource_uid,
+            defaults={
+                "region": finding.region,
+                "service": finding.service_name,
+                "type": finding.resource_type,
+            },
+        )
+
+        assert resource == resource_instance
+        assert resource_uid_tuple == (resource_instance.uid, resource_instance.region)
+        resource_instance.upsert_or_delete_tags.assert_called_once()
+
+    @patch("api.models.ResourceTag.objects.get_or_create")
+    @patch("api.models.Resource.objects.get_or_create")
+    @patch("api.db_utils.tenant_transaction")
+    def test_store_resources_existing_resource(
+        self,
+        mock_tenant_transaction,
+        mock_get_or_create_resource,
+        mock_get_or_create_tag,
+    ):
+        tenant_id = "tenant123"
+        provider_instance = MagicMock()
+        provider_instance.id = "provider456"
+
+        finding = MagicMock()
+        finding.resource_uid = "resource_uid_123"
+        finding.resource_name = "resource_name"
+        finding.region = "us-west-2"
+        finding.service_name = "new_service"
+        finding.resource_type = "new_type"
+        finding.resource_tags = {"tag1": "value1", "tag2": "value2"}
+
+        resource_instance = MagicMock()
+        resource_instance.uid = finding.resource_uid
+        resource_instance.region = "us-west-1"
+        resource_instance.service = "old_service"
+        resource_instance.type = "old_type"
+
+        mock_get_or_create_resource.return_value = (resource_instance, False)
+
+        tag_instance = MagicMock()
+        mock_get_or_create_tag.return_value = (tag_instance, True)
+
+        resource, resource_uid_tuple = _store_resources(
+            finding, tenant_id, provider_instance
+        )
+
+        mock_get_or_create_resource.assert_called_once_with(
+            tenant_id=tenant_id,
+            provider=provider_instance,
+            uid=finding.resource_uid,
+            defaults={
+                "region": finding.region,
+                "service": finding.service_name,
+                "type": finding.resource_type,
+            },
+        )
+
+        # Check that resource fields were updated
+        assert resource_instance.region == finding.region
+        assert resource_instance.service == finding.service_name
+        assert resource_instance.type == finding.resource_type
+        resource_instance.save.assert_called_once()
+
+        assert resource == resource_instance
+        assert resource_uid_tuple == (resource_instance.uid, resource_instance.region)
+        resource_instance.upsert_or_delete_tags.assert_called_once()
+
+    @patch("api.models.ResourceTag.objects.get_or_create")
+    @patch("api.models.Resource.objects.get_or_create")
+    @patch("api.db_utils.tenant_transaction")
+    def test_store_resources_with_tags(
+        self,
+        mock_tenant_transaction,
+        mock_get_or_create_resource,
+        mock_get_or_create_tag,
+    ):
+        tenant_id = "tenant123"
+        provider_instance = MagicMock()
+        provider_instance.id = "provider456"
+
+        finding = MagicMock()
+        finding.resource_uid = "resource_uid_123"
+        finding.resource_name = "resource_name"
+        finding.region = "us-west-1"
+        finding.service_name = "service_name"
+        finding.resource_type = "resource_type"
+        finding.resource_tags = {"tag1": "value1", "tag2": "value2"}
+
+        resource_instance = MagicMock()
+        resource_instance.uid = finding.resource_uid
+        resource_instance.region = finding.region
+
+        mock_get_or_create_resource.return_value = (resource_instance, True)
+        tag_instance_1 = MagicMock()
+        tag_instance_2 = MagicMock()
+        mock_get_or_create_tag.side_effect = [
+            (tag_instance_1, True),
+            (tag_instance_2, True),
+        ]
+
+        resource, resource_uid_tuple = _store_resources(
+            finding, tenant_id, provider_instance
+        )
+
+        mock_get_or_create_tag.assert_any_call(
+            tenant_id=tenant_id, key="tag1", value="value1"
+        )
+        mock_get_or_create_tag.assert_any_call(
+            tenant_id=tenant_id, key="tag2", value="value2"
+        )
+        resource_instance.upsert_or_delete_tags.assert_called_once()
+        tags_passed = resource_instance.upsert_or_delete_tags.call_args[1]["tags"]
+        assert tag_instance_1 in tags_passed
+        assert tag_instance_2 in tags_passed
+
+        assert resource == resource_instance
+        assert resource_uid_tuple == (resource_instance.uid, resource_instance.region)
diff --git a/api/tests/README.md b/api/tests/README.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/tests/__init__.py b/api/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/tests/test_simple.py b/api/tests/test_simple.py
new file mode 100644
index 0000000000..4919d993ac
--- /dev/null
+++ b/api/tests/test_simple.py
@@ -0,0 +1,3 @@
+# For use with the CI pipeline. Can be removed once other tests are added.
+def test_always_passes():
+    assert True