Compare commits

..

8 Commits

Author SHA1 Message Date
pedrooot aab06b25a8 docs(powerbi): update .pbit and docs 2026-05-14 16:18:17 +02:00
pedrooot 59d9056ce5 docs(powerbi): add new documentation 2026-05-14 13:06:13 +02:00
Hugo Pereira Brito 6befa78978 fix(cloudflare): plan-aware WAF FAIL hints for zones (#9896) 2026-05-14 12:27:47 +02:00
lydiavilchez 78af0c24fe fix(googleworkspace): use per-service resources for Gmail (#11169) 2026-05-14 12:01:07 +02:00
Andoni Alonso 1bb547e5e1 docs(cloudflare): add pre-configured token creation links (#11156) 2026-05-14 11:58:00 +02:00
June 1f39b01fb2 feat(sagemaker): add sagemaker_domain_sso_configured check (#11094)
Co-authored-by: Daniel Barranquero <danielbo2001@gmail.com>
2026-05-14 11:42:30 +02:00
AOrps fb0ef391f2 ci(api): replace poetry with uv (api) (#10775)
Signed-off-by: AOrps <aorbeandrews@gmail.com>
Co-authored-by: Adrián Jesús Peña Rodríguez <adrianjpr@gmail.com>
2026-05-14 11:17:17 +02:00
Pablo Fernandez Guerra (PFE) f2e6a3264d chore(ui): scope prek pre-commit to staged files, drop legacy husky (#11118)
Co-authored-by: Pablo F.G <pablo.fernandez@prowler.com>
Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-14 11:12:25 +02:00
157 changed files with 8086 additions and 12446 deletions
@@ -0,0 +1,90 @@
name: 'Setup Python with uv'
description: 'Setup Python environment with uv and install dependencies'
author: 'Prowler'
inputs:
python-version:
description: 'Python version to use'
required: true
working-directory:
description: 'Working directory for uv'
required: false
default: '.'
uv-version:
description: 'uv version to install'
required: false
default: '0.11.14'
install-dependencies:
description: 'Install Python dependencies with uv'
required: false
default: 'true'
runs:
using: 'composite'
steps:
- name: Replace @master with current branch in pyproject.toml (prowler repo only)
if: github.event_name == 'pull_request' && github.base_ref == 'master' && github.repository == 'prowler-cloud/prowler'
shell: bash
working-directory: ${{ inputs.working-directory }}
env:
HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }}
run: |
BRANCH_NAME="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"
UPSTREAM="prowler-cloud/prowler"
if [ "$HEAD_REPO" != "$UPSTREAM" ]; then
echo "Fork PR detected (${HEAD_REPO}), rewriting VCS URL to fork"
sed -i "s|git+https://github.com/prowler-cloud/prowler\([^@]*\)@master|git+https://github.com/${HEAD_REPO}\1@$BRANCH_NAME|g" pyproject.toml
else
echo "Same-repo PR, using branch: $BRANCH_NAME"
sed -i "s|\(git+https://github.com/prowler-cloud/prowler[^@]*\)@master|\1@$BRANCH_NAME|g" pyproject.toml
fi
- name: Update uv.lock with latest Prowler commit
if: github.repository_owner == 'prowler-cloud' && github.repository != 'prowler-cloud/prowler'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
echo "Latest commit hash: $LATEST_COMMIT"
sed -i "s|\(git = \"https://github\.com/prowler-cloud/prowler\.git?rev=master\)#[a-f0-9]\{40\}\"|\1#${LATEST_COMMIT}\"|g" uv.lock
echo "Updated uv.lock entry:"
grep "prowler-cloud/prowler" uv.lock
- name: Update uv.lock SDK commit (prowler repo on push)
if: github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository == 'prowler-cloud/prowler'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
LATEST_COMMIT=$(curl -s "https://api.github.com/repos/prowler-cloud/prowler/commits/master" | jq -r '.sha')
echo "Latest commit hash: $LATEST_COMMIT"
sed -i "s|\(git = \"https://github\.com/prowler-cloud/prowler\.git?rev=master\)#[a-f0-9]\{40\}\"|\1#${LATEST_COMMIT}\"|g" uv.lock
echo "Updated uv.lock entry:"
grep "prowler-cloud/prowler" uv.lock
- name: Install uv
shell: bash
env:
UV_VERSION: ${{ inputs.uv-version }}
run: pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir "uv==${UV_VERSION}"
- name: Set up Python ${{ inputs.python-version }}
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: ${{ inputs.python-version }}
cache: 'pip'
- name: Install Python dependencies
if: inputs.install-dependencies == 'true'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
uv sync --no-install-project
uv run pip list
- name: Update Prowler Cloud API Client
if: github.repository_owner == 'prowler-cloud' && github.repository != 'prowler-cloud/prowler'
shell: bash
working-directory: ${{ inputs.working-directory }}
run: |
uv remove prowler-cloud-api-client
uv add ./prowler-cloud-api-client
+8 -8
View File
@@ -43,6 +43,7 @@ jobs:
pypi.org:443
files.pythonhosted.org:443
api.github.com:443
raw.githubusercontent.com:443
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@@ -63,26 +64,25 @@ jobs:
api/CHANGELOG.md
api/AGENTS.md
- name: Setup Python with Poetry
- name: Setup Python with uv
if: steps.check-changes.outputs.any_changed == 'true'
uses: ./.github/actions/setup-python-poetry
uses: ./.github/actions/setup-python-uv
with:
python-version: ${{ matrix.python-version }}
working-directory: ./api
update-lock: 'true'
- name: Poetry check
- name: uv lock check
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry check --lock
run: uv lock --check
- name: Ruff lint
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run ruff check . --exclude contrib
run: uv run ruff check . --exclude contrib
- name: Ruff format
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run ruff format --check . --exclude contrib
run: uv run ruff format --check . --exclude contrib
- name: Pylint
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run pylint --disable=W,C,R,E -j 0 -rn -sn src/
run: uv run pylint --disable=W,C,R,E -j 0 -rn -sn src/
+10 -8
View File
@@ -9,7 +9,7 @@ on:
- 'api/**'
- '.github/workflows/api-tests.yml'
- '.github/workflows/api-security.yml'
- '.github/actions/setup-python-poetry/**'
- '.github/actions/setup-python-uv/**'
pull_request:
branches:
- "master"
@@ -18,7 +18,7 @@ on:
- 'api/**'
- '.github/workflows/api-tests.yml'
- '.github/workflows/api-security.yml'
- '.github/actions/setup-python-poetry/**'
- '.github/actions/setup-python-uv/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -54,6 +54,7 @@ jobs:
github.com:443
auth.safetycli.com:443
pyup.io:443
raw.githubusercontent.com:443
data.safetycli.com:443
api.github.com:443
@@ -77,23 +78,24 @@ jobs:
api/CHANGELOG.md
api/AGENTS.md
- name: Setup Python with Poetry
- name: Setup Python with uv
if: steps.check-changes.outputs.any_changed == 'true'
uses: ./.github/actions/setup-python-poetry
uses: ./.github/actions/setup-python-uv
with:
python-version: ${{ matrix.python-version }}
working-directory: ./api
update-lock: 'true'
- name: Bandit
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run bandit -q -lll -x '*_test.py,./contrib/' -r .
# Exclude .venv because uv places the project venv inside ./api; otherwise
# bandit would recurse into installed third-party packages.
run: uv run bandit -q -lll -x '*_test.py,./contrib/,./.venv/' -r .
- name: Safety
if: steps.check-changes.outputs.any_changed == 'true'
# Accepted CVEs, severity threshold, and ignore expirations live in ../.safety-policy.yml
run: poetry run safety check --policy-file ../.safety-policy.yml
run: uv run safety check --policy-file ../.safety-policy.yml
- name: Vulture
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run vulture --exclude "contrib,tests,conftest.py" --min-confidence 100 .
run: uv run vulture --exclude "contrib,tests,conftest.py,.venv" --min-confidence 100 .
+4 -4
View File
@@ -87,6 +87,7 @@ jobs:
files.pythonhosted.org:443
cli.codecov.io:443
keybase.io:443
raw.githubusercontent.com:443
ingest.codecov.io:443
storage.googleapis.com:443
o26192.ingest.us.sentry.io:443
@@ -112,17 +113,16 @@ jobs:
api/CHANGELOG.md
api/AGENTS.md
- name: Setup Python with Poetry
- name: Setup Python with uv
if: steps.check-changes.outputs.any_changed == 'true'
uses: ./.github/actions/setup-python-poetry
uses: ./.github/actions/setup-python-uv
with:
python-version: ${{ matrix.python-version }}
working-directory: ./api
update-lock: 'true'
- name: Run tests with pytest
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run pytest --cov=./src/backend --cov-report=xml src/backend
run: uv run pytest --cov=./src/backend --cov-report=xml src/backend
- name: Upload coverage reports to Codecov
if: steps.check-changes.outputs.any_changed == 'true'
+6 -5
View File
@@ -339,10 +339,11 @@ jobs:
exit 1
fi
# Update poetry lock file
echo "Updating poetry.lock file..."
# Update uv lock file
echo "Updating uv.lock file..."
pip install --no-cache-dir uv==0.11.14
cd api
poetry lock
uv lock
cd ..
echo "✓ Prepared prowler dependency update to: $UPDATED_PROWLER_REF"
@@ -357,7 +358,7 @@ jobs:
base: ${{ env.BRANCH_NAME }}
add-paths: |
api/pyproject.toml
api/poetry.lock
api/uv.lock
title: "chore(api): Update prowler dependency to ${{ env.BRANCH_NAME }} for release ${{ env.PROWLER_VERSION }}"
body: |
### Description
@@ -366,7 +367,7 @@ jobs:
**Changes:**
- Updates `api/pyproject.toml` prowler dependency from `@master` to `@${{ env.BRANCH_NAME }}`
- Updates `api/poetry.lock` file with resolved dependencies
- Updates `api/uv.lock` file with resolved dependencies
This PR should be merged into the `${{ env.BRANCH_NAME }}` release branch.
+4 -2
View File
@@ -83,11 +83,13 @@ jobs:
- name: Lint with flake8
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run flake8 . --ignore=E266,W503,E203,E501,W605,E128 --exclude contrib,ui,api,skills
run: poetry run flake8 . --ignore=E266,W503,E203,E501,W605,E128 --exclude contrib,ui,api,skills,mcp_server
- name: Check format with black
if: steps.check-changes.outputs.any_changed == 'true'
run: poetry run black --exclude "api|ui|skills" --check .
# mcp_server has its own pyproject and uses ruff format, exclude it so SDK black
# does not fight ruff over rules it never formatted.
run: poetry run black --exclude "api|ui|skills|mcp_server" --check .
- name: Lint with pylint
if: steps.check-changes.outputs.any_changed == 'true'
+7 -1
View File
@@ -130,6 +130,12 @@ jobs:
echo "AWS_ACCESS_KEY_ID=${{ secrets.E2E_AWS_PROVIDER_ACCESS_KEY }}" >> .env
echo "AWS_SECRET_ACCESS_KEY=${{ secrets.E2E_AWS_PROVIDER_SECRET_KEY }}" >> .env
- name: Build API image from current code
# docker-compose.yml references prowlercloud/prowler-api:latest from the registry,
# which lags behind PR changes (e.g. the poetry -> uv migration); build locally so
# E2E exercises the API image produced by this PR.
run: docker build -t prowlercloud/prowler-api:latest ./api
- name: Start API services
run: |
export PROWLER_API_VERSION=latest
@@ -158,7 +164,7 @@ jobs:
for fixture in api/fixtures/dev/*.json; do
if [ -f "$fixture" ]; then
echo "Loading $fixture"
poetry run python manage.py loaddata "$fixture" --database admin
uv run python manage.py loaddata "$fixture" --database admin
fi
done
'
+13 -16
View File
@@ -107,24 +107,21 @@ repos:
files: { glob: ["{api,mcp_server}/**/*.py"] }
priority: 20
## PYTHON — Poetry
## PYTHON — uv (API)
- repo: https://github.com/astral-sh/uv-pre-commit
rev: 0.11.14
hooks:
- id: uv-lock
name: API - uv-lock
args: ["--check", "--project=./api"]
files: { glob: ["api/{pyproject.toml,uv.lock}"] }
pass_filenames: false
priority: 50
## PYTHON — Poetry (SDK)
- repo: https://github.com/python-poetry/poetry
rev: 2.3.4
hooks:
- id: poetry-check
name: API - poetry-check
args: ["--directory=./api"]
files: { glob: ["api/{pyproject.toml,poetry.lock}"] }
pass_filenames: false
priority: 50
- id: poetry-lock
name: API - poetry-lock
args: ["--directory=./api"]
files: { glob: ["api/{pyproject.toml,poetry.lock}"] }
pass_filenames: false
priority: 50
- id: poetry-check
name: SDK - poetry-check
args: ["--directory=./"]
@@ -186,7 +183,7 @@ repos:
entry: safety check --policy-file .safety-policy.yml
language: system
pass_filenames: false
files: { glob: ["**/pyproject.toml", "**/poetry.lock", "**/requirements*.txt", ".safety-policy.yml"] }
files: { glob: ["**/pyproject.toml", "**/poetry.lock", "**/uv.lock", "**/requirements*.txt", ".safety-policy.yml"] }
priority: 40
- id: vulture
-1
View File
@@ -121,7 +121,6 @@ Every AWS provider scan will enqueue an Attack Paths ingestion job automatically
| OpenStack | 34 | 5 | 0 | 9 | Official | UI, API, CLI |
| Vercel | 26 | 6 | 0 | 5 | Official | UI, API, CLI |
| Okta | 1 | 1 | 0 | 1 | Official | CLI |
| Scaleway [Contact us](https://prowler.com/contact) | 1 | 1 | 0 | 1 | Unofficial | CLI |
| NHN | 6 | 2 | 1 | 0 | Unofficial | CLI |
> [!Note]
+8 -8
View File
@@ -124,24 +124,24 @@ api/src/backend/
```bash
# Development
poetry run python src/backend/manage.py runserver
poetry run celery -A config.celery worker -l INFO
uv run python src/backend/manage.py runserver
uv run celery -A config.celery worker -l INFO
# Database
poetry run python src/backend/manage.py makemigrations
poetry run python src/backend/manage.py migrate
uv run python src/backend/manage.py makemigrations
uv run python src/backend/manage.py migrate
# Testing & Linting
poetry run pytest -x --tb=short
poetry run make lint
uv run pytest -x --tb=short
uv run make lint
```
---
## QA CHECKLIST
- [ ] `poetry run pytest` passes
- [ ] `poetry run make lint` passes
- [ ] `uv run pytest` passes
- [ ] `uv run make lint` passes
- [ ] Migrations created if models changed
- [ ] New endpoints have `@extend_schema` decorators
- [ ] RLS properly applied for tenant data
+1
View File
@@ -10,6 +10,7 @@ All notable changes to the **Prowler API** are documented in this file.
### 🔄 Changed
- Replace `poetry` with `uv` (`0.11.14`) as the API package manager; migrate `pyproject.toml` to `[dependency-groups]` and regenerate as `uv.lock` [(#10775)](https://github.com/prowler-cloud/prowler/pull/10775)
- Remove orphaned `gin_resources_search_idx` declaration from `Resource.Meta.indexes` (DB index dropped in `0072_drop_unused_indexes`) [(#11001)](https://github.com/prowler-cloud/prowler/pull/11001)
---
+7 -6
View File
@@ -14,6 +14,7 @@ ENV ZIZMOR_VERSION=${ZIZMOR_VERSION}
# hadolint ignore=DL3008
RUN apt-get update && apt-get install -y --no-install-recommends \
wget \
git \
libicu72 \
gcc \
g++ \
@@ -88,18 +89,18 @@ WORKDIR /home/prowler
# Ensure output directory exists
RUN mkdir -p /tmp/prowler_api_output
COPY pyproject.toml ./
COPY pyproject.toml uv.lock ./
RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir poetry==2.3.4
pip install --no-cache-dir uv==0.11.14
ENV PATH="/home/prowler/.local/bin:$PATH"
# Add `--no-root` to avoid installing the current project as a package
RUN poetry install --no-root && \
rm -rf ~/.cache/pip
# Add `--no-install-project` to avoid installing the current project as a package
RUN uv sync --no-install-project && \
rm -rf ~/.cache/uv
RUN poetry run python "$(poetry env info --path)/src/prowler/prowler/providers/m365/lib/powershell/m365_powershell.py"
RUN .venv/bin/python .venv/lib/python3.12/site-packages/prowler/providers/m365/lib/powershell/m365_powershell.py
COPY src/backend/ ./backend/
COPY docker-entrypoint.sh ./docker-entrypoint.sh
+12 -19
View File
@@ -25,12 +25,11 @@ If you dont set `DJANGO_TOKEN_SIGNING_KEY` or `DJANGO_TOKEN_VERIFYING_KEY`, t
**Important note**: Every Prowler version (or repository branches and tags) could have different variables set in its `.env` file. Please use the `.env` file that corresponds with each version.
## Local deployment
Keep in mind if you export the `.env` file to use it with local deployment that you will have to do it within the context of the Poetry interpreter, not before. Otherwise, variables will not be loaded properly.
Keep in mind if you export the `.env` file to use it with local deployment that you will have to do it within the context of the virtual environment, not before. Otherwise, variables will not be loaded properly.
To do this, you can run:
```console
poetry shell
set -a
source .env
```
@@ -78,7 +77,7 @@ docker logs -f $(docker ps --format "{{.Names}}" | grep 'api-')
## Local deployment
To use this method, you'll need to set up a Python virtual environment (version ">=3.11,<3.13") and keep dependencies updated. Additionally, ensure that `poetry` and `docker compose` are installed.
To use this method, you'll need to set up a Python virtual environment (version ">=3.11,<3.13") and keep dependencies updated. Additionally, ensure that `uv` and `docker compose` are installed.
### Clone the repository
@@ -90,11 +89,10 @@ git clone https://github.com/prowler-cloud/api.git
git clone git@github.com:prowler-cloud/api.git
```
### Install all dependencies with Poetry
### Install all dependencies with uv
```console
poetry install
poetry shell
uv sync
```
## Start the PostgreSQL Database and Valkey
@@ -139,7 +137,7 @@ gunicorn -c config/guniconf.py config.wsgi:application
## Local deployment
To use this method, you'll need to set up a Python virtual environment (version ">=3.11,<3.13") and keep dependencies updated. Additionally, ensure that `poetry` and `docker compose` are installed.
To use this method, you'll need to set up a Python virtual environment (version ">=3.11,<3.13") and keep dependencies updated. Additionally, ensure that `uv` and `docker compose` are installed.
### Clone the repository
@@ -165,11 +163,10 @@ docker compose up postgres valkey -d
### Install the Python dependencies
> You must have Poetry installed
> You must have uv installed
```console
poetry install
poetry shell
uv sync
```
### Apply migrations
@@ -246,9 +243,8 @@ docker logs -f $(docker ps --format "{{.Names}}" | grep 'api-')
For migrations, you need to force the `admin` database router. Assuming you have the correct environment variables and Python virtual environment, run:
```console
poetry shell
cd src/backend
python manage.py migrate --database admin
uv run python manage.py migrate --database admin
```
## Apply fixtures
@@ -256,9 +252,8 @@ python manage.py migrate --database admin
Fixtures are used to populate the database with initial development data.
```console
poetry shell
cd src/backend
python manage.py loaddata api/fixtures/0_dev_users.json --database admin
uv run python manage.py loaddata api/fixtures/0_dev_users.json --database admin
```
> The default credentials are `dev@prowler.com:Thisisapassword123@` or `dev2@prowler.com:Thisisapassword123@`
@@ -270,9 +265,8 @@ Note that the tests will fail if you use the same `.env` file as the development
For best results, run in a new shell with no environment variables set.
```console
poetry shell
cd src/backend
pytest
uv run pytest
```
# Custom commands
@@ -284,8 +278,7 @@ Django provides a way to create custom commands that can be run from the command
To run a custom command, you need to be in the `prowler/api/src/backend` directory and run:
```console
poetry shell
python manage.py <command_name>
uv run python manage.py <command_name>
```
## Generate dummy data
@@ -308,7 +301,7 @@ This command creates, for a given tenant, a provider, scan and a set of findings
### Example
```console
~/backend $ poetry run python manage.py findings --tenant
~/backend $ uv run python manage.py findings --tenant
fffb1893-3fc7-4623-a5d9-fae47da1c528 --findings 25000 --re
sources 1000 --batch 5000 --alias test-script
+8 -8
View File
@@ -5,9 +5,9 @@ apply_migrations() {
echo "Applying database migrations..."
# Fix Inconsistent migration history after adding sites app
poetry run python manage.py check_and_fix_socialaccount_sites_migration --database admin
uv run python manage.py check_and_fix_socialaccount_sites_migration --database admin
poetry run python manage.py migrate --database admin
uv run python manage.py migrate --database admin
}
apply_fixtures() {
@@ -15,19 +15,19 @@ apply_fixtures() {
for fixture in api/fixtures/dev/*.json; do
if [ -f "$fixture" ]; then
echo "Loading $fixture"
poetry run python manage.py loaddata "$fixture" --database admin
uv run python manage.py loaddata "$fixture" --database admin
fi
done
}
start_dev_server() {
echo "Starting the development server..."
poetry run python manage.py runserver 0.0.0.0:"${DJANGO_PORT:-8080}"
uv run python manage.py runserver 0.0.0.0:"${DJANGO_PORT:-8080}"
}
start_prod_server() {
echo "Starting the Gunicorn server..."
poetry run gunicorn -c config/guniconf.py config.wsgi:application
uv run gunicorn -c config/guniconf.py config.wsgi:application
}
resolve_worker_hostname() {
@@ -47,7 +47,7 @@ resolve_worker_hostname() {
start_worker() {
echo "Starting the worker..."
poetry run python -m celery -A config.celery worker \
uv run python -m celery -A config.celery worker \
-n "$(resolve_worker_hostname)" \
-l "${DJANGO_LOGGING_LEVEL:-info}" \
-Q celery,scans,scan-reports,deletion,backfill,overview,integrations,compliance,attack-paths-scans \
@@ -56,7 +56,7 @@ start_worker() {
start_worker_beat() {
echo "Starting the worker-beat..."
poetry run python -m celery -A config.celery beat -l "${DJANGO_LOGGING_LEVEL:-info}" --scheduler django_celery_beat.schedulers:DatabaseScheduler
uv run python -m celery -A config.celery beat -l "${DJANGO_LOGGING_LEVEL:-info}" --scheduler django_celery_beat.schedulers:DatabaseScheduler
}
manage_db_partitions() {
@@ -64,7 +64,7 @@ manage_db_partitions() {
echo "Managing DB partitions..."
# For now we skip the deletion of partitions until we define the data retention policy
# --yes auto approves the operation without the need of an interactive terminal
poetry run python manage.py pgpartition --using admin --skip-delete --yes
uv run python manage.py pgpartition --using admin --skip-delete --yes
fi
}
-9427
View File
File diff suppressed because it is too large Load Diff
+395 -26
View File
@@ -1,6 +1,25 @@
[build-system]
build-backend = "poetry.core.masonry.api"
requires = ["poetry-core"]
[dependency-groups]
dev = [
"bandit==1.7.9",
"coverage==7.5.4",
"django-silk==5.3.2",
"docker==7.1.0",
"filelock==3.20.3",
"freezegun==1.5.1",
"mypy==1.10.1",
"pylint==3.2.5",
"pytest==9.0.3",
"pytest-cov==5.0.0",
"pytest-django==4.8.0",
"pytest-env==1.1.3",
"pytest-randomly==3.15.0",
"pytest-xdist==3.6.1",
"ruff==0.5.0",
"safety==3.7.0",
"tqdm==4.67.1",
"vulture==2.14",
"prek==0.3.9"
]
[project]
authors = [{name = "Prowler Engineering", email = "engineering@prowler.com"}]
@@ -52,26 +71,376 @@ package-mode = false
requires-python = ">=3.11,<3.13"
version = "1.28.0"
[project.scripts]
celery = "src.backend.config.settings.celery"
[tool.poetry.group.dev.dependencies]
bandit = "1.7.9"
coverage = "7.5.4"
django-silk = "5.3.2"
docker = "7.1.0"
filelock = "3.20.3"
freezegun = "1.5.1"
mypy = "1.10.1"
prek = "0.3.9"
pylint = "3.2.5"
pytest = "9.0.3"
pytest-cov = "5.0.0"
pytest-django = "4.8.0"
pytest-env = "1.1.3"
pytest-randomly = "3.15.0"
pytest-xdist = "3.6.1"
ruff = "0.5.0"
safety = "3.7.0"
tqdm = "4.67.1"
vulture = "2.14"
[tool.uv]
# Transitive pins matching master to avoid silent drift; bump deliberately.
constraint-dependencies = [
"about-time==4.2.1",
"adal==1.2.7",
"aioboto3==15.5.0",
"aiobotocore==2.25.1",
"aiofiles==24.1.0",
"aiohappyeyeballs==2.6.1",
"aiohttp==3.13.5",
"aioitertools==0.13.0",
"aiosignal==1.4.0",
"alibabacloud-actiontrail20200706==2.4.1",
"alibabacloud-credentials==1.0.3",
"alibabacloud-credentials-api==1.0.0",
"alibabacloud-cs20151215==6.1.0",
"alibabacloud-darabonba-array==0.1.0",
"alibabacloud-darabonba-encode-util==0.0.2",
"alibabacloud-darabonba-map==0.0.1",
"alibabacloud-darabonba-signature-util==0.0.4",
"alibabacloud-darabonba-string==0.0.4",
"alibabacloud-darabonba-time==0.0.1",
"alibabacloud-ecs20140526==7.2.5",
"alibabacloud-endpoint-util==0.0.4",
"alibabacloud-gateway-oss==0.0.17",
"alibabacloud-gateway-oss-util==0.0.3",
"alibabacloud-gateway-sls==0.4.0",
"alibabacloud-gateway-sls-util==0.4.0",
"alibabacloud-gateway-spi==0.0.3",
"alibabacloud-openapi-util==0.2.4",
"alibabacloud-oss-util==0.0.6",
"alibabacloud-oss20190517==1.0.6",
"alibabacloud-ram20150501==1.2.0",
"alibabacloud-rds20140815==12.0.0",
"alibabacloud-sas20181203==6.1.0",
"alibabacloud-sls20201230==5.9.0",
"alibabacloud-sts20150401==1.1.6",
"alibabacloud-tea==0.4.3",
"alibabacloud-tea-openapi==0.4.4",
"alibabacloud-tea-util==0.3.14",
"alibabacloud-tea-xml==0.0.3",
"alibabacloud-vpc20160428==6.13.0",
"alive-progress==3.3.0",
"aliyun-log-fastpb==0.2.0",
"amqp==5.3.1",
"annotated-types==0.7.0",
"anyio==4.12.1",
"applicationinsights==0.11.10",
"apscheduler==3.11.2",
"argcomplete==3.5.3",
"asgiref==3.11.0",
"astroid==3.2.4",
"async-timeout==5.0.1",
"attrs==25.4.0",
"authlib==1.6.9",
"autopep8==2.3.2",
"awsipranges==0.3.3",
"azure-cli-core==2.83.0",
"azure-cli-telemetry==1.1.0",
"azure-common==1.1.28",
"azure-core==1.38.1",
"azure-identity==1.21.0",
"azure-keyvault-certificates==4.10.0",
"azure-keyvault-keys==4.10.0",
"azure-keyvault-secrets==4.10.0",
"azure-mgmt-apimanagement==5.0.0",
"azure-mgmt-applicationinsights==4.1.0",
"azure-mgmt-authorization==4.0.0",
"azure-mgmt-compute==34.0.0",
"azure-mgmt-containerinstance==10.1.0",
"azure-mgmt-containerregistry==12.0.0",
"azure-mgmt-containerservice==34.1.0",
"azure-mgmt-core==1.6.0",
"azure-mgmt-cosmosdb==9.7.0",
"azure-mgmt-databricks==2.0.0",
"azure-mgmt-datafactory==9.2.0",
"azure-mgmt-eventgrid==10.4.0",
"azure-mgmt-eventhub==11.2.0",
"azure-mgmt-keyvault==10.3.1",
"azure-mgmt-loganalytics==12.0.0",
"azure-mgmt-logic==10.0.0",
"azure-mgmt-monitor==6.0.2",
"azure-mgmt-network==28.1.0",
"azure-mgmt-postgresqlflexibleservers==1.1.0",
"azure-mgmt-rdbms==10.1.0",
"azure-mgmt-recoveryservices==3.1.0",
"azure-mgmt-recoveryservicesbackup==9.2.0",
"azure-mgmt-resource==24.0.0",
"azure-mgmt-search==9.1.0",
"azure-mgmt-security==7.0.0",
"azure-mgmt-sql==3.0.1",
"azure-mgmt-storage==22.1.1",
"azure-mgmt-subscription==3.1.1",
"azure-mgmt-synapse==2.0.0",
"azure-mgmt-web==8.0.0",
"azure-monitor-query==2.0.0",
"azure-storage-blob==12.24.1",
"azure-synapse-artifacts==0.21.0",
"backoff==2.2.1",
"bandit==1.7.9",
"billiard==4.2.4",
"blinker==1.9.0",
"boto3==1.40.61",
"botocore==1.40.61",
"cartography==0.135.0",
"celery==5.6.2",
"certifi==2026.1.4",
"cffi==2.0.0",
"charset-normalizer==3.4.4",
"circuitbreaker==2.1.3",
"click==8.3.1",
"click-didyoumean==0.3.1",
"click-plugins==1.1.1.2",
"click-repl==0.3.0",
"cloudflare==4.3.1",
"colorama==0.4.6",
"contextlib2==21.6.0",
"contourpy==1.3.3",
"coverage==7.5.4",
"cron-descriptor==1.4.5",
"crowdstrike-falconpy==1.6.0",
"cryptography==46.0.7",
"cycler==0.12.1",
"darabonba-core==1.0.5",
"dash==3.1.1",
"dash-bootstrap-components==2.0.3",
"debugpy==1.8.20",
"decorator==5.2.1",
"defusedxml==0.7.1",
"detect-secrets==1.5.0",
"dill==0.4.1",
"distro==1.9.0",
"dj-rest-auth==7.0.1",
"django==5.1.15",
"django-allauth==65.15.0",
"django-celery-beat==2.9.0",
"django-celery-results==2.6.0",
"django-cors-headers==4.4.0",
"django-environ==0.11.2",
"django-filter==24.3",
"django-guid==3.5.0",
"django-postgres-extra==2.0.9",
"django-silk==5.3.2",
"django-timezone-field==7.2.1",
"djangorestframework==3.15.2",
"djangorestframework-jsonapi==7.0.2",
"djangorestframework-simplejwt==5.5.1",
"dnspython==2.8.0",
"docker==7.1.0",
"dogpile-cache==1.5.0",
"dparse==0.6.4",
"drf-extensions==0.8.0",
"drf-nested-routers==0.95.0",
"drf-simple-apikey==2.2.1",
"drf-spectacular==0.27.2",
"drf-spectacular-jsonapi==0.5.1",
"dulwich==0.23.0",
"duo-client==5.5.0",
"durationpy==0.10",
"email-validator==2.2.0",
"execnet==2.1.2",
"filelock==3.20.3",
"flask==3.1.3",
"fonttools==4.62.1",
"freezegun==1.5.1",
"frozenlist==1.8.0",
"gevent==25.9.1",
"google-api-core==2.29.0",
"google-api-python-client==2.163.0",
"google-auth==2.48.0",
"google-auth-httplib2==0.2.0",
"google-cloud-access-context-manager==0.3.0",
"google-cloud-asset==4.2.0",
"google-cloud-org-policy==1.16.0",
"google-cloud-os-config==1.23.0",
"google-cloud-resource-manager==1.16.0",
"googleapis-common-protos==1.72.0",
"gprof2dot==2025.4.14",
"graphemeu==0.7.2",
"greenlet==3.3.1",
"grpc-google-iam-v1==0.14.3",
"grpcio==1.76.0",
"grpcio-status==1.76.0",
"gunicorn==23.0.0",
"h11==0.16.0",
"h2==4.3.0",
"hpack==4.1.0",
"httpcore==1.0.9",
"httplib2==0.31.2",
"httpx==0.28.1",
"humanfriendly==10.0",
"hyperframe==6.1.0",
"iamdata==0.1.202602021",
"idna==3.11",
"importlib-metadata==8.7.1",
"inflection==0.5.1",
"iniconfig==2.3.0",
"iso8601==2.1.0",
"isodate==0.7.2",
"isort==5.13.2",
"itsdangerous==2.2.0",
"jinja2==3.1.6",
"jiter==0.13.0",
"jmespath==1.1.0",
"joblib==1.5.3",
"jsonpatch==1.33",
"jsonpickle==4.1.1",
"jsonpointer==3.0.0",
"jsonschema==4.23.0",
"jsonschema-specifications==2025.9.1",
"keystoneauth1==5.13.0",
"kiwisolver==1.4.9",
"knack==0.11.0",
"kombu==5.6.2",
"kubernetes==32.0.1",
"lxml==5.3.2",
"lz4==4.4.5",
"markdown==3.10.2",
"markdown-it-py==4.0.0",
"markupsafe==3.0.3",
"marshmallow==4.3.0",
"matplotlib==3.10.8",
"mccabe==0.7.0",
"mdurl==0.1.2",
"microsoft-kiota-abstractions==1.9.2",
"microsoft-kiota-authentication-azure==1.9.2",
"microsoft-kiota-http==1.9.2",
"microsoft-kiota-serialization-form==1.9.2",
"microsoft-kiota-serialization-json==1.9.2",
"microsoft-kiota-serialization-multipart==1.9.2",
"microsoft-kiota-serialization-text==1.9.2",
"microsoft-security-utilities-secret-masker==1.0.0b4",
"msal==1.35.0b1",
"msal-extensions==1.2.0",
"msgraph-core==1.3.8",
"msgraph-sdk==1.55.0",
"msrest==0.7.1",
"msrestazure==0.6.4.post1",
"multidict==6.7.1",
"mypy==1.10.1",
"mypy-extensions==1.1.0",
"narwhals==2.16.0",
"neo4j==6.1.0",
"nest-asyncio==1.6.0",
"nltk==3.9.4",
"numpy==2.0.2",
"oauthlib==3.3.1",
"oci==2.169.0",
"openai==1.109.1",
"openstacksdk==4.2.0",
"opentelemetry-api==1.39.1",
"opentelemetry-sdk==1.39.1",
"opentelemetry-semantic-conventions==0.60b1",
"os-service-types==1.8.2",
"packageurl-python==0.17.6",
"packaging==26.0",
"pagerduty==6.1.0",
"pandas==2.2.3",
"pbr==7.0.3",
"pillow==12.2.0",
"pkginfo==1.12.1.2",
"platformdirs==4.5.1",
"plotly==6.5.2",
"pluggy==1.6.0",
"policyuniverse==1.5.1.20231109",
"portalocker==2.10.1",
"prek==0.3.9",
"prompt-toolkit==3.0.52",
"propcache==0.4.1",
"proto-plus==1.27.0",
"protobuf==6.33.5",
"psutil==7.2.2",
"psycopg2-binary==2.9.9",
"py-deviceid==0.1.1",
"py-iam-expand==0.1.0",
"py-ocsf-models==0.8.1",
"pyasn1==0.6.3",
"pyasn1-modules==0.4.2",
"pycodestyle==2.14.0",
"pycparser==3.0",
"pydantic==2.12.5",
"pydantic-core==2.41.5",
"pygithub==2.8.0",
"pygments==2.20.0",
"pyjwt==2.12.1",
"pylint==3.2.5",
"pymsalruntime==0.18.1",
"pynacl==1.6.2",
"pyopenssl==26.0.0",
"pyparsing==3.3.2",
"pyreadline3==3.5.4",
"pysocks==1.7.1",
"pytest==9.0.3",
"pytest-celery==1.3.0",
"pytest-cov==5.0.0",
"pytest-django==4.8.0",
"pytest-docker-tools==3.1.9",
"pytest-env==1.1.3",
"pytest-randomly==3.15.0",
"pytest-xdist==3.6.1",
"python-crontab==3.3.0",
"python-dateutil==2.9.0.post0",
"python-digitalocean==1.17.0",
"python3-saml==1.16.0",
"pytz==2025.1",
"pywin32==311",
"pyyaml==6.0.3",
"redis==7.1.0",
"referencing==0.37.0",
"regex==2026.1.15",
"reportlab==4.4.10",
"requests==2.33.1",
"requests-file==3.0.1",
"requests-oauthlib==2.0.0",
"requestsexceptions==1.4.0",
"retrying==1.4.2",
"rich==14.3.2",
"rpds-py==0.30.0",
"rsa==4.9.1",
"ruamel-yaml==0.19.1",
"ruff==0.5.0",
"s3transfer==0.14.0",
"safety==3.7.0",
"safety-schemas==0.0.16",
"scaleway==2.10.3",
"scaleway-core==2.10.3",
"schema==0.7.5",
"sentry-sdk==2.56.0",
"setuptools==80.10.2",
"shellingham==1.5.4",
"shodan==1.31.0",
"six==1.17.0",
"slack-sdk==3.39.0",
"sniffio==1.3.1",
"sqlparse==0.5.5",
"statsd==4.0.1",
"std-uritemplate==2.0.8",
"stevedore==5.6.0",
"tabulate==0.9.0",
"tenacity==9.1.2",
"tldextract==5.3.1",
"tomlkit==0.14.0",
"tqdm==4.67.1",
"typer==0.21.1",
"types-aiobotocore-ecr==3.1.1",
"typing-extensions==4.15.0",
"typing-inspection==0.4.2",
"tzdata==2025.3",
"tzlocal==5.3.1",
"uritemplate==4.2.0",
"urllib3==2.6.3",
"uuid6==2024.7.10",
"vine==5.1.0",
"vulture==2.14",
"wcwidth==0.5.3",
"websocket-client==1.9.0",
"werkzeug==3.1.7",
"workos==6.0.4",
"wrapt==1.17.3",
"xlsxwriter==3.2.9",
"xmlsec==1.3.14",
"xmltodict==1.0.2",
"yarl==1.22.0",
"zipp==3.23.0",
"zope-event==6.1",
"zope-interface==8.2",
"zstd==1.5.7.3"
]
# prowler@master needs okta==3.4.2; cartography 0.135.0 declares okta<1.0.0 for an
# integration prowler does not import.
override-dependencies = [
"okta==3.4.2"
]
Generated
+6128
View File
File diff suppressed because it is too large Load Diff
+1 -1
View File
@@ -73,7 +73,7 @@ secrets:
DJANGO_SECRETS_ENCRYPTION_KEY:
DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400
releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
releaseConfigRoot: /home/prowler/.venv/lib/python3.12/site-packages/
releaseConfigPath: prowler/config/config.yaml
mainConfig:
+2 -7
View File
@@ -326,12 +326,6 @@
"user-guide/providers/openstack/authentication"
]
},
{
"group": "Scaleway",
"pages": [
"user-guide/providers/scaleway/getting-started-scaleway"
]
},
{
"group": "Vercel",
"pages": [
@@ -359,7 +353,8 @@
"group": "Cookbooks",
"pages": [
"user-guide/cookbooks/kubernetes-in-cluster",
"user-guide/cookbooks/cicd-pipeline"
"user-guide/cookbooks/cicd-pipeline",
"user-guide/cookbooks/powerbi-cis-benchmarks"
]
}
]
Binary file not shown.

After

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 107 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 153 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 101 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 405 KiB

-1
View File
@@ -35,7 +35,6 @@ Prowler supports a wide range of providers organized by category:
| **NHN** | Unofficial | Tenants | CLI |
| [OpenStack](/user-guide/providers/openstack/getting-started-openstack) | Official | Projects | UI, API, CLI |
| [Oracle Cloud](/user-guide/providers/oci/getting-started-oci) | Official | Tenancies / Compartments | UI, API, CLI |
| [Scaleway](/user-guide/providers/scaleway/getting-started-scaleway) [Contact us](https://prowler.com/contact) | Unofficial | Organizations | CLI |
### Infrastructure as Code Providers
@@ -0,0 +1,168 @@
---
title: "Visualize Multi-Cloud CIS Benchmarks With Power BI"
description: "Ingest Prowler compliance CSV exports into a ready-made Microsoft Power BI template that surfaces CIS Benchmark posture across AWS, Azure, Google Cloud, and Kubernetes."
---
The Multi-Cloud CIS Benchmarks Power BI template turns Prowler compliance CSV exports into an interactive dashboard. The template ingests scan results from Prowler CLI or Prowler Cloud and renders cross-provider CIS Benchmark coverage, profile-level breakdowns, regional drill-downs, and time-series trends. Center for Internet Security (CIS) Benchmarks are industry-standard configuration baselines maintained by CIS.
The template and its source files live in the Prowler repository under [`contrib/PowerBI/Multicloud CIS Benchmarks`](https://github.com/prowler-cloud/prowler/tree/master/contrib/PowerBI/Multicloud%20CIS%20Benchmarks).
<img src="/images/powerbi/report-cover.png" alt="Multi-Cloud CIS Benchmarks Power BI report cover showing aggregated compliance posture across providers" width="900" />
## Prerequisites
The setup requires the following components:
* **Microsoft Power BI Desktop:** free download from Microsoft.
* **Prowler compliance CSV exports:** produced by Prowler CLI or downloaded from Prowler Cloud or Prowler App.
* **Local directory:** holds the CSV exports that the template ingests at load time.
## Supported CIS Benchmarks
The template ships with predefined mappings for the following CIS Benchmark versions. Exports must match these versions for the dashboard to populate correctly:
| Compliance Framework | Version |
| ---------------------------------------------- | -------- |
| CIS Amazon Web Services Foundations Benchmark | v6.0 |
| CIS Microsoft Azure Foundations Benchmark | v5.0 |
| CIS Google Cloud Platform Foundation Benchmark | v4.0 |
| CIS Kubernetes Benchmark | v1.12.0 |
<Warning>
Other CIS Benchmark versions are not recognized by the template. Confirm the framework version before running the scan or downloading the export.
</Warning>
## Setup
### Step 1: Install Microsoft Power BI Desktop
Download and install Microsoft Power BI Desktop from the official Microsoft site. The template is opened with this application.
### Step 2: Generate Compliance CSV Exports
Compliance CSV exports can be generated through Prowler CLI or downloaded from Prowler Cloud and Prowler App.
#### Option A: Prowler CLI
Run a scan with the `--compliance` flag pointing to the appropriate CIS framework, for example:
```sh
prowler aws --compliance cis_6.0_aws
prowler azure --compliance cis_5.0_azure
prowler gcp --compliance cis_4.0_gcp
prowler kubernetes --compliance cis_1.12_kubernetes
```
The compliance CSV exports are written to `output/compliance/` by default.
#### Option B: Prowler Cloud or Prowler App
Open the Compliance section, select the desired CIS Benchmark, and download the CSV export.
<img src="/images/powerbi/download-compliance-scan.png" alt="Compliance section in Prowler Cloud showing the CSV download option for a CIS Benchmark scan" width="900" />
### Step 3: Create a Local Directory for the Exports
Place every CSV export in a single local directory. The template parses filenames to detect the provider, so filenames must keep the provider keyword (`aws`, `azure`, `gcp`, or `kubernetes`).
<Note>
Time-series visualizations such as "Compliance Percent Over Time" require multiple scans from different dates in the same directory.
</Note>
### Step 4: Open the Power BI Template
Download the template file [`Prowler Multicloud CIS Benchmarks.pbit`](https://github.com/prowler-cloud/prowler/raw/master/contrib/PowerBI/Multicloud%20CIS%20Benchmarks/Prowler%20Multicloud%20CIS%20Benchmarks.pbit) and open it. Power BI Desktop prompts for the full filepath to the directory created in step 3.
### Step 5: Provide the Directory Filepath
Enter the absolute filepath without quotation marks. The Windows "copy as path" feature wraps the path in quotation marks automatically; remove them before submitting.
### Step 6: Save the Report as a `.pbix` File
Once the filepath is submitted, the template ingests the CSV exports and renders the report. Save the populated report as a `.pbix` file for future use. Re-running the `.pbit` template generates a fresh report against an updated directory.
## Validation
To confirm the CSV exports were ingested correctly, open the "Configuration" tab inside the report.
<img src="/images/powerbi/validation.png" alt="Configuration tab in the Power BI report displaying loaded CIS Benchmarks, the Prowler CSV folder path, and the list of ingested exports" width="900" />
The "Configuration" tab exposes three tables:
* **Loaded CIS Benchmarks:** lists the benchmarks and versions supported by the template. This table is defined by the template itself and is not editable. All benchmarks remain listed regardless of which provider exports were supplied.
* **Prowler CSV Folder:** displays the absolute path provided during template load.
* **Loaded Prowler Exports:** lists every CSV file detected in the directory. A green checkmark identifies the file used as the latest assessment for each provider and benchmark combination.
## Report Sections
The report is organized into three navigable pages:
| Report Page | Purpose |
| ----------- | ------------------------------------------------------------------------------------ |
| Overview | Aggregates CIS Benchmark posture across AWS, Azure, Google Cloud, and Kubernetes. |
| Benchmark | Focuses on a single CIS Benchmark with profile-level and regional filters. |
| Requirement | Drill-through page that surfaces details for a single benchmark requirement. |
### Overview Page
The Overview page summarizes CIS Benchmark posture across every supported provider.
<img src="/images/powerbi/overview-page.png" alt="Overview page in the Power BI report aggregating CIS Benchmark posture across AWS, Azure, Google Cloud, and Kubernetes" width="900" />
The Overview page contains the following components:
| Component | Description |
| ---------------------------------------- | ---------------------------------------------------------------------------- |
| CIS Benchmark Overview | Table listing benchmark name, version, and overall compliance percentage. |
| Provider by Requirement Status | Bar chart breaking down requirements by status and provider. |
| Compliance Percent Heatmap | Heatmap of compliance percentage by benchmark and profile level. |
| Profile Level by Requirement Status | Bar chart breaking down requirements by status and profile level. |
| Compliance Percent Over Time by Provider | Line chart tracking overall compliance percentage over time by provider. |
### Benchmark Page
The Benchmark page focuses on a single CIS Benchmark. The benchmark, profile level, and region can be selected through dropdown filters.
<img src="/images/powerbi/benchmark-page.png" alt="Benchmark page in the Power BI report showing region heatmap, section breakdown, time-series trend, and the requirements table" width="900" />
The Benchmark page contains the following components:
| Component | Description |
| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
| Compliance Percent Heatmap | Heatmap of compliance percentage by region and profile level. |
| Benchmark Section by Requirement Status | Bar chart of requirements grouped by benchmark section and status. |
| Compliance Percent Over Time by Region | Line chart tracking compliance percentage over time by region. |
| Benchmark Requirements | Table listing requirement section, requirement number, requirement title, number of resources tested, status, and failing checks. |
### Requirement Page
The Requirement page is a drill-through view that exposes the full context of a single requirement. To populate the page, right-click a row in the "Benchmark Requirements" table on the Benchmark page and select "Drill through" > "Requirement".
<img src="/images/powerbi/requirement-page.png" alt="Requirement drill-through page in the Power BI report showing rationale, remediation, regional breakdown, and the resource-level check results" width="900" />
The Requirement page contains the following components:
| Component | Description |
| ------------------------------------------ | -------------------------------------------------------------------------------------------- |
| Title | Requirement title. |
| Rationale | Rationale for the requirement. |
| Remediation | Remediation guidance for the requirement. |
| Region by Check Status | Bar chart of Prowler check results grouped by region and status. |
| Resource Checks for Benchmark Requirements | Table listing resource ID, resource name, status, description, and the underlying Prowler check. |
## Walkthrough Video
A full walkthrough is available on YouTube:
[![Multi-Cloud CIS Benchmarks Power BI walkthrough video thumbnail](/images/powerbi/walkthrough-video-thumb.png)](https://www.youtube.com/watch?v=lfKFkTqBxjU)
## Related Resources
<CardGroup cols={2}>
<Card title="Compliance Frameworks" icon="shield-check" href="/user-guide/compliance/tutorials/compliance">
Review the Compliance workflow across Prowler Cloud, Prowler App, and Prowler CLI.
</Card>
<Card title="Prowler Dashboard" icon="chart-line" href="/user-guide/cli/tutorials/dashboard">
Explore the built-in local dashboard for Prowler CSV exports.
</Card>
</CardGroup>
@@ -44,6 +44,15 @@ User API Tokens are the recommended authentication method because they:
Create a **User API Token**, not an Account API Token. User API Tokens are created from the profile settings and offer finer permission control.
</Note>
**Quick Setup:** Use these pre-configured links to open the Cloudflare Dashboard with the required permissions already selected:
- [Create User API Token](https://dash.cloudflare.com/profile/api-tokens?permissionGroupKeys=%5B%7B%22key%22%3A%22account_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22dns%22%2C%22type%22%3A%22read%22%7D%5D&accountId=%2A&zoneId=all&name=Prowler%20Security%20Scanner) — creates a **User API Token** (recommended). Opens the **Create Custom Token** form prefilled with the four required read-only scopes (`Account Settings`, `Zone`, `Zone Settings`, `DNS`) and the name `Prowler Security Scanner`. Adjust **Account Resources** and **Zone Resources** to match the accounts and zones you want to scan, then click **Create Token**.
- [Create Account-Owned API Token](https://dash.cloudflare.com/?to=/:account/api-tokens&permissionGroupKeys=%5B%7B%22key%22%3A%22account_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22dns%22%2C%22type%22%3A%22read%22%7D%5D&name=Prowler%20Security%20Scanner) — creates an [account-owned token](https://developers.cloudflare.com/fundamentals/api/how-to/account-owned-token-template/) instead. Use this for automation or CI/CD where the token should not depend on a specific user account remaining active. Requires the **Super Administrator** or **Administrator** role on the account.
<Note>
Template URLs only pre-fill the token creation form. Review the permissions, configure resources, and click **Create Token** to complete the process.
</Note>
### Step 1: Create a User API Token
1. Log into the [Cloudflare Dashboard](https://dash.cloudflare.com).
@@ -14,6 +14,15 @@ Set up authentication for Cloudflare with the [Cloudflare Authentication](/user-
- Grant the required read-only permissions (`Account Settings:Read`, `Zone:Read`, `Zone Settings:Read`, `DNS:Read`)
- Identify the Cloudflare Account ID to use as the provider identifier
<Note>
**Quick Setup:** Use these pre-configured links to create a token with the required permissions already selected:
- [Create User API Token](https://dash.cloudflare.com/profile/api-tokens?permissionGroupKeys=%5B%7B%22key%22%3A%22account_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22dns%22%2C%22type%22%3A%22read%22%7D%5D&accountId=%2A&zoneId=all&name=Prowler%20Security%20Scanner) — creates a User API Token (recommended).
- [Create Account-Owned API Token](https://dash.cloudflare.com/?to=/:account/api-tokens&permissionGroupKeys=%5B%7B%22key%22%3A%22account_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22zone_settings%22%2C%22type%22%3A%22read%22%7D%2C%7B%22key%22%3A%22dns%22%2C%22type%22%3A%22read%22%7D%5D&name=Prowler%20Security%20Scanner) — creates an [account-owned token](https://developers.cloudflare.com/fundamentals/api/how-to/account-owned-token-template/), better suited for automation and CI/CD.
Both links open the Cloudflare Dashboard with the four required read-only scopes (`Account Settings`, `Zone`, `Zone Settings`, `DNS`) and the name `Prowler Security Scanner` prefilled. See [Cloudflare Authentication](/user-guide/providers/cloudflare/authentication#api-token-recommended) for the equivalent manual steps.
</Note>
<CardGroup cols={2}>
<Card title="Prowler Cloud" icon="cloud" href="#prowler-cloud">
Onboard Cloudflare using Prowler Cloud
@@ -1,51 +0,0 @@
---
title: "Getting Started With Scaleway on Prowler"
---
Prowler for Scaleway scans IAM resources in your Scaleway organization for security misconfigurations. The current release ships one check that flags API keys still owned by the account root user.
## Prerequisites
1. A Scaleway organization with IAM access.
2. A Scaleway API key with at least the `IAMReadOnly` policy bound to a dedicated IAM user (do not use the account root user).
3. Your organization ID (visible at the top right of the Scaleway console).
## Authentication
Prowler reads credentials from the standard Scaleway environment variables:
| Variable | Purpose |
|---|---|
| `SCW_ACCESS_KEY` | API key access key |
| `SCW_SECRET_KEY` | API key secret key |
| `SCW_DEFAULT_ORGANIZATION_ID` | Optional, required when the key bearer is an application |
| `SCW_DEFAULT_PROJECT_ID` | Optional, default project for project-scoped resources |
| `SCW_DEFAULT_REGION` | Optional, defaults to `fr-par` |
Alternatively, pass them as CLI flags (`--access-key`, `--secret-key`, `--organization-id`, `--project-id`, `--region`). The CLI emits a warning when secrets are passed via the command line; environment variables are preferred.
## Run a scan
```bash
export SCW_ACCESS_KEY="SCW..."
export SCW_SECRET_KEY="..."
export SCW_DEFAULT_ORGANIZATION_ID="..."
prowler scaleway
```
To run only the IAM root-key check:
```bash
prowler scaleway --check iam_no_root_api_keys
```
## Checks shipped
| Check ID | Severity | Description |
|---|---|---|
| `iam_no_root_api_keys` | Critical | Fails when any Scaleway IAM API key is still owned by the account root user. |
## Required Scaleway permissions
The API key bearer needs read access to the IAM API in order to list users and API keys. The `IAMReadOnly` policy is sufficient. Refer to the [Scaleway IAM policy reference](https://www.scaleway.com/en/docs/identity-and-access-management/iam/reference-content/permission-sets/) for the full list of permissions.
-115
View File
@@ -1,115 +0,0 @@
# Prowler Multicloud CIS Benchmarks PowerBI Template
![Prowler Report](https://github.com/user-attachments/assets/560f7f83-1616-4836-811a-16963223c72f)
## Getting Started
1. Install Microsoft PowerBI Desktop
This report requires the Microsoft PowerBI Desktop software which can be downloaded for free from Microsoft.
2. Run compliance scans in Prowler
The report uses compliance csv outputs from Prowler. Compliance scans can be run using either [Prowler CLI](https://docs.prowler.com/projects/prowler-open-source/en/latest/#prowler-cli) or [Prowler Cloud/App](https://cloud.prowler.com/sign-in)
1. Prowler CLI -&gt; Run a Prowler scan using the --compliance option
2. Prowler Cloud/App -&gt; Navigate to the compliance section to download csv outputs
![Download Compliance Scan](https://github.com/user-attachments/assets/42c11a60-8ce8-4c60-a663-2371199c052b)
The template supports the following CIS Benchmarks only:
| Compliance Framework | Version |
| ---------------------------------------------- | ------- |
| CIS Amazon Web Services Foundations Benchmark | v4.0.1 |
| CIS Google Cloud Platform Foundation Benchmark | v3.0.0 |
| CIS Microsoft Azure Foundations Benchmark | v3.0.0 |
| CIS Kubernetes Benchmark | v1.10.0 |
Ensure you run or download the correct benchmark versions.
3. Create a local directory to store Prowler csv outputs
Once downloaded, place your csv outputs in a directory on your local machine. If you rename the files, they must maintain the provider in the filename.
To use time-series capabilities such as "compliance percent over time" you'll need scans from multiple dates.
4. Download and run the PowerBI template file (.pbit)
Running the .pbit file will open PowerBI Desktop and prompt you for the full filepath to the local directory
5. Enter the full filepath to the directory created in step 3
Provide the full filepath from the root directory.
Ensure that the filepath is not wrapped in quotation marks (""). If you use Windows' "copy as path" feature, it will automatically include quotation marks.
6. Save the report as a PowerBI file (.pbix)
Once the filepath is entered, the template will automatically ingest and populate the report. You can then save this file as a new PowerBI report. If you'd like to generate another report, simply re-run the template file (.pbit) from step 4.
## Validation
After setting up your dashboard, you may want to validate the Prowler csv files were ingested correctly. To do this, navigate to the "Configuration" tab.
The "loaded CIS Benchmarks" table shows the supported benchmarks and versions. This is defined by the template file and not editable by the user. All benchmarks will be loaded regardless of which providers you provided csv outputs for.
The "Prowler CSV Folder" shows the path to the local directory you provided.
The "Loaded Prowler Exports" table shows the ingested csv files from the local directory. It will mark files that are treated as the latest assessment with a green checkmark.
![Prowler Validation](https://github.com/user-attachments/assets/a543ca9b-6cbe-4ad1-b32a-d4ac2163d447)
## Report Sections
The PowerBI Report is broken into three main report pages
| Report Page | Description |
| ----------- | ----------------------------------------------------------------------------------- |
| Overview | Provides general CIS Benchmark overview across both AWS, Azure, GCP, and Kubernetes |
| Benchmark | Provides overview of a single CIS Benchmark |
| Requirement | Drill-through page to view details of a single requirement |
### Overview Page
The overview page is a general CIS Benchmark overview across both AWS, Azure, GCP, and Kubernetes.
![image](https://github.com/user-attachments/assets/94164fa9-36a4-4bb9-890d-e9a9a63a3e7d)
The page has the following components:
| Component | Description |
| ---------------------------------------- | ------------------------------------------------------------------------ |
| CIS Benchmark Overview | Table with benchmark name, Version, and overall compliance percentage |
| Provider by Requirement Status | Bar chart showing benchmark requirements by status by provider |
| Compliance Percent Heatmap | Heatmap showing compliance percent by benchmark and profile level |
| Profile level by Requirement Status | Bar chart showing requirements by status and profile level |
| Compliance Percent Over Time by Provider | Line chart showing overall compliance percentage over time by provider. |
### Benchmark Page
The benchmark page provides an overview of a single CIS Benchmark. You can select the benchmark from the dropdown as well as scope down to specific profile levels or regions.
![image](https://github.com/user-attachments/assets/34498ee8-317b-4b81-b241-c561451d8def)
The page has the following components:
| Component | Description |
| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Compliance Percent Heatmap | Heatmap showing compliance percent by region and profile level |
| Benchmark Section by Requirement Status | Bar chart showing benchmark requirements by benchmark section and status |
| Compliance percent Over Time by Region | Line chart showing overall compliance percentage over time by region |
| Benchmark Requirements | Table showing requirement section, requirement number, requirement title, number of resources tested, status, and number of failing checks |
### Requirement Page
The requirement page is a drill-through page to view details of a single requirement. To populate the requirement page, right-click on a requirement from the "Benchmark Requirements" table on the benchmark page and select "Drill through" -&gt; "Requirement".
![image](https://github.com/user-attachments/assets/5c9172d9-56fe-4514-b341-7e708863fad6)
The requirement page has the following components:
| Component | Description |
| ------------------------------------------ | --------------------------------------------------------------------------------- |
| Title | Title of the requirement |
| Rationale | Rationale of the requirement |
| Remediation | Remediation guidance for the requirement |
| Region by Check Status | Bar chart showing Prowler checks by region and status |
| Resource Checks for Benchmark Requirements | Table showing Resource ID, Resource Name, Status, Description, and Prowler Check |
## Walkthrough Video
[![image](https://github.com/user-attachments/assets/866642c6-43ac-4aac-83d3-bb625002da0b)](https://www.youtube.com/watch?v=lfKFkTqBxjU)
@@ -76,7 +76,9 @@ class SimplifiedFindingGroup(MinimalSerializerMixin):
)
muted_count: int = Field(description="Total muted findings in this group", ge=0)
new_count: int = Field(description="Number of new non-muted findings", ge=0)
changed_count: int = Field(description="Number of changed non-muted findings", ge=0)
changed_count: int = Field(
description="Number of changed non-muted findings", ge=0
)
first_seen_at: str | None = Field(
default=None, description="First time this group was detected"
)
@@ -107,12 +109,18 @@ class DetailedFindingGroup(SimplifiedFindingGroup):
new_pass_count: int = Field(description="New non-muted PASS findings", ge=0)
new_pass_muted_count: int = Field(description="New muted PASS findings", ge=0)
new_manual_count: int = Field(description="New non-muted MANUAL findings", ge=0)
new_manual_muted_count: int = Field(description="New muted MANUAL findings", ge=0)
changed_fail_count: int = Field(description="Changed non-muted FAIL findings", ge=0)
new_manual_muted_count: int = Field(
description="New muted MANUAL findings", ge=0
)
changed_fail_count: int = Field(
description="Changed non-muted FAIL findings", ge=0
)
changed_fail_muted_count: int = Field(
description="Changed muted FAIL findings", ge=0
)
changed_pass_count: int = Field(description="Changed non-muted PASS findings", ge=0)
changed_pass_count: int = Field(
description="Changed non-muted PASS findings", ge=0
)
changed_pass_muted_count: int = Field(
description="Changed muted PASS findings", ge=0
)
@@ -464,7 +464,9 @@ class FindingGroupsTools(BaseTool):
clean_params = self.api_client.build_filter_params(params)
api_response = await self.api_client.get(endpoint, params=clean_params)
response = FindingGroupResourcesListResponse.from_api_response(api_response)
response = FindingGroupResourcesListResponse.from_api_response(
api_response
)
return response.model_dump()
except Exception as e:
self.logger.error(f"Error listing finding group resources: {e}")
Generated
+1 -33
View File
@@ -6043,38 +6043,6 @@ pydantic = ">=2.6.0"
ruamel-yaml = ">=0.17.21"
typing-extensions = ">=4.7.1"
[[package]]
name = "scaleway"
version = "2.10.3"
description = "Scaleway SDK for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "scaleway-2.10.3-py3-none-any.whl", hash = "sha256:dbf381440d6caf37c878cf16445a63f4969a4aac2257c9b72c744d10ff223a0c"},
{file = "scaleway-2.10.3.tar.gz", hash = "sha256:b1f9dd1b1450767205234c6f5a345e5e25dc039c780253d698893b5c344ce594"},
]
[package.dependencies]
scaleway-core = "2.10.3"
[[package]]
name = "scaleway-core"
version = "2.10.3"
description = "Scaleway SDK for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "scaleway_core-2.10.3-py3-none-any.whl", hash = "sha256:fd4112144554d6adae22ff737555eeb0e38cb1063250b3e88c9aebc1b957793b"},
{file = "scaleway_core-2.10.3.tar.gz", hash = "sha256:56432f755d694669429de51d51c1d0b3361b28dc2f939b28e4cb954610ee76be"},
]
[package.dependencies]
python-dateutil = ">=2.8.2,<3.0.0"
PyYAML = ">=6.0,<7.0"
requests = ">=2.28.1,<3.0.0"
[[package]]
name = "schema"
version = "0.7.5"
@@ -6917,4 +6885,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "e158ae9902d799a82e7d91cb4c0eb404d811ae3460310192fbdd198727e647cd"
content-hash = "96359a9bfe4031fb0747c22eb4b00f2a008e3fb6d07189fa0fe6ee3875b1f913"
+6 -1
View File
@@ -10,13 +10,18 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `iam_user_access_not_stale_to_sagemaker` check for AWS provider with configurable `max_unused_sagemaker_access_days` (default 90) [(#11000)](https://github.com/prowler-cloud/prowler/pull/11000)
- `cloudtrail_bedrock_logging_enabled` check for AWS provider [(#10858)](https://github.com/prowler-cloud/prowler/pull/10858)
- Okta provider with OAuth 2.0 authentication and `signon_global_session_idle_timeout_15min` check [(#11079)](https://github.com/prowler-cloud/prowler/pull/11079)
- Scaleway provider with `iam_no_root_api_keys` check [(#11166)](https://github.com/prowler-cloud/prowler/pull/11166)
- `sagemaker_domain_sso_configured` check for AWS provider [(#11094)](https://github.com/prowler-cloud/prowler/pull/11094)
### 🔄 Changed
- `entra_emergency_access_exclusion` check for M365 provider now scopes the exclusion requirement to enabled Conditional Access policies with a `Block` grant control instead of every enabled policy, focusing on the lockout-relevant policy set [(#10849)](https://github.com/prowler-cloud/prowler/pull/10849)
- AWS IAM customer-managed policy checks no longer emit `FAIL` on unattached policies unless `--scan-unused-services` is enabled [(#11150)](https://github.com/prowler-cloud/prowler/pull/11150)
### 🐞 Fixed
- `zone_waf_enabled` check for Cloudflare provider now appends a plan-aware hint to the FAIL `status_extended`: a possible-false-positive note on paid plans (Pro, Business, Enterprise) where the legacy `waf` zone setting can read `off` even though WAF managed rulesets are deployed via the dashboard, and a "not available on the Cloudflare Free plan" note on Free zones [(#9896)](https://github.com/prowler-cloud/prowler/pull/9896)
- Google Workspace Gmail checks sharing a single resource row, causing the service field to be overwritten by the last check executed [(#11169)](https://github.com/prowler-cloud/prowler/pull/11169)
---
## [5.26.2] (Prowler UNRELEASED)
-5
View File
@@ -157,7 +157,6 @@ from prowler.providers.nhn.models import NHNOutputOptions
from prowler.providers.okta.models import OktaOutputOptions
from prowler.providers.openstack.models import OpenStackOutputOptions
from prowler.providers.oraclecloud.models import OCIOutputOptions
from prowler.providers.scaleway.models import ScalewayOutputOptions
from prowler.providers.vercel.models import VercelOutputOptions
@@ -432,10 +431,6 @@ def prowler():
output_options = OktaOutputOptions(
args, bulk_checks_metadata, global_provider.identity
)
elif provider == "scaleway":
output_options = ScalewayOutputOptions(
args, bulk_checks_metadata, global_provider.identity
)
# Run the quick inventory for the provider if available
if hasattr(args, "quick_inventory") and args.quick_inventory:
-1
View File
@@ -75,7 +75,6 @@ class Provider(str, Enum):
ALIBABACLOUD = "alibabacloud"
OPENSTACK = "openstack"
IMAGE = "image"
SCALEWAY = "scaleway"
VERCEL = "vercel"
OKTA = "okta"
-4
View File
@@ -741,10 +741,6 @@ def execute(
is_finding_muted_args["team_id"] = (
team.id if team else global_provider.identity.user_id
)
elif global_provider.type == "scaleway":
is_finding_muted_args["organization_id"] = (
global_provider.identity.organization_id
)
elif global_provider.type == "oraclecloud":
is_finding_muted_args["tenancy_id"] = (
global_provider.identity.tenancy_id
-47
View File
@@ -1318,53 +1318,6 @@ class CheckReportVercel(Check_Report):
return "global"
class CheckReportScaleway(Check_Report):
    """Contains the Scaleway Check's finding information.

    Scaleway scans run at the organization level. Most IAM/account-level
    resources are global; regional resources expose a ``region`` attribute
    on the underlying object, which we surface as the report ``region``.
    """

    # Human-readable resource name, resolved from overrides or the resource.
    resource_name: str
    # Unique resource identifier, resolved from overrides or the resource.
    resource_id: str
    # Organization that owns the resource (Scaleway's audit scope).
    organization_id: str

    def __init__(
        self,
        metadata: Dict,
        resource: Any,
        resource_name: str = None,
        resource_id: str = None,
        organization_id: str = None,
    ) -> None:
        """Initialize the Scaleway Check's finding information.

        Args:
            metadata: Check metadata dictionary.
            resource: The Scaleway resource being checked.
            resource_name: Override for resource name.
            resource_id: Override for resource ID.
            organization_id: Override for the organization ID.
        """
        super().__init__(metadata, resource)
        # Explicit overrides win; otherwise fall back to the resource's own
        # ``name``/``resource_name`` (and ``id``/``resource_id``) attributes,
        # defaulting to an empty string when neither exists.
        self.resource_name = resource_name or getattr(
            resource, "name", getattr(resource, "resource_name", "")
        )
        self.resource_id = resource_id or getattr(
            resource, "id", getattr(resource, "resource_id", "")
        )
        self.organization_id = organization_id or getattr(
            resource, "organization_id", ""
        )
        # Regional resources carry their own region; everything else is global.
        self._region = getattr(resource, "region", None) or "global"

    @property
    def region(self) -> str:
        """Scaleway regional resources expose their own region; IAM is global."""
        return self._region
# Testing Pending
def load_check_metadata(metadata_file: str) -> CheckMetadata:
"""
+2 -3
View File
@@ -29,10 +29,10 @@ class ProwlerArgumentParser:
self.parser = argparse.ArgumentParser(
prog="prowler",
formatter_class=RawTextHelpFormatter,
usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,scaleway,vercel,dashboard,iac,image,llm} ...",
usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel,dashboard,iac,image,llm} ...",
epilog="""
Available Cloud Providers:
{aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,scaleway,vercel}
{aws,azure,gcp,kubernetes,m365,github,googleworkspace,okta,iac,llm,image,nhn,mongodbatlas,oraclecloud,alibabacloud,cloudflare,openstack,vercel}
aws AWS Provider
azure Azure Provider
gcp GCP Provider
@@ -50,7 +50,6 @@ Available Cloud Providers:
image Container Image Provider
nhn NHN Provider (Unofficial)
mongodbatlas MongoDB Atlas Provider
scaleway Scaleway Provider
vercel Vercel Provider
Available components:
-12
View File
@@ -442,18 +442,6 @@ class Finding(BaseModel):
output_data["resource_uid"] = check_output.resource_id
output_data["region"] = "global"
elif provider.type == "scaleway":
output_data["auth_method"] = "api_key"
output_data["account_uid"] = get_nested_attribute(
provider, "identity.organization_id"
)
output_data["account_name"] = get_nested_attribute(
provider, "identity.bearer_email"
) or get_nested_attribute(provider, "identity.organization_id")
output_data["resource_name"] = check_output.resource_name
output_data["resource_uid"] = check_output.resource_id
output_data["region"] = check_output.region
elif provider.type == "alibabacloud":
output_data["auth_method"] = get_nested_attribute(
provider, "identity.identity_arn"
-71
View File
@@ -1450,77 +1450,6 @@ class HTML(Output):
)
return ""
    @staticmethod
    def get_scaleway_assessment_summary(provider: Provider) -> str:
        """
        get_scaleway_assessment_summary gets the HTML assessment summary for the Scaleway provider

        Args:
            provider (Provider): the Scaleway provider object

        Returns:
            str: HTML assessment summary for the Scaleway provider (empty string on error)
        """
        try:
            # Assessment card: the organization is the Scaleway audit scope.
            assessment_items = f"""
                        <li class="list-group-item">
                            <b>Organization ID:</b> {provider.identity.organization_id}
                        </li>"""
            # Credentials card: always states API-key auth; optional details
            # (access key, bearer, default region) are appended only when set.
            credentials_items = """
                        <li class="list-group-item">
                            <b>Authentication:</b> API Key
                        </li>"""
            access_key = getattr(provider.session, "access_key", None)
            if access_key:
                credentials_items += f"""
                        <li class="list-group-item">
                            <b>Access Key:</b> {access_key}
                        </li>"""
            bearer_type = getattr(provider.identity, "bearer_type", None)
            bearer_email = getattr(provider.identity, "bearer_email", None)
            bearer_id = getattr(provider.identity, "bearer_id", None)
            if bearer_type:
                # Prefer the email as the bearer label, then the ID, then "-".
                bearer_label = bearer_email or bearer_id or "-"
                credentials_items += f"""
                        <li class="list-group-item">
                            <b>Bearer:</b> {bearer_type} ({bearer_label})
                        </li>"""
            region = getattr(provider.session, "default_region", None)
            if region:
                credentials_items += f"""
                        <li class="list-group-item">
                            <b>Default Region:</b> {region}
                        </li>"""
            # Two Bootstrap cards side by side: summary + credentials.
            return f"""
                <div class="col-md-2">
                    <div class="card">
                        <div class="card-header">
                            Scaleway Assessment Summary
                        </div>
                        <ul class="list-group list-group-flush">{assessment_items}
                        </ul>
                    </div>
                </div>
                <div class="col-md-4">
                    <div class="card">
                        <div class="card-header">
                            Scaleway Credentials
                        </div>
                        <ul class="list-group list-group-flush">{credentials_items}
                        </ul>
                    </div>
                </div>"""
        except Exception as error:
            # Never break report generation over a summary card; log and
            # return an empty fragment instead.
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
            )
            return ""
@staticmethod
def get_assessment_summary(provider: Provider) -> str:
"""
-2
View File
@@ -42,8 +42,6 @@ def stdout_report(finding, color, verbose, status, fix):
details = finding.region
if finding.check_metadata.Provider == "okta":
details = finding.region
if finding.check_metadata.Provider == "scaleway":
details = finding.region
if (verbose or fix) and (not status or finding.status in status):
if finding.muted:
-3
View File
@@ -111,9 +111,6 @@ def display_summary_table(
elif provider.type == "okta":
entity_type = "Okta Org"
audited_entities = provider.identity.org_domain
elif provider.type == "scaleway":
entity_type = "Organization"
audited_entities = provider.identity.organization_id
# Check if there are findings and that they are not all MANUAL
if findings and not all(finding.status == "MANUAL" for finding in findings):
@@ -0,0 +1,39 @@
{
"Provider": "aws",
"CheckID": "sagemaker_domain_sso_configured",
"CheckTitle": "SageMaker domains use SSO authentication instead of IAM mode",
"CheckType": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"ServiceName": "sagemaker",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "Other",
"ResourceGroup": "ai_ml",
"Description": "**SageMaker Domain** configured with **IAM Identity Center (SSO) authentication**. The check validates that each SageMaker Domain uses SSO mode (`AuthMode: SSO`) and is associated with an IAM Identity Center instance (`SingleSignOnManagedApplicationInstanceId` or `SingleSignOnApplicationArn` present), ensuring user access is centrally managed through AWS IAM Identity Center.",
"Risk": "IAM-mode domains create per-user IAM users or roles managed locally to SageMaker, drifting from the organization's identity provider and weakening lifecycle controls such as offboarding, MFA enforcement, and session policies. SSO-mode domains without an IAM Identity Center association leave authentication in an inconsistent state and bypass centralized access governance.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://docs.aws.amazon.com/sagemaker/latest/dg/onboard-sso-users.html"
],
"Remediation": {
"Code": {
"CLI": "aws sagemaker describe-domain --domain-id <domain_id> --query '{AuthMode:AuthMode,SingleSignOnManagedApplicationInstanceId:SingleSignOnManagedApplicationInstanceId,SingleSignOnApplicationArn:SingleSignOnApplicationArn}'",
"NativeIaC": "```yaml\n# CloudFormation: Create a SageMaker Domain with SSO authentication\nResources:\n SageMakerDomain:\n Type: AWS::SageMaker::Domain\n Properties:\n DomainName: <example_domain_name>\n AuthMode: SSO # Critical: enables IAM Identity Center authentication\n DefaultUserSettings:\n ExecutionRole: <example_role_arn>\n VpcId: <example_vpc_id>\n SubnetIds:\n - <example_subnet_id>\n```",
"Other": "SageMaker Domains cannot be switched from IAM to SSO mode after creation. To remediate, create a new Domain with AuthMode set to SSO and migrate user profiles.",
"Terraform": "```hcl\n# Terraform: Create a SageMaker Domain with SSO authentication\nresource \"aws_sagemaker_domain\" \"example\" {\n domain_name = \"<example_domain_name>\"\n auth_mode = \"SSO\" # Critical: enables IAM Identity Center authentication\n vpc_id = \"<example_vpc_id>\"\n subnet_ids = [\"<example_subnet_id>\"]\n\n default_user_settings {\n execution_role = \"<example_role_arn>\"\n }\n}\n```"
},
"Recommendation": {
"Text": "Configure SageMaker Domains with SSO authentication mode to anchor user access to AWS IAM Identity Center. This enforces centralized identity lifecycle management, MFA policies, and session controls. Domains created with IAM mode must be recreated with SSO mode since the auth mode cannot be changed after creation.",
"Url": "https://hub.prowler.com/check/sagemaker_domain_sso_configured"
}
},
"Categories": [
"identity-access",
"gen-ai"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
}
@@ -0,0 +1,27 @@
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.sagemaker.sagemaker_client import sagemaker_client
class sagemaker_domain_sso_configured(Check):
    """Ensure SageMaker domains authenticate via IAM Identity Center (SSO)."""

    def execute(self):
        """Evaluate each collected SageMaker domain.

        A domain passes only when its auth mode is SSO *and* it is linked to
        an IAM Identity Center instance or application; any other state fails.
        """
        findings = []
        for domain in sagemaker_client.sagemaker_domains:
            report = Check_Report_AWS(metadata=self.metadata(), resource=domain)
            sso_associated = bool(
                domain.single_sign_on_managed_application_instance_id
                or domain.single_sign_on_application_arn
            )
            if domain.auth_mode == "SSO" and sso_associated:
                report.status = "PASS"
                report.status_extended = f"SageMaker domain {domain.name} is configured with SSO authentication and is associated with an IAM Identity Center instance."
            elif domain.auth_mode == "SSO":
                # SSO mode without an Identity Center association is still a FAIL.
                report.status = "FAIL"
                report.status_extended = f"SageMaker domain {domain.name} is configured with SSO authentication but is not associated with an IAM Identity Center instance."
            else:
                report.status = "FAIL"
                current_mode = domain.auth_mode if domain.auth_mode else "unknown"
                report.status_extended = f"SageMaker domain {domain.name} is not configured with SSO authentication, current mode is {current_mode}."
            findings.append(report)
        return findings
@@ -15,6 +15,7 @@ class SageMaker(AWSService):
self.sagemaker_notebook_instances = []
self.sagemaker_models = []
self.sagemaker_training_jobs = []
self.sagemaker_domains = []
self.endpoint_configs = {}
# Retrieve resources concurrently
@@ -22,6 +23,7 @@ class SageMaker(AWSService):
self.__threading_call__(self._list_models)
self.__threading_call__(self._list_training_jobs)
self.__threading_call__(self._list_endpoint_configs)
self.__threading_call__(self._list_domains)
# Describe resources concurrently
self.__threading_call__(self._describe_model, self.sagemaker_models)
@@ -34,6 +36,7 @@ class SageMaker(AWSService):
self.__threading_call__(
self._describe_endpoint_config, list(self.endpoint_configs.values())
)
self.__threading_call__(self._describe_domain, self.sagemaker_domains)
# List tags concurrently for each resource collection
    # This replaces the previous sequential execution to improve performance
@@ -47,6 +50,7 @@ class SageMaker(AWSService):
self.__threading_call__(
self._list_tags_for_resource, list(self.endpoint_configs.values())
)
self.__threading_call__(self._list_tags_for_resource, self.sagemaker_domains)
def _list_notebook_instances(self, regional_client):
logger.info("SageMaker - listing notebook instances...")
@@ -218,6 +222,46 @@ class SageMaker(AWSService):
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
    def _list_domains(self, regional_client):
        """Collect SageMaker domains for one region via the list_domains paginator.

        Domains are appended to ``self.sagemaker_domains`` as lightweight
        ``Domain`` models; auth details are filled in later by
        ``_describe_domain``. When a resource filter is configured, only
        domains whose ARN matches it are kept.
        """
        logger.info("SageMaker - listing domains...")
        try:
            list_domains_paginator = regional_client.get_paginator("list_domains")
            for page in list_domains_paginator.paginate():
                for domain in page["Domains"]:
                    # Honor the resource filter when one is set.
                    if not self.audit_resources or (
                        is_resource_filtered(domain["DomainArn"], self.audit_resources)
                    ):
                        self.sagemaker_domains.append(
                            Domain(
                                domain_id=domain["DomainId"],
                                name=domain["DomainName"],
                                region=regional_client.region,
                                arn=domain["DomainArn"],
                            )
                        )
        except Exception as error:
            # Log and continue: a failure in one region must not abort the scan.
            logger.error(
                f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
    def _describe_domain(self, domain):
        """Enrich a ``Domain`` model with auth details from DescribeDomain.

        Populates ``auth_mode`` (only when present in the response) and the
        two SSO association fields (``None`` when absent, via ``dict.get``).
        """
        logger.info("SageMaker - describing domain...")
        try:
            # Use the client for the region the domain was listed in.
            regional_client = self.regional_clients[domain.region]
            describe_domain = regional_client.describe_domain(DomainId=domain.domain_id)
            if "AuthMode" in describe_domain:
                domain.auth_mode = describe_domain["AuthMode"]
            domain.single_sign_on_managed_application_instance_id = describe_domain.get(
                "SingleSignOnManagedApplicationInstanceId"
            )
            domain.single_sign_on_application_arn = describe_domain.get(
                "SingleSignOnApplicationArn"
            )
        except Exception as error:
            # Log and continue: one failed describe must not abort the scan.
            logger.error(
                f"{domain.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
def _list_endpoint_configs(self, regional_client):
logger.info("SageMaker - listing endpoint configs...")
try:
@@ -303,6 +347,17 @@ class ProductionVariant(BaseModel):
initial_instance_count: int
class Domain(BaseModel):
    # Minimal SageMaker Domain model built by _list_domains; the auth-related
    # fields below stay None until _describe_domain fills them in.
    domain_id: str
    name: str
    region: str
    arn: str
    auth_mode: Optional[str] = None  # set from DescribeDomain's "AuthMode" (e.g. "SSO")
    single_sign_on_managed_application_instance_id: Optional[str] = None
    single_sign_on_application_arn: Optional[str] = None
    tags: Optional[list] = []
class EndpointConfig(BaseModel):
name: str
region: str
+35
View File
@@ -0,0 +1,35 @@
from typing import Optional
# Cloudflare returns the plan name in ``zone.plan.name`` (e.g. "Free Website",
# "Pro Website", "Business Website", "Enterprise Website"). Free plans do not
# expose WAF managed rulesets at all, while paid plans expose them but the
# legacy ``waf`` zone setting can lag behind the actual deployment state.
PAID_PLAN_KEYWORDS = ("pro", "business", "enterprise")
FREE_PLAN_KEYWORDS = ("free",)
def _plan_matches(plan: Optional[str], keywords: tuple[str, ...]) -> bool:
if not isinstance(plan, str):
return False
plan_lower = plan.lower()
return any(keyword in plan_lower for keyword in keywords)
def is_paid_plan(plan: Optional[str]) -> bool:
"""Return True when the Cloudflare zone plan is a paid tier."""
return _plan_matches(plan, PAID_PLAN_KEYWORDS)
def is_free_plan(plan: Optional[str]) -> bool:
"""Return True when the Cloudflare zone plan is the Free tier."""
return _plan_matches(plan, FREE_PLAN_KEYWORDS)
def paid_plan_suffix(plan: Optional[str], message: str) -> str:
"""Return an explanatory suffix only when the zone is on a paid plan."""
return f" {message}" if is_paid_plan(plan) else ""
def free_plan_suffix(plan: Optional[str], message: str) -> str:
"""Return an explanatory suffix only when the zone is on the Free plan."""
return f" {message}" if is_free_plan(plan) else ""
@@ -1,6 +1,19 @@
from prowler.lib.check.models import Check, CheckReportCloudflare
from prowler.providers.cloudflare.lib.plan import (
free_plan_suffix,
paid_plan_suffix,
)
from prowler.providers.cloudflare.services.zone.zone_client import zone_client
PAID_PLAN_FALSE_POSITIVE_HINT = (
"This may be a false positive if WAF managed rulesets are configured via "
"the Cloudflare dashboard; verify manually in Security > WAF."
)
FREE_PLAN_UNAVAILABLE_HINT = (
"This may be expected because the Web Application Firewall is not "
"available on the Cloudflare Free plan."
)
class zone_waf_enabled(Check):
"""Ensure that WAF is enabled for Cloudflare zones.
@@ -35,6 +48,16 @@ class zone_waf_enabled(Check):
report.status_extended = f"WAF is enabled for zone {zone.name}."
else:
report.status = "FAIL"
report.status_extended = f"WAF is not enabled for zone {zone.name}."
# Two plan-specific hints can be appended to the FAIL message:
# - Paid plans: the legacy ``waf`` zone setting can read ``off``
# while WAF managed rulesets are deployed via the dashboard,
# so the FAIL may be a false positive.
# - Free plans: WAF is not available at all, so the FAIL is
# expected and the suffix points that out.
report.status_extended = (
f"WAF is not enabled for zone {zone.name}."
f"{paid_plan_suffix(zone.plan, PAID_PLAN_FALSE_POSITIVE_HINT)}"
f"{free_plan_suffix(zone.plan, FREE_PLAN_UNAVAILABLE_HINT)}"
)
findings.append(report)
return findings
-11
View File
@@ -416,17 +416,6 @@ class Provider(ABC):
mutelist_path=arguments.mutelist_file,
fixer_config=fixer_config,
)
elif "scaleway" in provider_class_name.lower():
provider_class(
access_key=getattr(arguments, "access_key", None),
secret_key=getattr(arguments, "secret_key", None),
organization_id=getattr(arguments, "organization_id", None),
project_id=getattr(arguments, "project_id", None),
region=getattr(arguments, "region", None),
config_path=arguments.config_file,
mutelist_path=arguments.mutelist_file,
fixer_config=fixer_config,
)
except TypeError as error:
logger.critical(
@@ -18,7 +18,10 @@ class gmail_anomalous_attachment_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.enable_anomalous_attachment_protection
@@ -18,7 +18,10 @@ class gmail_auto_forwarding_disabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
forwarding_enabled = gmail_client.policies.enable_auto_forwarding
@@ -18,7 +18,10 @@ class gmail_comprehensive_mail_storage_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
storage_enabled = gmail_client.policies.comprehensive_mail_storage_enabled
@@ -18,7 +18,10 @@ class gmail_domain_spoofing_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.detect_domain_name_spoofing
@@ -18,7 +18,10 @@ class gmail_employee_name_spoofing_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.detect_employee_name_spoofing
@@ -18,7 +18,10 @@ class gmail_encrypted_attachment_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.enable_encrypted_attachment_protection
@@ -18,7 +18,10 @@ class gmail_enhanced_pre_delivery_scanning_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
scanning_enabled = (
@@ -18,7 +18,10 @@ class gmail_external_image_scanning_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
scanning_enabled = gmail_client.policies.enable_external_image_scanning
@@ -18,7 +18,10 @@ class gmail_groups_spoofing_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.detect_groups_spoofing
@@ -18,7 +18,10 @@ class gmail_inbound_domain_spoofing_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.detect_inbound_domain_spoofing
@@ -18,7 +18,10 @@ class gmail_mail_delegation_disabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
delegation_enabled = gmail_client.policies.enable_mail_delegation
@@ -18,7 +18,10 @@ class gmail_per_user_outbound_gateway_disabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
gateway_allowed = gmail_client.policies.allow_per_user_outbound_gateway
@@ -18,7 +18,10 @@ class gmail_pop_imap_access_disabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
pop_enabled = gmail_client.policies.enable_pop_access
@@ -18,7 +18,10 @@ class gmail_script_attachment_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.enable_script_attachment_protection
@@ -18,7 +18,10 @@ class gmail_shortener_scanning_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
scanning_enabled = gmail_client.policies.enable_shortener_scanning
@@ -18,7 +18,10 @@ class gmail_unauthenticated_email_protection_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
enabled = gmail_client.policies.detect_unauthenticated_emails
@@ -18,7 +18,10 @@ class gmail_untrusted_link_warnings_enabled(Check):
if gmail_client.policies_fetched:
report = CheckReportGoogleWorkspace(
metadata=self.metadata(),
resource=gmail_client.provider.domain_resource,
resource=gmail_client.policies,
resource_id="gmailPolicies",
resource_name="Gmail Policies",
customer_id=gmail_client.provider.identity.customer_id,
)
warnings_enabled = (
@@ -1,99 +0,0 @@
# Exception codes from 14000 to 14999 are reserved for Scaleway exceptions
from prowler.exceptions.exceptions import ProwlerException
class ScalewayBaseException(ProwlerException):
    """Base exception for Scaleway provider errors."""

    # Error table keyed by (code, class name) so each subclass resolves its
    # own default message and remediation hint.
    SCALEWAY_ERROR_CODES = {
        (14000, "ScalewayCredentialsError"): {
            "message": "Scaleway credentials not found or invalid.",
            "remediation": (
                "Set the SCW_ACCESS_KEY and SCW_SECRET_KEY environment variables "
                "with a valid Scaleway API key. Generate one at "
                "https://console.scaleway.com/iam/api-keys."
            ),
        },
        (14001, "ScalewayAuthenticationError"): {
            "message": "Authentication to the Scaleway API failed.",
            "remediation": (
                "Verify your Scaleway API key is valid, has not expired, and that "
                "the bearer has IAM read permissions on the target organization."
            ),
        },
        (14002, "ScalewaySessionError"): {
            "message": "Failed to create a Scaleway API session.",
            "remediation": (
                "Check network connectivity and ensure the Scaleway API is "
                "reachable at https://api.scaleway.com."
            ),
        },
        (14003, "ScalewayIdentityError"): {
            "message": "Failed to retrieve Scaleway identity information.",
            "remediation": (
                "Ensure the API key has permissions to read IAM users and the "
                "owning organization metadata."
            ),
        },
        (14004, "ScalewayAPIError"): {
            "message": "An error occurred while calling the Scaleway API.",
            "remediation": (
                "Check the Scaleway API status at https://status.scaleway.com "
                "and retry. Run with --log-level DEBUG for the full traceback."
            ),
        },
    }

    def __init__(self, code, file=None, original_exception=None, message=None):
        """Build the error payload and delegate to ProwlerException.

        Args:
            code: Numeric Scaleway error code (14000-14999).
            file: Source file where the error originated.
            original_exception: Underlying exception being wrapped, if any.
            message: Optional override for the table's default message.
        """
        provider = "Scaleway"
        error_info = self.SCALEWAY_ERROR_CODES.get((code, self.__class__.__name__))
        if error_info is None:
            # Unknown (code, class) pair: fall back to a generic entry.
            error_info = {
                "message": message or "Unknown Scaleway error.",
                "remediation": "Check the Scaleway API documentation for more details.",
            }
        elif message:
            # Copy before overriding so the class-level table stays untouched.
            error_info = error_info.copy()
            error_info["message"] = message
        super().__init__(
            code=code,
            source=provider,
            file=file,
            original_exception=original_exception,
            error_info=error_info,
        )
# Thin subclasses: each pins its reserved code from SCALEWAY_ERROR_CODES.
class ScalewayCredentialsError(ScalewayBaseException):
    # 14000: credentials missing or invalid.
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14000, file=file, original_exception=original_exception, message=message
        )


class ScalewayAuthenticationError(ScalewayBaseException):
    # 14001: authentication against the Scaleway API failed.
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14001, file=file, original_exception=original_exception, message=message
        )


class ScalewaySessionError(ScalewayBaseException):
    # 14002: API session could not be created.
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14002, file=file, original_exception=original_exception, message=message
        )


class ScalewayIdentityError(ScalewayBaseException):
    # 14003: identity information could not be retrieved.
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14003, file=file, original_exception=original_exception, message=message
        )


class ScalewayAPIError(ScalewayBaseException):
    # 14004: generic Scaleway API call failure.
    def __init__(self, file=None, original_exception=None, message=None):
        super().__init__(
            14004, file=file, original_exception=original_exception, message=message
        )
@@ -1,57 +0,0 @@
SENSITIVE_ARGUMENTS = frozenset({"--access-key", "--secret-key"})
def init_parser(self):
    """Init the Scaleway provider CLI parser.

    Registers the ``scaleway`` subcommand with two argument groups:
    Authentication (API access/secret key) and Scope (organization,
    project, region). All options default to None so the provider can
    fall back to the SCW_* environment variables.
    """
    scaleway_parser = self.subparsers.add_parser(
        "scaleway",
        parents=[self.common_providers_parser],
        help="Scaleway Provider",
    )
    # Authentication
    auth_subparser = scaleway_parser.add_argument_group("Authentication")
    auth_subparser.add_argument(
        "--access-key",
        nargs="?",
        default=None,
        metavar="SCW_ACCESS_KEY",
        help=(
            "Scaleway API access key. Prefer the SCW_ACCESS_KEY env var "
            "instead of passing it on the command line."
        ),
    )
    auth_subparser.add_argument(
        "--secret-key",
        nargs="?",
        default=None,
        metavar="SCW_SECRET_KEY",
        help=(
            "Scaleway API secret key. Prefer the SCW_SECRET_KEY env var "
            "instead of passing it on the command line."
        ),
    )
    # Scope
    scope_subparser = scaleway_parser.add_argument_group("Scope")
    scope_subparser.add_argument(
        "--organization-id",
        nargs="?",
        default=None,
        metavar="SCW_DEFAULT_ORGANIZATION_ID",
        help="Scaleway organization ID to scope the audit.",
    )
    scope_subparser.add_argument(
        "--project-id",
        nargs="?",
        default=None,
        metavar="SCW_DEFAULT_PROJECT_ID",
        help="Default Scaleway project ID for project-scoped resources.",
    )
    scope_subparser.add_argument(
        "--region",
        nargs="?",
        default=None,
        metavar="SCW_DEFAULT_REGION",
        help="Default Scaleway region (fr-par, nl-ams, pl-waw).",
    )
@@ -1,20 +0,0 @@
from prowler.lib.check.models import CheckReportScaleway
from prowler.lib.mutelist.mutelist import Mutelist
from prowler.lib.outputs.utils import unroll_dict, unroll_tags
class ScalewayMutelist(Mutelist):
    """Scaleway-specific mutelist helper."""

    def is_finding_muted(
        self,
        finding: CheckReportScaleway,
        organization_id: str,
    ) -> bool:
        """Return True when the finding matches a mutelist entry.

        The organization ID plays the account role in the generic matcher;
        findings without a region are matched as "global", and the resource
        is identified by its ID (falling back to its name).
        """
        return self.is_muted(
            organization_id,
            finding.check_metadata.CheckID,
            finding.region or "global",
            finding.resource_id or finding.resource_name,
            unroll_dict(unroll_tags(finding.resource_tags)),
        )
@@ -1,44 +0,0 @@
from prowler.lib.logger import logger
from prowler.providers.scaleway.exceptions.exceptions import ScalewayAPIError
class ScalewayService:
    """Base class for Scaleway services.

    Centralizes the provider context (audit/fixer configuration, the
    scoping organization, the authenticated ``scaleway.Client``) so each
    service only worries about which Scaleway API to call.
    """

    def __init__(self, service: str, provider):
        """Capture the shared provider context for a service.

        Args:
            service: Service name; normalized to lowercase.
            provider: The authenticated Scaleway provider instance.
        """
        self.provider = provider
        self.audit_config = provider.audit_config
        self.fixer_config = provider.fixer_config
        self.service = service.lower() if not service.islower() else service
        # Shared authenticated client and the organization in scope
        self.client = provider.session.client
        self.organization_id = provider.identity.organization_id

    def _safe_call(self, label: str, fn, *args, **kwargs):
        """Run a Scaleway SDK call and surface failures as ScalewayAPIError.

        Args:
            label: Human-readable label for the call (used in logs).
            fn: SDK function to invoke.

        Returns:
            The SDK function result.

        Raises:
            ScalewayAPIError: If the underlying SDK call raises; the original
                exception is logged and attached to the raised error.
        """
        try:
            return fn(*args, **kwargs)
        except Exception as error:
            logger.error(
                f"{self.service} - {label} failed: "
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            raise ScalewayAPIError(
                file=__file__,
                original_exception=error,
                message=f"Scaleway API call '{label}' failed.",
            )
-53
View File
@@ -1,53 +0,0 @@
from typing import Any, Literal, Optional
from pydantic.v1 import BaseModel, Field
from prowler.config.config import output_file_timestamp
from prowler.providers.common.models import ProviderOutputOptions
ScalewayBearerType = Literal["user", "application"]
class ScalewaySession(BaseModel):
    """Scaleway API session information.

    Stores the credentials and the underlying ``scaleway.Client`` so every
    service can reuse the same authenticated client.
    """

    access_key: str
    secret_key: str
    organization_id: Optional[str] = None
    default_project_id: Optional[str] = None
    default_region: Optional[str] = None
    # Live scaleway.Client; excluded from serialization on purpose.
    client: Any = Field(default=None, exclude=True)

    class Config:
        # Required so pydantic accepts the non-pydantic client object.
        arbitrary_types_allowed = True
class ScalewayIdentityInfo(BaseModel):
    """Scaleway identity and scoping information."""

    # Organization in scope: the audit identity for Scaleway scans.
    organization_id: str
    bearer_id: Optional[str] = None
    bearer_type: Optional[ScalewayBearerType] = None  # "user" or "application"
    bearer_email: Optional[str] = None
    account_root_user_id: Optional[str] = None
class ScalewayOutputOptions(ProviderOutputOptions):
    """Customize output filenames for Scaleway scans."""

    def __init__(self, arguments, bulk_checks_metadata, identity: ScalewayIdentityInfo):
        """Derive a default output filename from the organization ID.

        Args:
            arguments: Parsed CLI arguments (may carry ``output_filename``).
            bulk_checks_metadata: Metadata for all loaded checks.
            identity: Resolved Scaleway identity for the scan.
        """
        super().__init__(arguments, bulk_checks_metadata)
        if (
            not hasattr(arguments, "output_filename")
            or arguments.output_filename is None
        ):
            # No explicit filename: embed the organization ID (or a generic
            # "scaleway" fallback) plus the shared run timestamp.
            account_fragment = identity.organization_id or "scaleway"
            self.output_filename = (
                f"prowler-output-{account_fragment}-{output_file_timestamp}"
            )
        else:
            self.output_filename = arguments.output_filename
@@ -1,372 +0,0 @@
import os
from colorama import Fore, Style
from scaleway import Client
from scaleway.iam.v1alpha1 import IamV1Alpha1API
from prowler.config.config import (
default_config_file_path,
get_default_mute_file_path,
load_and_validate_config_file,
)
from prowler.lib.logger import logger
from prowler.lib.utils.utils import print_boxes
from prowler.providers.common.models import Audit_Metadata, Connection
from prowler.providers.common.provider import Provider
from prowler.providers.scaleway.exceptions.exceptions import (
ScalewayAuthenticationError,
ScalewayCredentialsError,
ScalewayIdentityError,
ScalewaySessionError,
)
from prowler.providers.scaleway.lib.mutelist.mutelist import ScalewayMutelist
from prowler.providers.scaleway.models import (
ScalewayIdentityInfo,
ScalewaySession,
)
class ScalewayProvider(Provider):
    """Scaleway provider.

    Authenticates against the Scaleway API using an API key (access key +
    secret key) and exposes a single global session that every service
    reuses. Scaleway scopes everything to an organization, so the
    organization ID is the audit identity.
    """

    _type: str = "scaleway"
    _session: ScalewaySession
    _identity: ScalewayIdentityInfo
    _audit_config: dict
    _fixer_config: dict
    _mutelist: ScalewayMutelist
    audit_metadata: Audit_Metadata

    def __init__(
        self,
        # Authentication credentials
        access_key: str = None,
        secret_key: str = None,
        organization_id: str = None,
        project_id: str = None,
        region: str = None,
        # Provider configuration
        config_path: str = None,
        config_content: dict | None = None,
        fixer_config: dict = None,
        mutelist_path: str = None,
        mutelist_content: dict = None,
    ):
        """Initialize the Scaleway provider: session, identity, config, mutelist.

        Args:
            access_key: Scaleway API access key (falls back to SCW_ACCESS_KEY).
            secret_key: Scaleway API secret key (falls back to SCW_SECRET_KEY).
            organization_id: Organization ID to scope the audit.
            project_id: Default project ID for project-scoped resources.
            region: Default region.
            config_path: Path to the audit config file.
            config_content: Pre-loaded audit config; takes precedence over
                ``config_path`` when provided.
            fixer_config: Fixer configuration; defaults to an empty dict.
                The default is ``None`` (not ``{}``) on purpose: a mutable
                ``{}`` default would be shared across every instantiation.
            mutelist_path: Path to the mutelist file.
            mutelist_content: Pre-loaded mutelist; takes precedence over
                ``mutelist_path`` when provided.
        """
        logger.info("Instantiating Scaleway provider...")

        if config_content:
            self._audit_config = config_content
        else:
            if not config_path:
                config_path = default_config_file_path
            self._audit_config = load_and_validate_config_file(self._type, config_path)

        self._session = ScalewayProvider.setup_session(
            access_key=access_key,
            secret_key=secret_key,
            organization_id=organization_id,
            project_id=project_id,
            region=region,
        )
        self._identity = ScalewayProvider.setup_identity(self._session)
        # Guard against the shared-mutable-default pitfall: each instance
        # gets its own dict when the caller does not provide one.
        self._fixer_config = fixer_config if fixer_config is not None else {}

        if mutelist_content:
            self._mutelist = ScalewayMutelist(mutelist_content=mutelist_content)
        else:
            if not mutelist_path:
                mutelist_path = get_default_mute_file_path(self.type)
            self._mutelist = ScalewayMutelist(mutelist_path=mutelist_path)

        # Register this instance as the process-wide provider singleton.
        Provider.set_global_provider(self)

    @property
    def type(self):
        return self._type

    @property
    def session(self):
        return self._session

    @property
    def identity(self):
        return self._identity

    @property
    def audit_config(self):
        return self._audit_config

    @property
    def fixer_config(self):
        return self._fixer_config

    @property
    def mutelist(self) -> ScalewayMutelist:
        return self._mutelist

    @staticmethod
    def setup_session(
        access_key: str = None,
        secret_key: str = None,
        organization_id: str = None,
        project_id: str = None,
        region: str = None,
    ) -> ScalewaySession:
        """Initialize the Scaleway API session.

        Credentials can be provided as arguments (for API/SDK use) or read
        from the official Scaleway environment variables:

        - ``SCW_ACCESS_KEY``
        - ``SCW_SECRET_KEY``
        - ``SCW_DEFAULT_ORGANIZATION_ID``
        - ``SCW_DEFAULT_PROJECT_ID``
        - ``SCW_DEFAULT_REGION``

        Args:
            access_key: Scaleway API access key.
            secret_key: Scaleway API secret key.
            organization_id: Default organization ID to scope the audit.
            project_id: Default project ID for project-scoped resources.
            region: Default region.

        Returns:
            ScalewaySession: The initialized session, holding the
            authenticated ``scaleway.Client``.

        Raises:
            ScalewayCredentialsError: Access or secret key missing.
            ScalewaySessionError: Client instantiation failed.
        """
        access = access_key or os.environ.get("SCW_ACCESS_KEY", "")
        secret = secret_key or os.environ.get("SCW_SECRET_KEY", "")
        # `or None` normalizes empty-string env values to None.
        org = organization_id or os.environ.get("SCW_DEFAULT_ORGANIZATION_ID") or None
        project = project_id or os.environ.get("SCW_DEFAULT_PROJECT_ID") or None
        default_region = region or os.environ.get("SCW_DEFAULT_REGION") or "fr-par"

        if not access or not secret:
            raise ScalewayCredentialsError(
                file=os.path.basename(__file__),
                message=(
                    "Scaleway credentials not found. Provide access_key and "
                    "secret_key or set the SCW_ACCESS_KEY and SCW_SECRET_KEY "
                    "environment variables."
                ),
            )

        try:
            client = Client(
                access_key=access,
                secret_key=secret,
                default_organization_id=org,
                default_project_id=project,
                default_region=default_region,
            )
            return ScalewaySession(
                access_key=access,
                secret_key=secret,
                organization_id=org,
                default_project_id=project,
                default_region=default_region,
                client=client,
            )
        except Exception as error:
            logger.critical(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
            )
            raise ScalewaySessionError(
                file=os.path.basename(__file__),
                original_exception=error,
            )

    @staticmethod
    def setup_identity(session: ScalewaySession) -> ScalewayIdentityInfo:
        """Resolve the audit identity by calling Scaleway IAM.

        Uses ``iam.get_api_key`` on the current access key to discover the
        bearer (user vs application). When the bearer is a user, the
        owning organization is read from the user record; otherwise we
        require ``SCW_DEFAULT_ORGANIZATION_ID``.

        Raises:
            ScalewayIdentityError: Organization could not be resolved or
                the IAM lookup failed.
        """
        try:
            iam = IamV1Alpha1API(session.client)
            current_key = iam.get_api_key(access_key=session.access_key)

            bearer_id = current_key.user_id or current_key.application_id
            bearer_type = (
                "user"
                if current_key.user_id
                else ("application" if current_key.application_id else None)
            )
            organization_id = session.organization_id
            bearer_email = None
            account_root_user_id = None

            # If the bearer is a user, resolve the org from the user record
            # and surface the email + root user id for the credentials banner.
            if current_key.user_id:
                user = iam.get_user(user_id=current_key.user_id)
                organization_id = organization_id or user.organization_id
                bearer_email = user.email
                account_root_user_id = user.account_root_user_id
            elif current_key.application_id and not organization_id:
                # Application keys do not expose the org directly without an
                # extra call. The default org from env is preferred.
                logger.warning(
                    "Scaleway application-scoped API key without "
                    "SCW_DEFAULT_ORGANIZATION_ID. Resource discovery may fail."
                )

            if not organization_id:
                raise ScalewayIdentityError(
                    file=os.path.basename(__file__),
                    message=(
                        "Could not determine the Scaleway organization ID. "
                        "Set SCW_DEFAULT_ORGANIZATION_ID or use a user-scoped "
                        "API key."
                    ),
                )

            return ScalewayIdentityInfo(
                organization_id=organization_id,
                bearer_id=bearer_id,
                bearer_type=bearer_type,
                bearer_email=bearer_email,
                account_root_user_id=account_root_user_id,
            )
        except ScalewayIdentityError:
            # Preserve the specific identity error raised above.
            raise
        except Exception as error:
            logger.critical(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
            )
            raise ScalewayIdentityError(
                file=os.path.basename(__file__),
                original_exception=error,
            )

    @staticmethod
    def validate_credentials(session: ScalewaySession) -> None:
        """Smoke-test credentials by resolving the current API key.

        Uses ``iam.get_api_key`` because it does not require any prior
        knowledge of the bearer or the owning organization.

        Args:
            session: The Scaleway session to validate.

        Raises:
            ScalewayAuthenticationError: Authentication or authorization
                failed against the Scaleway IAM API.
        """
        try:
            iam = IamV1Alpha1API(session.client)
            iam.get_api_key(access_key=session.access_key)
        except Exception as error:
            raise ScalewayAuthenticationError(
                file=os.path.basename(__file__),
                original_exception=error,
            )

    def print_credentials(self) -> None:
        """Print a boxed banner with the credentials used for the scan."""
        report_title = (
            f"{Style.BRIGHT}Using the Scaleway credentials below:{Style.RESET_ALL}"
        )
        report_lines = [
            f"Authentication: {Fore.YELLOW}API Key{Style.RESET_ALL}",
            f"Access Key: {Fore.YELLOW}{self._session.access_key}{Style.RESET_ALL}",
            f"Organization ID: {Fore.YELLOW}{self._identity.organization_id}{Style.RESET_ALL}",
        ]
        if self._identity.bearer_type:
            report_lines.append(
                f"Bearer: {Fore.YELLOW}{self._identity.bearer_type}"
                f" ({self._identity.bearer_email or self._identity.bearer_id})"
                f"{Style.RESET_ALL}"
            )
        if self._session.default_region:
            report_lines.append(
                f"Default Region: {Fore.YELLOW}{self._session.default_region}{Style.RESET_ALL}"
            )
        print_boxes(report_lines, report_title)

    @staticmethod
    def test_connection(
        access_key: str = None,
        secret_key: str = None,
        organization_id: str = None,
        raise_on_exception: bool = True,
        provider_id: str = None,
    ) -> Connection:
        """Test connection to Scaleway.

        Args:
            access_key: Scaleway access key (falls back to SCW_ACCESS_KEY).
            secret_key: Scaleway secret key (falls back to SCW_SECRET_KEY).
            organization_id: Organization ID to scope the audit.
            raise_on_exception: Whether to raise or return errors.
            provider_id: Expected Scaleway organization ID. When provided,
                the resolved identity must match it; otherwise the test
                fails with ``ScalewayAuthenticationError``.

        Returns:
            Connection: Connection object with is_connected status.
        """
        try:
            session = ScalewayProvider.setup_session(
                access_key=access_key,
                secret_key=secret_key,
                organization_id=organization_id,
            )
            ScalewayProvider.validate_credentials(session)

            # Guard for API callers that already know the expected
            # organization: the credentials must point to that exact org.
            if provider_id:
                identity = ScalewayProvider.setup_identity(session)
                if identity.organization_id != provider_id:
                    raise ScalewayAuthenticationError(
                        file=os.path.basename(__file__),
                        message=(
                            "The provided credentials do not have access to "
                            f"the Scaleway organization with ID: {provider_id}"
                        ),
                    )

            return Connection(is_connected=True)
        except (
            ScalewayCredentialsError,
            ScalewaySessionError,
            ScalewayAuthenticationError,
        ) as error:
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            if raise_on_exception:
                raise error
            return Connection(is_connected=False, error=error)
        except Exception as error:
            # Unknown failures are normalized to an authentication error so
            # callers only need to handle the provider's exception types.
            logger.error(
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
            formatted_error = ScalewayAuthenticationError(
                file=os.path.basename(__file__),
                original_exception=error,
            )
            if raise_on_exception:
                raise formatted_error
            return Connection(is_connected=False, error=formatted_error)

    def validate_arguments(self) -> None:
        # No provider-specific argument validation is required for Scaleway.
        return None
@@ -1,4 +0,0 @@
from prowler.providers.common.provider import Provider
from prowler.providers.scaleway.services.iam.iam_service import IAM

# Module-level singleton: instantiate the IAM service once against the
# globally registered Scaleway provider so every check importing
# `iam_client` shares the same eagerly-loaded users/API keys cache
# instead of re-fetching them from the Scaleway API.
iam_client = IAM(Provider.get_global_provider())
@@ -1,38 +0,0 @@
{
"Provider": "scaleway",
"CheckID": "iam_no_root_api_keys",
"CheckTitle": "Scaleway IAM API keys must not be owned by the account root user",
"CheckType": [],
"ServiceName": "iam",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "critical",
"ResourceType": "ScalewayAPIKey",
"ResourceGroup": "IAM",
"Description": "**Scaleway API keys** are checked to ensure none is bound to the **account root user**. The account root user is the original Scaleway account owner; its credentials bypass IAM policies and grant unrestricted access to the entire organization.",
"Risk": "API keys owned by the **account root user** cannot be scoped down with IAM policies. Leaking one of these keys yields immediate full control over every project, resource and billing setting in the organization, and rotating them disrupts every automation depending on root credentials.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://www.scaleway.com/en/docs/identity-and-access-management/iam/concepts/#root-account",
"https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/",
"https://www.scaleway.com/en/docs/identity-and-access-management/iam/reference-content/users-and-applications/"
],
"Remediation": {
"Code": {
"CLI": "scw iam api-key delete <ACCESS_KEY>",
"NativeIaC": "",
"Other": "1. Sign in to the Scaleway console as a user with IAM admin permissions.\n2. Create a dedicated IAM user or application scoped with the minimum required policy.\n3. Generate a new API key for that bearer and roll it out to the workloads currently using the root key.\n4. Delete the API key owned by the account root user from the IAM > API keys page.",
"Terraform": ""
},
"Recommendation": {
"Text": "Never use API keys owned by the account root user for automation. Create scoped IAM users or applications, attach the least-privilege policies, and rotate any existing root API keys to that new bearer.",
"Url": "https://hub.prowler.com/check/iam_no_root_api_keys"
}
},
"Categories": [
"identity-access"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": ""
}
@@ -1,87 +0,0 @@
from typing import List
from prowler.lib.check.models import Check, CheckReportScaleway
from prowler.providers.scaleway.services.iam.iam_client import iam_client
class iam_no_root_api_keys(Check):
    """Ensure no Scaleway IAM API key is owned by the account root user.

    The account root user is the original Scaleway account owner. API keys
    bound to that bearer bypass IAM policies and grant unrestricted access
    to the entire organization; rotating or losing them is a critical
    incident. Day-to-day automation should rely on IAM users or
    applications scoped through policies instead.
    """

    def execute(self) -> List[CheckReportScaleway]:
        """Report one finding per API key cached by the IAM service.

        Degrades to a single ``MANUAL`` finding when the IAM service could
        not load its prerequisite data (users or API keys); a ``PASS`` in
        that situation would silently mask the very condition this check
        exists to detect.

        Returns:
            One ``CheckReportScaleway`` per discovered API key: ``FAIL``
            when the bearer is the account root user, ``PASS`` otherwise,
            or a lone ``MANUAL`` report when IAM data is unavailable.
        """
        # Without the user list we cannot tell who the root bearer is, and
        # without the key list there is nothing trustworthy to report on —
        # every key would falsely PASS. Surface MANUAL so the operator
        # investigates.
        if not (iam_client.users_loaded and iam_client.api_keys_loaded):
            unavailable = _IAMDataUnavailableResource(
                organization_id=iam_client.organization_id
            )
            manual = CheckReportScaleway(
                metadata=self.metadata(), resource=unavailable
            )
            manual.status = "MANUAL"
            manual.status_extended = (
                "Could not retrieve Scaleway IAM users or API keys for "
                f"organization {iam_client.organization_id}. Verify the "
                "API key has the IAMReadOnly policy and rerun."
            )
            return [manual]

        root_user_id = iam_client.account_root_user_id
        findings: List[CheckReportScaleway] = []
        for api_key in iam_client.api_keys:
            report = CheckReportScaleway(metadata=self.metadata(), resource=api_key)
            owned_by_root = bool(root_user_id) and api_key.user_id == root_user_id
            if owned_by_root:
                report.status = "FAIL"
                report.status_extended = (
                    f"Scaleway API key {api_key.access_key} is owned by the "
                    f"account root user ({root_user_id}). Replace it with an "
                    f"API key bound to a dedicated IAM user or application."
                )
            else:
                report.status = "PASS"
                report.status_extended = (
                    f"Scaleway API key {api_key.access_key} is not owned by "
                    f"the account root user."
                )
            findings.append(report)
        return findings
class _IAMDataUnavailableResource:
"""Minimal stand-in resource used when the IAM service failed to load.
``CheckReportScaleway`` derives ``resource_name``/``resource_id``/
``region``/``organization_id`` from the resource via ``getattr`` with
defaults, so this lightweight object is enough to materialize a
MANUAL finding without polluting the real domain models.
"""
def __init__(self, organization_id: str):
self.name = "iam-data-unavailable"
self.id = "iam-data-unavailable"
self.organization_id = organization_id
self.region = "global"
@@ -1,142 +0,0 @@
from typing import Optional
from pydantic.v1 import BaseModel
from scaleway.iam.v1alpha1 import IamV1Alpha1API
from prowler.lib.logger import logger
from prowler.providers.scaleway.lib.service.service import ScalewayService
class IAM(ScalewayService):
    """Scaleway IAM service.

    Loads the users in scope plus every API key tied to the current
    organization. Checks consume the materialized lists; nothing in this
    class is lazy. Each load operation tracks success/failure separately
    so checks can degrade to ``MANUAL`` when data is incomplete instead of
    falsely passing.
    """

    def __init__(self, provider):
        super().__init__("iam", provider)
        self._api = IamV1Alpha1API(self.client)
        # Cached state — populated eagerly during construction
        self.users: list[ScalewayUser] = []
        self.api_keys: list[ScalewayAPIKey] = []
        self.account_root_user_id: Optional[str] = None
        # Load status flags — checks consult these to surface MANUAL when
        # the underlying API call failed rather than reporting empty lists
        # as a clean PASS.
        self.users_loaded: bool = False
        self.api_keys_loaded: bool = False
        self._load_users()
        self._load_api_keys()

    def _load_users(self) -> None:
        """List every IAM user in the audited organization.

        On failure the error is logged and ``users_loaded`` stays False so
        consumers can distinguish "no users" from "could not list users".
        """
        try:
            users = self._api.list_users_all(organization_id=self.organization_id)
            for user in users:
                self.users.append(
                    ScalewayUser(
                        id=user.id,
                        email=user.email,
                        username=user.username,
                        organization_id=user.organization_id,
                        account_root_user_id=user.account_root_user_id,
                        mfa=bool(getattr(user, "mfa", False)),
                        type_=(
                            str(user.type_) if getattr(user, "type_", None) else None
                        ),
                        status=(
                            str(user.status) if getattr(user, "status", None) else None
                        ),
                    )
                )
            # All users in the same org share the same account_root_user_id,
            # but be defensive and take the first non-empty value instead of
            # relying on the first record carrying it.
            self.account_root_user_id = next(
                (
                    user.account_root_user_id
                    for user in self.users
                    if user.account_root_user_id
                ),
                None,
            )
            self.users_loaded = True
        except Exception as error:
            logger.error(
                f"{self.service} - Error listing users: "
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )

    def _load_api_keys(self) -> None:
        """List every API key in the audited organization.

        On failure the error is logged and ``api_keys_loaded`` stays False.
        """
        try:
            api_keys = self._api.list_api_keys_all(organization_id=self.organization_id)
            for key in api_keys:
                self.api_keys.append(
                    ScalewayAPIKey(
                        access_key=key.access_key,
                        description=key.description,
                        user_id=key.user_id,
                        application_id=key.application_id,
                        default_project_id=key.default_project_id,
                        editable=bool(key.editable),
                        managed=bool(getattr(key, "managed", False)),
                        creation_ip=key.creation_ip,
                        created_at=str(key.created_at) if key.created_at else None,
                        updated_at=str(key.updated_at) if key.updated_at else None,
                        expires_at=str(key.expires_at) if key.expires_at else None,
                    )
                )
            self.api_keys_loaded = True
        except Exception as error:
            logger.error(
                f"{self.service} - Error listing API keys: "
                f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
            )
class ScalewayUser(BaseModel):
    """Subset of a Scaleway IAM user surface that the checks need."""

    id: str
    email: Optional[str] = None
    username: Optional[str] = None
    organization_id: Optional[str] = None
    # ID of the account root user of the owning organization, when the
    # upstream record exposes it.
    account_root_user_id: Optional[str] = None
    mfa: bool = False
    type_: Optional[str] = None
    status: Optional[str] = None
    # Provide name/id for CheckReportScaleway
    name: str = ""

    def __init__(self, **data):
        # pydantic v1 models are mutable by default, so the display name
        # can be derived after validation: prefer email, then username,
        # then fall back to the raw user ID. This overrides any `name`
        # value passed in `data`.
        super().__init__(**data)
        self.name = self.email or self.username or self.id
class ScalewayAPIKey(BaseModel):
    """Subset of a Scaleway IAM API key surface that the checks need."""

    access_key: str
    description: Optional[str] = None
    # Owning bearer: user-owned keys carry user_id, application-owned
    # keys carry application_id (see the provider's identity resolution).
    user_id: Optional[str] = None
    application_id: Optional[str] = None
    default_project_id: Optional[str] = None
    editable: bool = False
    managed: bool = False
    creation_ip: Optional[str] = None
    # Timestamps are stored as strings (stringified upstream values).
    created_at: Optional[str] = None
    updated_at: Optional[str] = None
    expires_at: Optional[str] = None
    # Provide name/id for CheckReportScaleway
    name: str = ""
    id: str = ""

    def __init__(self, **data):
        # pydantic v1 models are mutable by default; derive the reporting
        # identifiers after validation. The access key doubles as the
        # resource ID, and the description (when set) is the display name.
        # This overrides any `name`/`id` values passed in `data`.
        super().__init__(**data)
        self.id = self.access_key
        self.name = self.description or self.access_key
+1 -2
View File
@@ -88,8 +88,7 @@ dependencies = [
"alibabacloud_actiontrail20200706==2.4.1",
"alibabacloud_cs20151215==6.1.0",
"alibabacloud-rds20140815==12.0.0",
"alibabacloud-sls20201230==5.9.0",
"scaleway==2.10.3"
"alibabacloud-sls20201230==5.9.0"
]
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
license = "Apache-2.0"
+6 -6
View File
@@ -456,16 +456,16 @@ def get_object(self):
```bash
# Development
cd api && poetry run python src/backend/manage.py runserver
cd api && poetry run python src/backend/manage.py shell
cd api && uv run python src/backend/manage.py runserver
cd api && uv run python src/backend/manage.py shell
# Database
cd api && poetry run python src/backend/manage.py makemigrations
cd api && poetry run python src/backend/manage.py migrate
cd api && uv run python src/backend/manage.py makemigrations
cd api && uv run python src/backend/manage.py migrate
# Testing
cd api && poetry run pytest -x --tb=short
cd api && poetry run make lint
cd api && uv run pytest -x --tb=short
cd api && uv run make lint
```
---
+7 -7
View File
@@ -432,7 +432,7 @@ def process_finding(tenant_id, finding_uid, data):
Run before every production deployment:
```bash
cd api && poetry run python src/backend/manage.py check --deploy
cd api && uv run python src/backend/manage.py check --deploy
```
### Critical Settings
@@ -454,18 +454,18 @@ cd api && poetry run python src/backend/manage.py check --deploy
```bash
# Development
cd api && poetry run python src/backend/manage.py runserver
cd api && poetry run python src/backend/manage.py shell
cd api && uv run python src/backend/manage.py runserver
cd api && uv run python src/backend/manage.py shell
# Celery
cd api && poetry run celery -A config.celery worker -l info -Q scans,overview
cd api && poetry run celery -A config.celery beat -l info
cd api && uv run celery -A config.celery worker -l info -Q scans,overview
cd api && uv run celery -A config.celery beat -l info
# Testing
cd api && poetry run pytest -x --tb=short
cd api && uv run pytest -x --tb=short
# Production checks
cd api && poetry run python src/backend/manage.py check --deploy
cd api && uv run python src/backend/manage.py check --deploy
```
---
@@ -3,7 +3,7 @@
## Django Deployment Checklist Command
```bash
cd api && poetry run python src/backend/manage.py check --deploy
cd api && uv run python src/backend/manage.py check --deploy
```
This command checks for common deployment issues and missing security settings.
+3 -3
View File
@@ -153,9 +153,9 @@ api_key = "sk-fake-test-key-for-unit-testing-only"
## Commands
```bash
cd api && poetry run pytest -x --tb=short
cd api && poetry run pytest -k "test_provider"
cd api && poetry run pytest api/src/backend/api/tests/test_rbac.py
cd api && uv run pytest -x --tb=short
cd api && uv run pytest -k "test_provider"
cd api && uv run pytest api/src/backend/api/tests/test_rbac.py
```
---
@@ -171,28 +171,28 @@ from conftest import (
```bash
# Full test suite
cd api && poetry run pytest
cd api && uv run pytest
# Fast fail on first error
cd api && poetry run pytest -x
cd api && uv run pytest -x
# Short traceback
cd api && poetry run pytest --tb=short
cd api && uv run pytest --tb=short
# Specific file
cd api && poetry run pytest api/src/backend/api/tests/test_views.py
cd api && uv run pytest api/src/backend/api/tests/test_views.py
# Pattern match
cd api && poetry run pytest -k "Provider"
cd api && uv run pytest -k "Provider"
# Verbose with print output
cd api && poetry run pytest -v -s
cd api && uv run pytest -v -s
# With coverage
cd api && poetry run pytest --cov=api --cov-report=html
cd api && uv run pytest --cov=api --cov-report=html
# Parallel execution
cd api && poetry run pytest -n auto
cd api && uv run pytest -n auto
```
---
+2 -2
View File
@@ -30,8 +30,8 @@ poetry run python prowler-cli.py aws --check check_name
poetry run pytest tests/
# API
cd api && poetry run python src/backend/manage.py runserver
cd api && poetry run pytest
cd api && uv run python src/backend/manage.py runserver
cd api && uv run pytest
# UI
cd ui && pnpm run dev
+2 -2
View File
@@ -87,7 +87,7 @@ poetry run pytest tests/providers/aws/services/ec2/ec2_ami_public/ -v
fd "test_*.py" api/src/backend/api/tests/
# 2. Run specific test
poetry run pytest api/src/backend/api/tests/test_models.py -v
uv run pytest api/src/backend/api/tests/test_models.py -v
# 3. Read existing tests
```
@@ -366,6 +366,6 @@ poetry run pytest --cov=./prowler tests/ # Coverage
```bash
poetry run pytest -x --tb=short # Run all (stop on first fail)
poetry run pytest api/src/backend/api/tests/test_file.py # Specific file
uv run pytest api/src/backend/api/tests/test_file.py # Specific file
poetry run pytest -k "test_name" -v # Filter by name
```
@@ -0,0 +1,234 @@
from unittest import mock
from prowler.providers.aws.services.sagemaker.sagemaker_service import Domain
from tests.providers.aws.utils import (
AWS_ACCOUNT_NUMBER,
AWS_REGION_EU_WEST_1,
set_mocked_aws_provider,
)
test_domain_name = "test-domain"
test_domain_id = "d-testdomain123"
domain_arn = f"arn:aws:sagemaker:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:domain/{test_domain_id}"
test_sso_instance_id = "app-test-instance-id"
test_sso_application_arn = (
f"arn:aws:sso::{AWS_ACCOUNT_NUMBER}:application/sagemaker/apl-test"
)
class Test_sagemaker_domain_sso_configured:
    """Unit tests for the sagemaker_domain_sso_configured check."""

    def _execute_check(self, domains):
        """Run the check against a mocked SageMaker client exposing ``domains``.

        Uses a fresh ``MagicMock()`` instance per call — the previous code
        assigned attributes on the ``mock.MagicMock`` *class* itself
        (missing parentheses), which leaks state across tests — and
        patches both the global provider and the check's module-level
        client before importing and executing the check.
        """
        sagemaker_client = mock.MagicMock()
        sagemaker_client.sagemaker_domains = domains
        aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
        with (
            mock.patch(
                "prowler.providers.common.provider.Provider.get_global_provider",
                return_value=aws_provider,
            ),
            mock.patch(
                "prowler.providers.aws.services.sagemaker.sagemaker_domain_sso_configured.sagemaker_domain_sso_configured.sagemaker_client",
                sagemaker_client,
            ),
        ):
            from prowler.providers.aws.services.sagemaker.sagemaker_domain_sso_configured.sagemaker_domain_sso_configured import (
                sagemaker_domain_sso_configured,
            )

            return sagemaker_domain_sso_configured().execute()

    def test_no_domains(self):
        result = self._execute_check([])
        assert len(result) == 0

    def test_domain_sso_configured_with_instance_id(self):
        result = self._execute_check(
            [
                Domain(
                    domain_id=test_domain_id,
                    name=test_domain_name,
                    arn=domain_arn,
                    region=AWS_REGION_EU_WEST_1,
                    auth_mode="SSO",
                    single_sign_on_managed_application_instance_id=test_sso_instance_id,
                )
            ]
        )
        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == f"SageMaker domain {test_domain_name} is configured with SSO authentication and is associated with an IAM Identity Center instance."
        )
        assert result[0].resource_id == test_domain_name
        assert result[0].resource_arn == domain_arn

    def test_domain_sso_configured_with_application_arn(self):
        result = self._execute_check(
            [
                Domain(
                    domain_id=test_domain_id,
                    name=test_domain_name,
                    arn=domain_arn,
                    region=AWS_REGION_EU_WEST_1,
                    auth_mode="SSO",
                    single_sign_on_application_arn=test_sso_application_arn,
                )
            ]
        )
        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == f"SageMaker domain {test_domain_name} is configured with SSO authentication and is associated with an IAM Identity Center instance."
        )

    def test_domain_sso_without_identity_center(self):
        result = self._execute_check(
            [
                Domain(
                    domain_id=test_domain_id,
                    name=test_domain_name,
                    arn=domain_arn,
                    region=AWS_REGION_EU_WEST_1,
                    auth_mode="SSO",
                )
            ]
        )
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert (
            result[0].status_extended
            == f"SageMaker domain {test_domain_name} is configured with SSO authentication but is not associated with an IAM Identity Center instance."
        )
        assert result[0].resource_id == test_domain_name
        assert result[0].resource_arn == domain_arn

    def test_domain_iam_mode(self):
        result = self._execute_check(
            [
                Domain(
                    domain_id=test_domain_id,
                    name=test_domain_name,
                    arn=domain_arn,
                    region=AWS_REGION_EU_WEST_1,
                    auth_mode="IAM",
                )
            ]
        )
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert (
            result[0].status_extended
            == f"SageMaker domain {test_domain_name} is not configured with SSO authentication, current mode is IAM."
        )
        assert result[0].resource_id == test_domain_name
        assert result[0].resource_arn == domain_arn

    def test_domain_auth_mode_unknown(self):
        # No auth_mode supplied — the check should surface "unknown".
        result = self._execute_check(
            [
                Domain(
                    domain_id=test_domain_id,
                    name=test_domain_name,
                    arn=domain_arn,
                    region=AWS_REGION_EU_WEST_1,
                )
            ]
        )
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert (
            result[0].status_extended
            == f"SageMaker domain {test_domain_name} is not configured with SSO authentication, current mode is unknown."
        )
@@ -26,6 +26,13 @@ kms_key_id = str(uuid4())
endpoint_config_name = "endpoint-config-test"
endpoint_config_arn = f"arn:aws:sagemaker:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:endpoint-config/{endpoint_config_name}"
prod_variant_name = "Variant1"
test_domain_name = "test-domain"
test_domain_id = "d-testdomain123"
test_domain_arn = f"arn:aws:sagemaker:{AWS_REGION_EU_WEST_1}:{AWS_ACCOUNT_NUMBER}:domain/{test_domain_id}"
test_sso_instance_id = "app-test-instance-id"
test_sso_application_arn = (
f"arn:aws:sso::{AWS_ACCOUNT_NUMBER}:application/sagemaker/apl-test"
)
make_api_call = botocore.client.BaseClient._make_api_call
@@ -115,6 +122,25 @@ def mock_make_api_call(self, operation_name, kwarg):
},
]
}
if operation_name == "ListDomains":
return {
"Domains": [
{
"DomainId": test_domain_id,
"DomainName": test_domain_name,
"DomainArn": test_domain_arn,
},
],
}
if operation_name == "DescribeDomain":
return {
"DomainId": test_domain_id,
"DomainName": test_domain_name,
"DomainArn": test_domain_arn,
"AuthMode": "SSO",
"SingleSignOnManagedApplicationInstanceId": test_sso_instance_id,
"SingleSignOnApplicationArn": test_sso_application_arn,
}
return make_api_call(self, operation_name, kwarg)
@@ -249,6 +275,33 @@ class Test_SageMaker_Service:
else:
assert prod_variant.initial_instance_count == 2
# Test SageMaker list domains
    def test_list_domains(self):
        """ListDomains responses are materialized into ``sagemaker_domains``
        with id, name, ARN and region populated."""
        aws_provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
        sagemaker = SageMaker(aws_provider)
        assert len(sagemaker.sagemaker_domains) == 1
        assert sagemaker.sagemaker_domains[0].domain_id == test_domain_id
        assert sagemaker.sagemaker_domains[0].name == test_domain_name
        assert sagemaker.sagemaker_domains[0].arn == test_domain_arn
        assert sagemaker.sagemaker_domains[0].region == AWS_REGION_EU_WEST_1
# Test SageMaker describe domain
def test_describe_domain(self):
    """Verify _describe_domain fills in the SSO auth mode and both SSO identifiers."""
    provider = set_mocked_aws_provider([AWS_REGION_EU_WEST_1])
    service = SageMaker(provider)

    domains = service.sagemaker_domains
    assert len(domains) == 1
    domain = domains[0]
    assert domain.auth_mode == "SSO"
    assert (
        domain.single_sign_on_managed_application_instance_id == test_sso_instance_id
    )
    assert domain.single_sign_on_application_arn == test_sso_application_arn
# Test SageMaker _list_tags_for_resource
def test_list_tags_for_resource_calls_client(self):
"""Test that _list_tags_for_resource calls the correct AWS client and updates the resource."""
@@ -312,14 +365,17 @@ class Test_SageMaker_Service:
patch(
"prowler.providers.aws.services.sagemaker.sagemaker_service.SageMaker._list_endpoint_configs"
),
patch(
"prowler.providers.aws.services.sagemaker.sagemaker_service.SageMaker._list_domains"
),
):
sagemaker_service = SageMaker(audit_info)
# Check that __threading_call__ was called for _list_tags_for_resource
# (at least 4 calls expected, one for each resource type)
# (one for each resource type: models, notebooks, training jobs, endpoint configs, domains)
tag_calls = [
c
for c in mock_threading_call.call_args_list
if c[0][0] == sagemaker_service._list_tags_for_resource
]
assert len(tag_calls) == 4
assert len(tag_calls) == 5
@@ -136,3 +136,79 @@ class Test_zone_waf_enabled:
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
def test_zone_waf_disabled_paid_plan_includes_hint(self):
    """A paid-plan zone with WAF off must FAIL and hint at dashboard review / false positives.

    Fix: the original assigned ``zone_client = mock.MagicMock`` (the class
    itself, not an instance), so ``zone_client.zones = {...}`` mutated the
    shared ``MagicMock`` class and leaked state into every other test using
    MagicMock. Instantiate the mock instead.
    """
    zone_client = mock.MagicMock()
    zone_client.zones = {
        ZONE_ID: CloudflareZone(
            id=ZONE_ID,
            name=ZONE_NAME,
            status="active",
            paused=False,
            plan="Pro Website",
            settings=CloudflareZoneSettings(
                waf="off",
            ),
        )
    }
    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_cloudflare_provider(),
        ),
        mock.patch(
            "prowler.providers.cloudflare.services.zone.zone_waf_enabled.zone_waf_enabled.zone_client",
            new=zone_client,
        ),
    ):
        from prowler.providers.cloudflare.services.zone.zone_waf_enabled.zone_waf_enabled import (
            zone_waf_enabled,
        )

        check = zone_waf_enabled()
        result = check.execute()
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert "WAF is not enabled" in result[0].status_extended
        assert "false positive" in result[0].status_extended
        assert "Cloudflare dashboard" in result[0].status_extended
def test_zone_waf_disabled_free_plan_includes_unavailable_hint(self):
    """A Free-plan zone with WAF off must FAIL, note WAF is unavailable on Free, and omit the false-positive hint.

    Fix: the original assigned ``zone_client = mock.MagicMock`` (the class
    itself, not an instance), so ``zone_client.zones = {...}`` mutated the
    shared ``MagicMock`` class and leaked state into every other test using
    MagicMock. Instantiate the mock instead.
    """
    zone_client = mock.MagicMock()
    zone_client.zones = {
        ZONE_ID: CloudflareZone(
            id=ZONE_ID,
            name=ZONE_NAME,
            status="active",
            paused=False,
            plan="Free Website",
            settings=CloudflareZoneSettings(
                waf="off",
            ),
        )
    }
    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_cloudflare_provider(),
        ),
        mock.patch(
            "prowler.providers.cloudflare.services.zone.zone_waf_enabled.zone_waf_enabled.zone_client",
            new=zone_client,
        ),
    ):
        from prowler.providers.cloudflare.services.zone.zone_waf_enabled.zone_waf_enabled import (
            zone_waf_enabled,
        )

        check = zone_waf_enabled()
        result = check.execute()
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert "WAF is not enabled" in result[0].status_extended
        assert "not available on the Cloudflare Free plan" in (
            result[0].status_extended
        )
        assert "false positive" not in result[0].status_extended
@@ -3,7 +3,6 @@ from unittest.mock import patch
from prowler.providers.googleworkspace.services.gmail.gmail_service import GmailPolicies
from tests.providers.googleworkspace.googleworkspace_fixtures import (
CUSTOMER_ID,
DOMAIN,
set_mocked_googleworkspace_provider,
)
@@ -38,7 +37,7 @@ class TestGmailAnomalousAttachmentProtectionEnabled:
assert len(findings) == 1
assert findings[0].status == "PASS"
assert "WARNING" in findings[0].status_extended
assert findings[0].resource_name == DOMAIN
assert findings[0].resource_name == "Gmail Policies"
assert findings[0].customer_id == CUSTOMER_ID
def test_fail_no_action(self):

Some files were not shown because too many files have changed in this diff Show More