Compare commits


19 Commits

Author | SHA1 | Message | Date
Pablo Lara | a75755c8c5 | WIP: add change role to the user's invitations | 2024-12-15 10:37:51 +01:00
Pablo Lara | 3e0568f381 | chore: add change role to the user's invitations | 2024-12-13 12:38:08 +01:00
Pablo Lara | fec66a3685 | Merge branch 'master' into PRWLR-4669-Roles-Page-API-UI | 2024-12-13 06:02:29 +01:00
Pablo Lara | ba335de6b3 | chore: fix error with exports | 2024-12-11 08:30:12 +01:00
Pablo Lara | 93051d55d5 | feat: add role when invite an user | 2024-12-10 18:14:57 +01:00
Pablo Lara | 161c56ffe4 | feat: add permission column to roles table | 2024-12-10 11:28:10 +01:00
Pablo Lara | e306322630 | Merge branch 'PRWLR-5688-Implement-permission-status-filter' into PRWLR-4669-Roles-Page-API-UI | 2024-12-10 10:47:14 +01:00
Adrián Jesús Peña Rodríguez | b4eb6e8076 | feat(rbac): add permission_state field to role serializer | 2024-12-10 10:45:09 +01:00
Pablo Lara | b54e9334b9 | chore: add and edit roles is working now | 2024-12-10 10:44:34 +01:00
Adrián Jesús Peña Rodríguez | 5fd1af7559 | feat(rbac): add permission_state filter | 2024-12-10 10:33:26 +01:00
Pablo Lara | 83c7ced6ff | Merge branch 'feat-rbac' into PRWLR-4669-Roles-Page-API-UI | 2024-12-10 09:29:24 +01:00
Adrián Jesús Peña Rodríguez | 67d9ff2419 | ref(provider_groups): ref the provider_groups relationships endpoint (#6033) | 2024-12-10 09:28:22 +01:00
Pablo Lara | 130fddae1e | feat: edit role feature | 2024-12-10 09:08:25 +01:00
Pablo Lara | 04b9f81e26 | feat: add new role feature | 2024-12-10 07:24:01 +01:00
Pablo Lara | 29bc697487 | feat: add roles page | 2024-12-05 15:25:07 +01:00
Pablo Lara | 381aa93f55 | chore: add roles item to the sidebar | 2024-12-05 09:12:31 +01:00
Adrián Jesús Peña Rodríguez | 2bee4b986f | Merge branch 'master' into feat-rbac | 2024-12-04 15:42:47 +01:00
Adrián Jesús Peña Rodríguez | 9723b8fac1 | Merge branch 'master' into feat-rbac | 2024-12-04 12:51:39 +01:00
Adrián Jesús Peña Rodríguez | 67ef67add9 | feat(api-rbac): RBAC system (#5903) | 2024-11-26 12:32:55 +01:00
1608 changed files with 14706 additions and 46852 deletions

.env (30 changed lines)

@@ -6,14 +6,13 @@
PROWLER_UI_VERSION="latest"
SITE_URL=http://localhost:3000
API_BASE_URL=http://prowler-api:8080/api/v1
NEXT_PUBLIC_API_DOCS_URL=http://prowler-api:8080/api/v1/docs
AUTH_TRUST_HOST=true
UI_PORT=3000
# openssl rand -base64 32
AUTH_SECRET="N/c6mnaS5+SWq81+819OrzQZlmx1Vxtp/orjttJSmw8="
#### Prowler API Configuration ####
PROWLER_API_VERSION="stable"
PROWLER_API_VERSION="latest"
# PostgreSQL settings
# If running Django and celery on host, use 'localhost', else use 'postgres-db'
POSTGRES_HOST=postgres-db
@@ -30,23 +29,6 @@ VALKEY_HOST=valkey
VALKEY_PORT=6379
VALKEY_DB=0
# API scan settings
# The AWS access key to be used when uploading scan artifacts to an S3 bucket
# If left empty, default AWS credentials resolution behavior will be used
ARTIFACTS_AWS_ACCESS_KEY_ID=""
# The AWS secret key to be used when uploading scan artifacts to an S3 bucket
ARTIFACTS_AWS_SECRET_ACCESS_KEY=""
# An optional AWS session token
ARTIFACTS_AWS_SESSION_TOKEN=""
# The AWS region where your S3 bucket is located (e.g., "us-east-1")
ARTIFACTS_AWS_DEFAULT_REGION=""
# The name of the S3 bucket where scan artifacts should be stored
ARTIFACTS_AWS_S3_OUTPUT_BUCKET=""
# Django settings
DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1,prowler-api
DJANGO_BIND_ADDRESS=0.0.0.0
@@ -58,12 +40,9 @@ DJANGO_LOGGING_FORMATTER=human_readable
# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
# Applies to both Django and Celery Workers
DJANGO_LOGGING_LEVEL=INFO
# Defaults to the maximum available based on CPU cores if not set.
DJANGO_WORKERS=4
# Token lifetime is in minutes
DJANGO_ACCESS_TOKEN_LIFETIME=30
# Token lifetime is in minutes
DJANGO_REFRESH_TOKEN_LIFETIME=1440
DJANGO_WORKERS=4 # Defaults to the maximum available based on CPU cores if not set.
DJANGO_ACCESS_TOKEN_LIFETIME=30 # Token lifetime is in minutes
DJANGO_REFRESH_TOKEN_LIFETIME=1440 # Token lifetime is in minutes
DJANGO_CACHE_MAX_AGE=3600
DJANGO_STALE_WHILE_REVALIDATE=60
DJANGO_MANAGE_DB_PARTITIONS=True
@@ -108,4 +87,3 @@ jQIDAQAB
-----END PUBLIC KEY-----"
# openssl rand -base64 32
DJANGO_SECRETS_ENCRYPTION_KEY="oE/ltOhp/n1TdbHjVmzcjDPLcLA41CVI/4Rk+UB5ESc="
DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
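
The two token lifetime values above are given in minutes. A minimal, purely illustrative sketch of how settings code typically turns such variables into durations (the Django settings module itself is not part of this diff):

    import os
    from datetime import timedelta

    # Illustrative only: names match the .env above, defaults match its values.
    ACCESS_TOKEN_LIFETIME = timedelta(
        minutes=int(os.environ.get("DJANGO_ACCESS_TOKEN_LIFETIME", "30"))
    )
    REFRESH_TOKEN_LIFETIME = timedelta(
        minutes=int(os.environ.get("DJANGO_REFRESH_TOKEN_LIFETIME", "1440"))
    )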

.github/dependabot.yml

@@ -16,17 +16,6 @@ updates:
- "dependencies"
- "pip"
- package-ecosystem: "pip"
directory: "/api"
schedule:
interval: "daily"
open-pull-requests-limit: 10
target-branch: master
labels:
- "dependencies"
- "pip"
- "component/api"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
@@ -38,7 +27,7 @@ updates:
- "github_actions"
- package-ecosystem: "npm"
directory: "/ui"
directory: "/"
schedule:
interval: "daily"
open-pull-requests-limit: 10
@@ -46,17 +35,6 @@ updates:
labels:
- "dependencies"
- "npm"
- "component/ui"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: master
labels:
- "dependencies"
- "docker"
# v4.6
- package-ecosystem: "pip"
@@ -81,17 +59,6 @@ updates:
- "github_actions"
- "v4"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: v4.6
labels:
- "dependencies"
- "docker"
- "v4"
# v3
- package-ecosystem: "pip"
directory: "/"

.github/labeler.yml (5 changed lines; vendored)

@@ -22,11 +22,6 @@ provider/kubernetes:
- any-glob-to-any-file: "prowler/providers/kubernetes/**"
- any-glob-to-any-file: "tests/providers/kubernetes/**"
provider/github:
- changed-files:
- any-glob-to-any-file: "prowler/providers/github/**"
- any-glob-to-any-file: "tests/providers/github/**"
github_actions:
- changed-files:
- any-glob-to-any-file: ".github/workflows/*"


@@ -15,8 +15,7 @@ Please include a summary of the change and which issue is fixed. List any depend
- [ ] Review if the code is being covered by tests.
- [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
- [ ] Review if backport is needed.
- [ ] Review if is needed to change the [Readme.md](https://github.com/prowler-cloud/prowler/blob/master/README.md)
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.


@@ -23,7 +23,6 @@ env:
# Tags
LATEST_TAG: latest
RELEASE_TAG: ${{ github.event.release.tag_name }}
STABLE_TAG: stable
WORKING_DIRECTORY: ./api
@@ -32,34 +31,19 @@ env:
PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-api
jobs:
repository-check:
name: Repository check
runs-on: ubuntu-latest
outputs:
is_repo: ${{ steps.repository_check.outputs.is_repo }}
steps:
- name: Repository check
id: repository_check
working-directory: /tmp
run: |
if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
then
echo "is_repo=true" >> "${GITHUB_OUTPUT}"
else
echo "This action only runs for prowler-cloud/prowler"
echo "is_repo=false" >> "${GITHUB_OUTPUT}"
fi
# Build Prowler OSS container
container-build-push:
needs: repository-check
if: needs.repository-check.outputs.is_repo == 'true'
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ env.WORKING_DIRECTORY }}
steps:
- name: Repository check
working-directory: /tmp
run: |
[[ ${{ github.repository }} != "prowler-cloud/prowler" ]] && echo "This action only runs for prowler-cloud/prowler"; exit 0
- name: Checkout
uses: actions/checkout@v4
@@ -93,6 +77,5 @@ jobs:
push: true
tags: |
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
cache-from: type=gha
cache-to: type=gha,mode=max


@@ -15,12 +15,16 @@ on:
push:
branches:
- "master"
- "v3"
- "v4.*"
- "v5.*"
paths:
- "api/**"
pull_request:
branches:
- "master"
- "v3"
- "v4.*"
- "v5.*"
paths:
- "api/**"


@@ -4,13 +4,11 @@ on:
push:
branches:
- "master"
- "v5.*"
paths:
- "api/**"
pull_request:
branches:
- "master"
- "v5.*"
paths:
- "api/**"
@@ -89,7 +87,7 @@ jobs:
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
python -m pip install --upgrade pip
pipx install poetry==1.8.5
pipx install poetry
- name: Set up Python ${{ matrix.python-version }}
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
@@ -114,7 +112,7 @@ jobs:
working-directory: ./api
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry check --lock
poetry lock --check
- name: Lint with ruff
working-directory: ./api


@@ -11,7 +11,7 @@ jobs:
with:
fetch-depth: 0
- name: TruffleHog OSS
uses: trufflesecurity/trufflehog@v3.88.4
uses: trufflesecurity/trufflehog@v3.86.1
with:
path: ./
base: ${{ github.event.repository.default_branch }}


@@ -68,7 +68,7 @@ jobs:
- name: Install Poetry
run: |
pipx install poetry==1.8.5
pipx install poetry
pipx inject poetry poetry-bumpversion
- name: Get Prowler version


@@ -17,7 +17,6 @@ on:
- "master"
- "v3"
- "v4.*"
- "v5.*"
paths-ignore:
- 'ui/**'
- 'api/**'
@@ -26,7 +25,6 @@ on:
- "master"
- "v3"
- "v4.*"
- "v5.*"
paths-ignore:
- 'ui/**'
- 'api/**'


@@ -37,14 +37,12 @@ jobs:
README.md
mkdocs.yml
.backportrc.json
.env
docker-compose*
- name: Install poetry
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
python -m pip install --upgrade pip
pipx install poetry==1.8.5
pipx install poetry
- name: Set up Python ${{ matrix.python-version }}
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
@@ -67,7 +65,7 @@ jobs:
- name: Poetry check
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
run: |
poetry check --lock
poetry lock --check
- name: Lint with flake8
if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'


@@ -10,40 +10,12 @@ env:
CACHE: "poetry"
jobs:
repository-check:
name: Repository check
runs-on: ubuntu-latest
outputs:
is_repo: ${{ steps.repository_check.outputs.is_repo }}
steps:
- name: Repository check
id: repository_check
working-directory: /tmp
run: |
if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
then
echo "is_repo=true" >> "${GITHUB_OUTPUT}"
else
echo "This action only runs for prowler-cloud/prowler"
echo "is_repo=false" >> "${GITHUB_OUTPUT}"
fi
release-prowler-job:
runs-on: ubuntu-latest
needs: repository-check
if: needs.repository-check.outputs.is_repo == 'true'
env:
POETRY_VIRTUALENVS_CREATE: "false"
name: Release Prowler to PyPI
steps:
- name: Repository check
working-directory: /tmp
run: |
if [[ "${{ github.repository }}" != "prowler-cloud/prowler" ]]; then
echo "This action only runs for prowler-cloud/prowler"
exit 1
fi
- name: Get Prowler version
run: |
PROWLER_VERSION="${{ env.RELEASE_TAG }}"
@@ -68,7 +40,7 @@ jobs:
- name: Install dependencies
run: |
pipx install poetry==1.8.5
pipx install poetry
- name: Setup Python
uses: actions/setup-python@v5


@@ -23,7 +23,6 @@ env:
# Tags
LATEST_TAG: latest
RELEASE_TAG: ${{ github.event.release.tag_name }}
STABLE_TAG: stable
WORKING_DIRECTORY: ./ui
@@ -32,34 +31,19 @@ env:
PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-ui
jobs:
repository-check:
name: Repository check
runs-on: ubuntu-latest
outputs:
is_repo: ${{ steps.repository_check.outputs.is_repo }}
steps:
- name: Repository check
id: repository_check
working-directory: /tmp
run: |
if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
then
echo "is_repo=true" >> "${GITHUB_OUTPUT}"
else
echo "This action only runs for prowler-cloud/prowler"
echo "is_repo=false" >> "${GITHUB_OUTPUT}"
fi
# Build Prowler OSS container
container-build-push:
needs: repository-check
if: needs.repository-check.outputs.is_repo == 'true'
runs-on: ubuntu-latest
defaults:
run:
working-directory: ${{ env.WORKING_DIRECTORY }}
steps:
- name: Repository check
working-directory: /tmp
run: |
[[ ${{ github.repository }} != "prowler-cloud/prowler" ]] && echo "This action only runs for prowler-cloud/prowler"; exit 0
- name: Checkout
uses: actions/checkout@v4
@@ -93,6 +77,5 @@ jobs:
push: true
tags: |
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
cache-from: type=gha
cache-to: type=gha,mode=max


@@ -15,12 +15,14 @@ on:
push:
branches:
- "master"
- "v4.*"
- "v5.*"
paths:
- "ui/**"
pull_request:
branches:
- "master"
- "v4.*"
- "v5.*"
paths:
- "ui/**"


@@ -1,16 +1,9 @@
name: UI - Pull Request
on:
push:
branches:
- "master"
- "v5.*"
paths:
- "ui/**"
pull_request:
branches:
- master
- "v5.*"
paths:
- 'ui/**'
@@ -27,7 +20,7 @@ jobs:
with:
persist-credentials: false
- name: Setup Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
- name: Install dependencies

.gitignore (1 changed line; vendored)

@@ -45,7 +45,6 @@ junit-reports/
# Terraform
.terraform*
*.tfstate
*.tfstate.*
# .env
ui/.env*


@@ -27,7 +27,6 @@ repos:
hooks:
- id: shellcheck
exclude: contrib
## PYTHON
- repo: https://github.com/myint/autoflake
rev: v2.3.1
@@ -62,25 +61,8 @@ repos:
rev: 1.8.0
hooks:
- id: poetry-check
name: API - poetry-check
args: ["--directory=./api"]
pass_filenames: false
- id: poetry-lock
name: API - poetry-lock
args: ["--no-update", "--directory=./api"]
pass_filenames: false
- id: poetry-check
name: SDK - poetry-check
args: ["--directory=./"]
pass_filenames: false
- id: poetry-lock
name: SDK - poetry-lock
args: ["--no-update", "--directory=./"]
pass_filenames: false
args: ["--no-update"]
- repo: https://github.com/hadolint/hadolint
rev: v2.13.0-beta
@@ -108,7 +90,7 @@ repos:
- id: bandit
name: bandit
description: "Bandit is a tool for finding common security issues in Python code"
entry: bash -c 'bandit -q -lll -x '*_test.py,./contrib/,./.venv/' -r .'
entry: bash -c 'bandit -q -lll -x '*_test.py,./contrib/' -r .'
language: system
files: '.*\.py'
@@ -121,6 +103,7 @@ repos:
- id: vulture
name: vulture
description: "Vulture finds unused code in Python programs."
entry: bash -c 'vulture --exclude "contrib,.venv,api/src/backend/api/tests/,api/src/backend/conftest.py,api/src/backend/tasks/tests/" --min-confidence 100 .'
entry: bash -c 'vulture --exclude "contrib" --min-confidence 100 .'
exclude: 'api/src/backend/'
language: system
files: '.*\.py'


@@ -71,12 +71,10 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
|---|---|---|---|---|
| AWS | 564 | 82 | 30 | 10 |
| GCP | 77 | 13 | 4 | 3 |
| Azure | 140 | 18 | 5 | 3 |
| Kubernetes | 83 | 7 | 2 | 7 |
> You can list the checks, services, compliance frameworks and categories with `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.
| AWS | 561 | 81 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
| GCP | 77 | 13 -> `prowler gcp --list-services` | 3 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 139 | 18 -> `prowler azure --list-services` | 4 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
# 💻 Installation
@@ -100,7 +98,6 @@ curl -LO https://raw.githubusercontent.com/prowler-cloud/prowler/refs/heads/mast
docker compose up -d
```
> Containers are built for `linux/amd64`. If your workstation's architecture is different, please set `DOCKER_DEFAULT_PLATFORM=linux/amd64` in your environment or use the `--platform linux/amd64` flag in the docker command.
> Enjoy Prowler App at http://localhost:3000 by signing up with your email and password.
### From GitHub


@@ -22,7 +22,6 @@ DJANGO_SECRETS_ENCRYPTION_KEY=""
# Decide whether to allow Django manage database table partitions
DJANGO_MANAGE_DB_PARTITIONS=[True|False]
DJANGO_CELERY_DEADLOCK_ATTEMPTS=5
DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
# PostgreSQL settings
# If running django and celery on host, use 'localhost', else use 'postgres-db'


@@ -1,4 +1,4 @@
FROM python:3.12.8-alpine3.20 AS build
FROM python:3.12-alpine AS build
LABEL maintainer="https://github.com/prowler-cloud/api"


@@ -28,7 +28,7 @@ start_prod_server() {
start_worker() {
echo "Starting the worker..."
poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E --max-tasks-per-child 1
poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E
}
start_worker_beat() {

api/poetry.lock (1402 changed lines; generated)

File diff suppressed because it is too large.


@@ -8,11 +8,11 @@ description = "Prowler's API (Django/DRF)"
license = "Apache-2.0"
name = "prowler-api"
package-mode = false
version = "1.4.0"
version = "1.0.0"
[tool.poetry.dependencies]
celery = {extras = ["pytest"], version = "^5.4.0"}
django = "5.1.5"
django = "5.1.1"
django-celery-beat = "^2.7.0"
django-celery-results = "^2.5.1"
django-cors-headers = "4.4.0"
@@ -27,7 +27,7 @@ drf-nested-routers = "^0.94.1"
drf-spectacular = "0.27.2"
drf-spectacular-jsonapi = "0.5.1"
gunicorn = "23.0.0"
prowler = {git = "https://github.com/prowler-cloud/prowler.git", branch = "master"}
prowler = {git = "https://github.com/prowler-cloud/prowler.git", tag = "5.0.0"}
psycopg2-binary = "2.9.9"
pytest-celery = {extras = ["redis"], version = "^1.0.1"}
# Needed for prowler compatibility
@@ -37,7 +37,6 @@ uuid6 = "2024.7.10"
[tool.poetry.group.dev.dependencies]
bandit = "1.7.9"
coverage = "7.5.4"
django-silk = "5.3.2"
docker = "7.1.0"
freezegun = "1.5.1"
mypy = "1.10.1"
@@ -49,8 +48,8 @@ pytest-env = "1.1.3"
pytest-randomly = "3.15.0"
pytest-xdist = "3.6.1"
ruff = "0.5.0"
safety = "3.2.9"
vulture = "2.14"
safety = "3.2.3"
vulture = "2.11"
[tool.poetry.scripts]
celery = "src.backend.config.settings.celery"


@@ -1,23 +1,23 @@
import uuid
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db import connection, transaction
from rest_framework import permissions
from rest_framework.exceptions import NotAuthenticated
from rest_framework.filters import SearchFilter
from rest_framework_json_api import filters
from rest_framework_json_api.serializers import ValidationError
from rest_framework_json_api.views import ModelViewSet
from rest_framework_simplejwt.authentication import JWTAuthentication
from api.db_router import MainRouter
from api.db_utils import POSTGRES_USER_VAR, rls_transaction
from api.filters import CustomDjangoFilterBackend
from api.models import Role, Tenant
from api.rbac.permissions import HasPermissions
from api.db_router import MainRouter
class BaseViewSet(ModelViewSet):
authentication_classes = [JWTAuthentication]
required_permissions = []
permission_classes = [permissions.IsAuthenticated, HasPermissions]
permission_classes = [permissions.IsAuthenticated]
filter_backends = [
filters.QueryParameterValidationFilter,
filters.OrderingFilter,
@@ -31,17 +31,6 @@ class BaseViewSet(ModelViewSet):
ordering_fields = "__all__"
ordering = ["id"]
def initial(self, request, *args, **kwargs):
"""
Sets required_permissions before permissions are checked.
"""
self.set_required_permissions()
super().initial(request, *args, **kwargs)
def set_required_permissions(self):
"""This is an abstract method that must be implemented by subclasses."""
NotImplemented
def get_queryset(self):
raise NotImplementedError
@@ -61,7 +50,13 @@ class BaseRLSViewSet(BaseViewSet):
if tenant_id is None:
raise NotAuthenticated("Tenant ID is not present in token")
with rls_transaction(tenant_id):
try:
uuid.UUID(tenant_id)
except ValueError:
raise ValidationError("Tenant ID must be a valid UUID")
with connection.cursor() as cursor:
cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
self.request.tenant_id = tenant_id
return super().initial(request, *args, **kwargs)
@@ -115,7 +110,8 @@ class BaseTenantViewset(BaseViewSet):
):
user_id = str(request.user.id)
with rls_transaction(value=user_id, parameter=POSTGRES_USER_VAR):
with connection.cursor() as cursor:
cursor.execute(f"SELECT set_config('api.user_id', '{user_id}', TRUE);")
return super().initial(request, *args, **kwargs)
# TODO: DRY this when we have time
@@ -126,7 +122,13 @@ class BaseTenantViewset(BaseViewSet):
if tenant_id is None:
raise NotAuthenticated("Tenant ID is not present in token")
with rls_transaction(tenant_id):
try:
uuid.UUID(tenant_id)
except ValueError:
raise ValidationError("Tenant ID must be a valid UUID")
with connection.cursor() as cursor:
cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
self.request.tenant_id = tenant_id
return super().initial(request, *args, **kwargs)
@@ -147,6 +149,12 @@ class BaseUserViewset(BaseViewSet):
if tenant_id is None:
raise NotAuthenticated("Tenant ID is not present in token")
with rls_transaction(tenant_id):
try:
uuid.UUID(tenant_id)
except ValueError:
raise ValidationError("Tenant ID must be a valid UUID")
with connection.cursor() as cursor:
cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
self.request.tenant_id = tenant_id
return super().initial(request, *args, **kwargs)
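
Both variants of this hunk validate the tenant ID taken from the token before it is set as a Postgres session variable, one inline and one inside rls_transaction. The check in isolation, standard library only:

    import uuid

    def validate_tenant_id(tenant_id: str) -> str:
        # Mirrors the validation above: reject anything that is not a
        # well-formed UUID before it ever reaches set_config().
        try:
            uuid.UUID(str(tenant_id))
        except ValueError:
            raise ValueError("Tenant ID must be a valid UUID")
        return tenant_id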


@@ -4,17 +4,13 @@ class MainRouter:
def db_for_read(self, model, **hints): # noqa: F841
model_table_name = model._meta.db_table
if model_table_name.startswith("django_") or model_table_name.startswith(
"silk_"
):
if model_table_name.startswith("django_"):
return self.admin_db
return None
def db_for_write(self, model, **hints): # noqa: F841
model_table_name = model._meta.db_table
if model_table_name.startswith("django_") or model_table_name.startswith(
"silk_"
):
if model_table_name.startswith("django_"):
return self.admin_db
return None


@@ -1,14 +1,13 @@
import secrets
import uuid
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone
from django.conf import settings
from django.contrib.auth.models import BaseUserManager
from django.core.paginator import Paginator
from django.db import connection, models, transaction
from psycopg2 import connect as psycopg2_connect
from psycopg2.extensions import AsIs, new_type, register_adapter, register_type
from rest_framework_json_api.serializers import ValidationError
DB_USER = settings.DATABASES["default"]["USER"] if not settings.TESTING else "test"
DB_PASSWORD = (
@@ -24,8 +23,6 @@ TASK_RUNNER_DB_TABLE = "django_celery_results_taskresult"
POSTGRES_TENANT_VAR = "api.tenant_id"
POSTGRES_USER_VAR = "api.user_id"
SET_CONFIG_QUERY = "SELECT set_config(%s, %s::text, TRUE);"
@contextmanager
def psycopg_connection(database_alias: str):
@@ -47,23 +44,10 @@ def psycopg_connection(database_alias: str):
@contextmanager
def rls_transaction(value: str, parameter: str = POSTGRES_TENANT_VAR):
"""
Creates a new database transaction setting the given configuration value for Postgres RLS. It validates the
if the value is a valid UUID.
Args:
value (str): Database configuration parameter value.
parameter (str): Database configuration parameter name, by default is 'api.tenant_id'.
"""
def tenant_transaction(tenant_id: str):
with transaction.atomic():
with connection.cursor() as cursor:
try:
# just in case the value is an UUID object
uuid.UUID(str(value))
except ValueError:
raise ValidationError("Must be a valid UUID")
cursor.execute(SET_CONFIG_QUERY, [parameter, value])
cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
yield cursor
@@ -119,18 +103,15 @@ def batch_delete(queryset, batch_size=5000):
total_deleted = 0
deletion_summary = {}
while True:
# Get a batch of IDs to delete
batch_ids = set(
queryset.values_list("id", flat=True).order_by("id")[:batch_size]
)
if not batch_ids:
# No more objects to delete
break
paginator = Paginator(queryset.order_by("id").only("id"), batch_size)
for page_num in paginator.page_range:
batch_ids = [obj.id for obj in paginator.page(page_num).object_list]
deleted_count, deleted_info = queryset.filter(id__in=batch_ids).delete()
total_deleted += deleted_count
for model_label, count in deleted_info.items():
deletion_summary[model_label] = deletion_summary.get(model_label, 0) + count
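
The rls_transaction side of this hunk passes the parameter name and value through the SET_CONFIG_QUERY placeholders rather than interpolating them into the SQL string, so the RLS setup is not injectable. A minimal usage sketch, assuming the api.db_utils definitions above (the queried table name is illustrative):

    from api.db_utils import rls_transaction

    tenant_id = "8db7ca86-03cc-4d42-99f6-5e480baf6ab5"  # example UUID from this diff
    with rls_transaction(tenant_id) as cursor:
        # Within the block, row-level security policies see
        # api.tenant_id = tenant_id for every statement.
        cursor.execute("SELECT count(*) FROM findings;")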


@@ -1,10 +1,6 @@
import uuid
from functools import wraps
from django.db import connection, transaction
from rest_framework_json_api.serializers import ValidationError
from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY
def set_tenant(func):
@@ -35,7 +31,7 @@ def set_tenant(func):
pass
# When calling the task
some_task.delay(arg1, tenant_id="8db7ca86-03cc-4d42-99f6-5e480baf6ab5")
some_task.delay(arg1, tenant_id="1234-abcd-5678")
# The tenant context will be set before the task logic executes.
"""
@@ -47,12 +43,9 @@ def set_tenant(func):
tenant_id = kwargs.pop("tenant_id")
except KeyError:
raise KeyError("This task requires the tenant_id")
try:
uuid.UUID(tenant_id)
except ValueError:
raise ValidationError("Tenant ID must be a valid UUID")
with connection.cursor() as cursor:
cursor.execute(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])
cursor.execute(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
return func(*args, **kwargs)
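
The decorator's docstring already shows the calling convention; here is a hedged sketch of a task that uses it (the task name is hypothetical and the module path for set_tenant is assumed from this diff):

    from celery import shared_task

    from api.decorators import set_tenant  # module path assumed

    @shared_task
    @set_tenant  # pops tenant_id from kwargs and runs set_config before the body
    def count_findings():
        ...

    # Caller supplies the tenant, as in the docstring above:
    # count_findings.delay(tenant_id="8db7ca86-03cc-4d42-99f6-5e480baf6ab5")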


@@ -22,22 +22,21 @@ from api.db_utils import (
StatusEnumField,
)
from api.models import (
ComplianceOverview,
Finding,
Invitation,
Membership,
PermissionChoices,
Provider,
ProviderGroup,
ProviderSecret,
Resource,
ResourceTag,
Role,
Scan,
ScanSummary,
SeverityChoices,
StateChoices,
StatusChoices,
ProviderSecret,
Invitation,
Role,
ComplianceOverview,
Task,
User,
)
@@ -319,28 +318,6 @@ class FindingFilter(FilterSet):
field_name="resources__type", lookup_expr="icontains"
)
# Temporarily disabled until we implement tag filtering in the UI
# resource_tag_key = CharFilter(field_name="resources__tags__key")
# resource_tag_key__in = CharInFilter(
# field_name="resources__tags__key", lookup_expr="in"
# )
# resource_tag_key__icontains = CharFilter(
# field_name="resources__tags__key", lookup_expr="icontains"
# )
# resource_tag_value = CharFilter(field_name="resources__tags__value")
# resource_tag_value__in = CharInFilter(
# field_name="resources__tags__value", lookup_expr="in"
# )
# resource_tag_value__icontains = CharFilter(
# field_name="resources__tags__value", lookup_expr="icontains"
# )
# resource_tags = CharInFilter(
# method="filter_resource_tag",
# lookup_expr="in",
# help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
# "separated by commas.",
# )
scan = UUIDFilter(method="filter_scan_id")
scan__in = UUIDInFilter(method="filter_scan_id_in")
@@ -448,16 +425,6 @@ class FindingFilter(FilterSet):
return queryset.filter(id__lte=end).filter(inserted_at__lte=value)
def filter_resource_tag(self, queryset, name, value):
overall_query = Q()
for key_value_pair in value:
tag_key, tag_value = key_value_pair.split(":", 1)
overall_query |= Q(
resources__tags__key__icontains=tag_key,
resources__tags__value__icontains=tag_value,
)
return queryset.filter(overall_query).distinct()
@staticmethod
def maybe_date_to_datetime(value):
dt = value
@@ -518,12 +485,29 @@ class UserFilter(FilterSet):
class RoleFilter(FilterSet):
inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
updated_at = DateFilter(field_name="updated_at", lookup_expr="date")
permission_state = ChoiceFilter(
choices=PermissionChoices.choices, method="filter_permission_state"
)
permission_state = CharFilter(method="filter_permission_state")
def filter_permission_state(self, queryset, name, value):
return Role.filter_by_permission_state(queryset, value)
permission_fields = [
"manage_users",
"manage_account",
"manage_billing",
"manage_providers",
"manage_integrations",
"manage_scans",
]
q_all_true = Q(**{field: True for field in permission_fields})
q_all_false = Q(**{field: False for field in permission_fields})
if value == "unlimited":
return queryset.filter(q_all_true)
elif value == "none":
return queryset.filter(q_all_false)
elif value == "limited":
return queryset.exclude(q_all_true | q_all_false)
else:
return queryset.none()
class Meta:
model = Role
@@ -575,25 +559,3 @@ class ScanSummaryFilter(FilterSet):
"inserted_at": ["date", "gte", "lte"],
"region": ["exact", "icontains", "in"],
}
class ServiceOverviewFilter(ScanSummaryFilter):
muted_findings = None
def is_valid(self):
# Check if at least one of the inserted_at filters is present
inserted_at_filters = [
self.data.get("inserted_at"),
self.data.get("inserted_at__gte"),
self.data.get("inserted_at__lte"),
]
if not any(inserted_at_filters):
raise ValidationError(
{
"inserted_at": [
"At least one of filter[inserted_at], filter[inserted_at__gte], or "
"filter[inserted_at__lte] is required."
]
}
)
return super().is_valid()
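
Either implementation of the permission_state filter is exercised through the JSON:API filter syntax used elsewhere in this file (compare the filter[inserted_at] error message above). A hedged request example; host, path, and token are illustrative:

    import requests

    access_token = "..."  # obtained through the API's auth flow

    resp = requests.get(
        "http://localhost:8080/api/v1/roles",  # API_BASE_URL per the .env above
        params={"filter[permission_state]": "limited"},  # unlimited | limited | none
        headers={
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/vnd.api+json",
        },
    )
    resp.raise_for_status()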


@@ -6,7 +6,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.823Z",
"updated_at": "2024-10-18T10:46:04.841Z",
"first_seen_at": "2024-10-18T10:46:04.823Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -62,7 +61,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.855Z",
"updated_at": "2024-10-18T10:46:04.858Z",
"first_seen_at": "2024-10-18T10:46:04.855Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
"delta": "new",
"status": "FAIL",
@@ -118,7 +116,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.869Z",
"updated_at": "2024-10-18T10:46:04.876Z",
"first_seen_at": "2024-10-18T10:46:04.869Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -174,7 +171,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.888Z",
"updated_at": "2024-10-18T10:46:04.892Z",
"first_seen_at": "2024-10-18T10:46:04.888Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -230,7 +226,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.901Z",
"updated_at": "2024-10-18T10:46:04.905Z",
"first_seen_at": "2024-10-18T10:46:04.901Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -286,7 +281,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.915Z",
"updated_at": "2024-10-18T10:46:04.919Z",
"first_seen_at": "2024-10-18T10:46:04.915Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -342,7 +336,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.929Z",
"updated_at": "2024-10-18T10:46:04.934Z",
"first_seen_at": "2024-10-18T10:46:04.929Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -398,7 +391,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.944Z",
"updated_at": "2024-10-18T10:46:04.947Z",
"first_seen_at": "2024-10-18T10:46:04.944Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -454,7 +446,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.957Z",
"updated_at": "2024-10-18T10:46:04.962Z",
"first_seen_at": "2024-10-18T10:46:04.957Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
"delta": "new",
"status": "PASS",
@@ -510,7 +501,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.971Z",
"updated_at": "2024-10-18T10:46:04.975Z",
"first_seen_at": "2024-10-18T10:46:04.971Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -566,7 +556,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.984Z",
"updated_at": "2024-10-18T10:46:04.989Z",
"first_seen_at": "2024-10-18T10:46:04.984Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -622,7 +611,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.999Z",
"updated_at": "2024-10-18T10:46:05.003Z",
"first_seen_at": "2024-10-18T10:46:04.999Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -678,7 +666,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.013Z",
"updated_at": "2024-10-18T10:46:05.018Z",
"first_seen_at": "2024-10-18T10:46:05.013Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -734,7 +721,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.029Z",
"updated_at": "2024-10-18T10:46:05.033Z",
"first_seen_at": "2024-10-18T10:46:05.029Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -790,7 +776,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.045Z",
"updated_at": "2024-10-18T10:46:05.050Z",
"first_seen_at": "2024-10-18T10:46:05.045Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -846,7 +831,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.061Z",
"updated_at": "2024-10-18T10:46:05.065Z",
"first_seen_at": "2024-10-18T10:46:05.061Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -902,7 +886,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.080Z",
"updated_at": "2024-10-18T10:46:05.085Z",
"first_seen_at": "2024-10-18T10:46:05.080Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -958,7 +941,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.099Z",
"updated_at": "2024-10-18T10:46:05.104Z",
"first_seen_at": "2024-10-18T10:46:05.099Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -1014,7 +996,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.115Z",
"updated_at": "2024-10-18T10:46:05.121Z",
"first_seen_at": "2024-10-18T10:46:05.115Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
"delta": "new",
"status": "FAIL",
@@ -1070,7 +1051,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.489Z",
"updated_at": "2024-10-18T11:16:24.506Z",
"first_seen_at": "2024-10-18T10:46:04.823Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1126,7 +1106,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.518Z",
"updated_at": "2024-10-18T11:16:24.521Z",
"first_seen_at": "2024-10-18T10:46:04.855Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
"delta": null,
"status": "FAIL",
@@ -1182,7 +1161,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.526Z",
"updated_at": "2024-10-18T11:16:24.529Z",
"first_seen_at": "2024-10-18T10:46:04.869Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1238,7 +1216,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.535Z",
"updated_at": "2024-10-18T11:16:24.538Z",
"first_seen_at": "2024-10-18T10:46:04.888Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1294,7 +1271,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.544Z",
"updated_at": "2024-10-18T11:16:24.546Z",
"first_seen_at": "2024-10-18T10:46:04.901Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1350,7 +1326,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.551Z",
"updated_at": "2024-10-18T11:16:24.554Z",
"first_seen_at": "2024-10-18T10:46:04.915Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1406,7 +1381,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.560Z",
"updated_at": "2024-10-18T11:16:24.562Z",
"first_seen_at": "2024-10-18T10:46:04.929Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1462,7 +1436,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.567Z",
"updated_at": "2024-10-18T11:16:24.569Z",
"first_seen_at": "2024-10-18T10:46:04.944Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1518,7 +1491,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.573Z",
"updated_at": "2024-10-18T11:16:24.575Z",
"first_seen_at": "2024-10-18T10:46:04.957Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
"delta": null,
"status": "PASS",
@@ -1574,7 +1546,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.580Z",
"updated_at": "2024-10-18T11:16:24.582Z",
"first_seen_at": "2024-10-18T10:46:04.971Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1630,7 +1601,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.587Z",
"updated_at": "2024-10-18T11:16:24.589Z",
"first_seen_at": "2024-10-18T10:46:04.984Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1686,7 +1656,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.595Z",
"updated_at": "2024-10-18T11:16:24.597Z",
"first_seen_at": "2024-10-18T10:46:04.999Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1742,7 +1711,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.602Z",
"updated_at": "2024-10-18T11:16:24.604Z",
"first_seen_at": "2024-10-18T10:46:05.013Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1798,7 +1766,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.610Z",
"updated_at": "2024-10-18T11:16:24.612Z",
"first_seen_at": "2024-10-18T10:46:05.029Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1854,7 +1821,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.617Z",
"updated_at": "2024-10-18T11:16:24.620Z",
"first_seen_at": "2024-10-18T10:46:05.045Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1910,7 +1876,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.625Z",
"updated_at": "2024-10-18T11:16:24.627Z",
"first_seen_at": "2024-10-18T10:46:05.061Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1966,7 +1931,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.632Z",
"updated_at": "2024-10-18T11:16:24.634Z",
"first_seen_at": "2024-10-18T10:46:05.080Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -2022,7 +1986,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.639Z",
"updated_at": "2024-10-18T11:16:24.642Z",
"first_seen_at": "2024-10-18T10:46:05.099Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -2078,7 +2041,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.646Z",
"updated_at": "2024-10-18T11:16:24.648Z",
"first_seen_at": "2024-10-18T10:46:05.115Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
"delta": null,
"status": "FAIL",
@@ -2134,7 +2096,6 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:26.033Z",
"updated_at": "2024-10-18T11:16:26.045Z",
"first_seen_at": "2024-10-18T11:16:26.033Z",
"uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566",
"delta": "new",
"status": "MANUAL",


@@ -64,7 +64,7 @@
"pk": "3f01e759-bdf9-4a99-8888-1ab805b79f93",
"fields": {
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"name": "admin_test",
"name": "admin",
"manage_users": true,
"manage_account": true,
"manage_billing": true,


@@ -552,7 +552,7 @@ class Migration(migrations.Migration):
migrations.AddConstraint(
model_name="providergroupmembership",
constraint=models.UniqueConstraint(
fields=("provider_id", "provider_group"),
fields=("provider_id", "provider_group_id"),
name="unique_provider_group_membership",
),
),


@@ -1,17 +1,15 @@
# Generated by Django 5.1.1 on 2024-12-05 12:29
import uuid
import api.rls
import django.db.models.deletion
import uuid
from django.conf import settings
from django.db import migrations, models
import api.rls
class Migration(migrations.Migration):
dependencies = [
("api", "0003_update_provider_unique_constraint_with_is_deleted"),
("api", "0002_token_migrations"),
]
operations = [


@@ -1,23 +0,0 @@
# Generated by Django 5.1.1 on 2024-12-20 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0002_token_migrations"),
]
operations = [
migrations.RemoveConstraint(
model_name="provider",
name="unique_provider_uids",
),
migrations.AddConstraint(
model_name="provider",
constraint=models.UniqueConstraint(
fields=("tenant_id", "provider", "uid", "is_deleted"),
name="unique_provider_uids",
),
),
]


@@ -1,44 +0,0 @@
from django.db import migrations
from api.db_router import MainRouter
def create_admin_role(apps, schema_editor):
Tenant = apps.get_model("api", "Tenant")
Role = apps.get_model("api", "Role")
User = apps.get_model("api", "User")
UserRoleRelationship = apps.get_model("api", "UserRoleRelationship")
for tenant in Tenant.objects.using(MainRouter.admin_db).all():
admin_role, _ = Role.objects.using(MainRouter.admin_db).get_or_create(
name="admin",
tenant=tenant,
defaults={
"manage_users": True,
"manage_account": True,
"manage_billing": True,
"manage_providers": True,
"manage_integrations": True,
"manage_scans": True,
"unlimited_visibility": True,
},
)
users = User.objects.using(MainRouter.admin_db).filter(
membership__tenant=tenant
)
for user in users:
UserRoleRelationship.objects.using(MainRouter.admin_db).get_or_create(
user=user,
role=admin_role,
tenant=tenant,
)
class Migration(migrations.Migration):
dependencies = [
("api", "0004_rbac"),
]
operations = [
migrations.RunPython(create_admin_role),
]


@@ -1,15 +0,0 @@
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0005_rbac_missing_admin_roles"),
]
operations = [
migrations.AddField(
model_name="finding",
name="first_seen_at",
field=models.DateTimeField(editable=False, null=True),
),
]


@@ -1,25 +0,0 @@
# Generated by Django 5.1.5 on 2025-01-28 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0006_findings_first_seen"),
]
operations = [
migrations.AddIndex(
model_name="scan",
index=models.Index(
fields=["tenant_id", "provider_id", "state", "inserted_at"],
name="scans_prov_state_insert_idx",
),
),
migrations.AddIndex(
model_name="scansummary",
index=models.Index(
fields=["tenant_id", "scan_id"], name="scan_summaries_tenant_scan_idx"
),
),
]


@@ -1,64 +0,0 @@
import json
from datetime import datetime, timedelta, timezone
import django.db.models.deletion
from django.db import migrations, models
from django_celery_beat.models import PeriodicTask
from api.db_utils import rls_transaction
from api.models import Scan, StateChoices
def migrate_daily_scheduled_scan_tasks(apps, schema_editor):
for daily_scheduled_scan_task in PeriodicTask.objects.filter(
task="scan-perform-scheduled"
):
task_kwargs = json.loads(daily_scheduled_scan_task.kwargs)
tenant_id = task_kwargs["tenant_id"]
provider_id = task_kwargs["provider_id"]
current_time = datetime.now(timezone.utc)
scheduled_time_today = datetime.combine(
current_time.date(),
daily_scheduled_scan_task.start_time.time(),
tzinfo=timezone.utc,
)
if current_time < scheduled_time_today:
next_scan_date = scheduled_time_today
else:
next_scan_date = scheduled_time_today + timedelta(days=1)
with rls_transaction(tenant_id):
Scan.objects.create(
tenant_id=tenant_id,
name="Daily scheduled scan",
provider_id=provider_id,
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduled_at=next_scan_date,
scheduler_task_id=daily_scheduled_scan_task.id,
)
class Migration(migrations.Migration):
atomic = False
dependencies = [
("api", "0007_scan_and_scan_summaries_indexes"),
("django_celery_beat", "0019_alter_periodictasks_options"),
]
operations = [
migrations.AddField(
model_name="scan",
name="scheduler_task",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="django_celery_beat.periodictask",
),
),
migrations.RunPython(migrate_daily_scheduled_scan_tasks),
]
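
The next_scan_date logic in this migration reduces to: if today's scheduled time is still ahead, run today, otherwise tomorrow. A standalone illustration of the arithmetic:

    from datetime import datetime, timedelta, timezone

    current_time = datetime(2024, 12, 10, 15, 0, tzinfo=timezone.utc)
    scheduled_time_today = datetime(2024, 12, 10, 9, 30, tzinfo=timezone.utc)

    if current_time < scheduled_time_today:
        next_scan_date = scheduled_time_today  # still ahead today
    else:
        next_scan_date = scheduled_time_today + timedelta(days=1)  # tomorrow

    # 15:00 is past 09:30, so the next run lands on 2024-12-11 09:30 UTC.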


@@ -11,7 +11,6 @@ from django.core.validators import MinLengthValidator
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from django_celery_beat.models import PeriodicTask
from django_celery_results.models import TaskResult
from psqlextra.manager import PostgresManager
from psqlextra.models import PostgresPartitionedModel
@@ -70,21 +69,6 @@ class StateChoices(models.TextChoices):
CANCELLED = "cancelled", _("Cancelled")
class PermissionChoices(models.TextChoices):
"""
Represents the different permission states that a role can have.
Attributes:
UNLIMITED: Indicates that the role possesses all permissions.
LIMITED: Indicates that the role has some permissions but not all.
NONE: Indicates that the role does not have any permissions.
"""
UNLIMITED = "unlimited", _("Unlimited permissions")
LIMITED = "limited", _("Limited permissions")
NONE = "none", _("No permissions")
class ActiveProviderManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(self.active_provider_filter())
@@ -272,7 +256,7 @@ class Provider(RowLevelSecurityProtectedModel):
constraints = [
models.UniqueConstraint(
fields=("tenant_id", "provider", "uid", "is_deleted"),
fields=("tenant_id", "provider", "uid"),
name="unique_provider_uids",
),
RowLevelSecurityConstraint(
@@ -310,7 +294,7 @@ class ProviderGroup(RowLevelSecurityProtectedModel):
]
class JSONAPIMeta:
resource_name = "provider-groups"
resource_name = "provider-group"
class ProviderGroupMembership(RowLevelSecurityProtectedModel):
@@ -323,7 +307,7 @@ class ProviderGroupMembership(RowLevelSecurityProtectedModel):
db_table = "provider_group_memberships"
constraints = [
models.UniqueConstraint(
fields=["provider_id", "provider_group"],
fields=["provider_id", "provider_group_id"],
name="unique_provider_group_membership",
),
RowLevelSecurityConstraint(
@@ -411,9 +395,6 @@ class Scan(RowLevelSecurityProtectedModel):
started_at = models.DateTimeField(null=True, blank=True)
completed_at = models.DateTimeField(null=True, blank=True)
next_scan_at = models.DateTimeField(null=True, blank=True)
scheduler_task = models.ForeignKey(
PeriodicTask, on_delete=models.CASCADE, null=True, blank=True
)
# TODO: mutelist foreign key
class Meta(RowLevelSecurityProtectedModel.Meta):
@@ -432,10 +413,6 @@ class Scan(RowLevelSecurityProtectedModel):
fields=["provider", "state", "trigger", "scheduled_at"],
name="scans_prov_state_trig_sche_idx",
),
models.Index(
fields=["tenant_id", "provider_id", "state", "inserted_at"],
name="scans_prov_state_insert_idx",
),
]
class JSONAPIMeta:
@@ -523,8 +500,8 @@ class Resource(RowLevelSecurityProtectedModel):
through="ResourceTagMapping",
)
def get_tags(self, tenant_id: str) -> dict:
return {tag.key: tag.value for tag in self.tags.filter(tenant_id=tenant_id)}
def get_tags(self) -> dict:
return {tag.key: tag.value for tag in self.tags.all()}
def clear_tags(self):
self.tags.clear()
@@ -623,7 +600,6 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
first_seen_at = models.DateTimeField(editable=False, null=True)
uid = models.CharField(max_length=300)
delta = FindingDeltaEnumField(
@@ -888,38 +864,6 @@ class Role(RowLevelSecurityProtectedModel):
Invitation, through="InvitationRoleRelationship", related_name="roles"
)
# Filter permission_state
PERMISSION_FIELDS = [
"manage_users",
"manage_account",
"manage_billing",
"manage_providers",
"manage_integrations",
"manage_scans",
]
@property
def permission_state(self):
values = [getattr(self, field) for field in self.PERMISSION_FIELDS]
if all(values):
return PermissionChoices.UNLIMITED
elif not any(values):
return PermissionChoices.NONE
else:
return PermissionChoices.LIMITED
@classmethod
def filter_by_permission_state(cls, queryset, value):
q_all_true = Q(**{field: True for field in cls.PERMISSION_FIELDS})
q_all_false = Q(**{field: False for field in cls.PERMISSION_FIELDS})
if value == PermissionChoices.UNLIMITED:
return queryset.filter(q_all_true)
elif value == PermissionChoices.NONE:
return queryset.filter(q_all_false)
else:
return queryset.exclude(q_all_true | q_all_false)
class Meta:
db_table = "roles"
constraints = [
@@ -935,7 +879,7 @@ class Role(RowLevelSecurityProtectedModel):
]
class JSONAPIMeta:
resource_name = "roles"
resource_name = "role"
class RoleProviderGroupRelationship(RowLevelSecurityProtectedModel):
@@ -1108,12 +1052,6 @@ class ScanSummary(RowLevelSecurityProtectedModel):
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
),
]
indexes = [
models.Index(
fields=["tenant_id", "scan_id"],
name="scan_summaries_tenant_scan_idx",
)
]
class JSONAPIMeta:
resource_name = "scan-summaries"
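
The permission_state property touched by this hunk maps the six manage_* flags onto three states with plain all()/any(). The same logic without Django, for reference:

    PERMISSION_FIELDS = [
        "manage_users", "manage_account", "manage_billing",
        "manage_providers", "manage_integrations", "manage_scans",
    ]

    def permission_state(flags: dict) -> str:
        values = [flags[field] for field in PERMISSION_FIELDS]
        if all(values):
            return "unlimited"  # role has every permission
        if not any(values):
            return "none"       # role has none of them
        return "limited"        # some but not all

    # A role that can only manage scans is "limited":
    # permission_state({f: f == "manage_scans" for f in PERMISSION_FIELDS})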


@@ -1,12 +1,8 @@
from config.django.base import DISABLE_RBAC
from enum import Enum
from typing import Optional
from django.db.models import QuerySet
from rest_framework.permissions import BasePermission
from api.db_router import MainRouter
from api.models import Provider, Role, User
class Permissions(Enum):
MANAGE_USERS = "manage_users"
@@ -25,13 +21,15 @@ class HasPermissions(BasePermission):
"""
def has_permission(self, request, view):
# This is for testing/demo purposes only
if DISABLE_RBAC:
return True
required_permissions = getattr(view, "required_permissions", [])
if not required_permissions:
return True
user_roles = (
User.objects.using(MainRouter.admin_db).get(id=request.user.id).roles.all()
)
user_roles = request.user.roles.all()
if not user_roles:
return False
@@ -40,36 +38,3 @@ class HasPermissions(BasePermission):
return False
return True
def get_role(user: User) -> Optional[Role]:
"""
Retrieve the first role assigned to the given user.
Returns:
The user's first Role instance if the user has any roles, otherwise None.
"""
return user.roles.first()
def get_providers(role: Role) -> QuerySet[Provider]:
"""
Return a distinct queryset of Providers accessible by the given role.
If the role has no associated provider groups, an empty queryset is returned.
Args:
role: A Role instance.
Returns:
A QuerySet of Provider objects filtered by the role's provider groups.
If the role has no provider groups, returns an empty queryset.
"""
tenant = role.tenant
provider_groups = role.provider_groups.all()
if not provider_groups.exists():
return Provider.objects.none()
return Provider.objects.filter(
tenant=tenant, provider_groups__in=provider_groups
).distinct()
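
Viewsets opt into HasPermissions through the required_permissions attribute that the BaseViewSet hunk earlier in this compare reads during initial(). A sketch under stated assumptions: the viewset name is hypothetical, and MANAGE_USERS is the only enum member visible in this diff:

    from api.rbac.permissions import Permissions
    from api.v1.views import BaseRLSViewSet  # as defined in the views hunk above

    class UserAdminViewSet(BaseRLSViewSet):  # hypothetical viewset
        def set_required_permissions(self):
            # Read by HasPermissions.has_permission() before the request proceeds.
            self.required_permissions = [Permissions.MANAGE_USERS]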


@@ -2,7 +2,7 @@ from contextlib import nullcontext
from rest_framework_json_api.renderers import JSONRenderer
from api.db_utils import rls_transaction
from api.db_utils import tenant_transaction
class APIJSONRenderer(JSONRenderer):
@@ -13,9 +13,9 @@ class APIJSONRenderer(JSONRenderer):
tenant_id = getattr(request, "tenant_id", None) if request else None
include_param_present = "include" in request.query_params if request else False
# Use rls_transaction if needed for included resources, otherwise do nothing
# Use tenant_transaction if needed for included resources, otherwise do nothing
context_manager = (
rls_transaction(tenant_id)
tenant_transaction(tenant_id)
if tenant_id and include_param_present
else nullcontext()
)

File diff suppressed because it is too large.


@@ -1,9 +1,12 @@
import pytest
from conftest import TEST_PASSWORD, get_api_tokens, get_authorization_header
from django.urls import reverse
from unittest.mock import patch
from rest_framework.test import APIClient
from conftest import TEST_PASSWORD, get_api_tokens, get_authorization_header
@patch("api.v1.views.MainRouter.admin_db", new="default")
@pytest.mark.django_db
def test_basic_authentication():
client = APIClient()
@@ -95,85 +98,3 @@ def test_refresh_token(create_test_user, tenants_fixture):
format="vnd.api+json",
)
assert new_refresh_response.status_code == 200
@pytest.mark.django_db
def test_user_me_when_inviting_users(create_test_user, tenants_fixture, roles_fixture):
client = APIClient()
role = roles_fixture[0]
user1_email = "user1@testing.com"
user2_email = "user2@testing.com"
password = "thisisapassword123"
user1_response = client.post(
reverse("user-list"),
data={
"data": {
"type": "users",
"attributes": {
"name": "user1",
"email": user1_email,
"password": password,
},
}
},
format="vnd.api+json",
)
assert user1_response.status_code == 201
user1_access_token, _ = get_api_tokens(client, user1_email, password)
user1_headers = get_authorization_header(user1_access_token)
user2_invitation = client.post(
reverse("invitation-list"),
data={
"data": {
"type": "invitations",
"attributes": {"email": user2_email},
"relationships": {
"roles": {
"data": [
{
"type": "roles",
"id": str(role.id),
}
]
}
},
}
},
format="vnd.api+json",
headers=user1_headers,
)
assert user2_invitation.status_code == 201
invitation_token = user2_invitation.json()["data"]["attributes"]["token"]
user2_response = client.post(
reverse("user-list") + f"?invitation_token={invitation_token}",
data={
"data": {
"type": "users",
"attributes": {
"name": "user2",
"email": user2_email,
"password": password,
},
}
},
format="vnd.api+json",
)
assert user2_response.status_code == 201
user2_access_token, _ = get_api_tokens(client, user2_email, password)
user2_headers = get_authorization_header(user2_access_token)
user1_me = client.get(reverse("user-me"), headers=user1_headers)
assert user1_me.status_code == 200
assert user1_me.json()["data"]["attributes"]["email"] == user1_email
user2_me = client.get(reverse("user-me"), headers=user2_headers)
assert user2_me.status_code == 200
assert user2_me.json()["data"]["attributes"]["email"] == user2_email


@@ -1,85 +0,0 @@
from unittest.mock import Mock, patch
import pytest
from conftest import get_api_tokens, get_authorization_header
from django.urls import reverse
from rest_framework.test import APIClient
from api.models import Provider
@patch("api.v1.views.Task.objects.get")
@patch("api.v1.views.delete_provider_task.delay")
@pytest.mark.django_db
def test_delete_provider_without_executing_task(
mock_delete_task, mock_task_get, create_test_user, tenants_fixture, tasks_fixture
):
client = APIClient()
test_user = "test_email@prowler.com"
test_password = "test_password"
prowler_task = tasks_fixture[0]
task_mock = Mock()
task_mock.id = prowler_task.id
mock_delete_task.return_value = task_mock
mock_task_get.return_value = prowler_task
user_creation_response = client.post(
reverse("user-list"),
data={
"data": {
"type": "users",
"attributes": {
"name": "test",
"email": test_user,
"password": test_password,
},
}
},
format="vnd.api+json",
)
assert user_creation_response.status_code == 201
access_token, _ = get_api_tokens(client, test_user, test_password)
auth_headers = get_authorization_header(access_token)
create_provider_response = client.post(
reverse("provider-list"),
data={
"data": {
"type": "providers",
"attributes": {
"provider": Provider.ProviderChoices.AWS,
"uid": "123456789012",
},
}
},
format="vnd.api+json",
headers=auth_headers,
)
assert create_provider_response.status_code == 201
provider_id = create_provider_response.json()["data"]["id"]
provider_uid = create_provider_response.json()["data"]["attributes"]["uid"]
remove_provider = client.delete(
reverse("provider-detail", kwargs={"pk": provider_id}),
headers=auth_headers,
)
assert remove_provider.status_code == 202
recreate_provider_response = client.post(
reverse("provider-list"),
data={
"data": {
"type": "providers",
"attributes": {
"provider": Provider.ProviderChoices.AWS,
"uid": provider_uid,
},
}
},
format="vnd.api+json",
headers=auth_headers,
)
assert recreate_provider_response.status_code == 201

View File

@@ -11,9 +11,9 @@ from conftest import TEST_USER, TEST_PASSWORD, get_api_tokens, get_authorization
def test_check_resources_between_different_tenants(
schedule_mock,
enforce_test_user_db_connection,
patch_testing_flag,
authenticated_api_client,
tenants_fixture,
set_user_admin_roles_fixture,
):
client = authenticated_api_client

View File

@@ -1,3 +1,5 @@
# TODO: Enable these tests
import pytest
from django.urls import reverse
from rest_framework import status
@@ -29,14 +31,6 @@ class TestUserViewSet:
== create_test_user_rbac.email
)
def test_retrieve_user_with_no_roles(
self, authenticated_client_rbac_noroles, create_test_user_rbac_no_roles
):
response = authenticated_client_rbac_noroles.get(
reverse("user-detail", kwargs={"pk": create_test_user_rbac_no_roles.id})
)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_retrieve_user_with_no_permissions(
self, authenticated_client_no_permissions_rbac, create_test_user
):
@@ -45,6 +39,7 @@ class TestUserViewSet:
)
assert response.status_code == status.HTTP_403_FORBIDDEN
@patch("api.db_router.MainRouter.admin_db", new="default")
def test_create_user_with_all_permissions(self, authenticated_client_rbac):
valid_user_payload = {
"name": "test",
@@ -57,6 +52,7 @@ class TestUserViewSet:
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["data"]["attributes"]["email"] == "new_user@test.com"
@patch("api.db_router.MainRouter.admin_db", new="default")
def test_create_user_with_no_permissions(
self, authenticated_client_no_permissions_rbac
):

View File

@@ -6,10 +6,8 @@ from django.db.utils import ConnectionRouter
from api.db_router import MainRouter
from api.rls import Tenant
from config.django.base import DATABASE_ROUTERS as PROD_DATABASE_ROUTERS
from unittest.mock import patch
@patch("api.db_router.MainRouter.admin_db", new="admin")
class TestMainDatabaseRouter:
@pytest.fixture(scope="module")
def router(self):

View File

@@ -2,15 +2,7 @@ from datetime import datetime, timezone
from enum import Enum
from unittest.mock import patch
import pytest
from api.db_utils import (
batch_delete,
enum_to_choices,
generate_random_token,
one_week_from_now,
)
from api.models import Provider
from api.db_utils import enum_to_choices, one_week_from_now, generate_random_token
class TestEnumToChoices:
@@ -114,26 +106,3 @@ class TestGenerateRandomToken:
token = generate_random_token(length=5, symbols="")
# Default symbols
assert len(token) == 5
class TestBatchDelete:
@pytest.fixture
def create_test_providers(self, tenants_fixture):
tenant = tenants_fixture[0]
provider_id = 123456789012
provider_count = 10
for i in range(provider_count):
Provider.objects.create(
tenant=tenant,
uid=f"{provider_id + i}",
provider=Provider.ProviderChoices.AWS,
)
return provider_count
@pytest.mark.django_db
def test_batch_delete(self, create_test_providers):
_, summary = batch_delete(
Provider.objects.all(), batch_size=create_test_providers // 2
)
assert Provider.objects.all().count() == 0
assert summary == {"api.Provider": create_test_providers}
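The removed TestBatchDelete pins down batch_delete's contract: it deletes a queryset in slices and, like Django's QuerySet.delete(), returns a (total, per_model_summary) pair whose summary uses app.Model labels. A minimal sketch consistent with those assertions (an assumed shape, not the project's actual helper):

from collections import Counter

def batch_delete(queryset, batch_size=1000):
    # Delete in primary-key slices to keep each transaction and its
    # lock footprint small.
    total, summary = 0, Counter()
    while True:
        pks = list(queryset.values_list("pk", flat=True)[:batch_size])
        if not pks:
            break
        deleted, per_model = queryset.model._base_manager.filter(
            pk__in=pks
        ).delete()
        total += deleted
        summary.update(per_model)
    return total, dict(summary)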

View File

@@ -1,9 +1,7 @@
import uuid
from unittest.mock import call, patch
from unittest.mock import patch, call
import pytest
from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY
from api.decorators import set_tenant
@@ -17,12 +15,12 @@ class TestSetTenantDecorator:
def random_func(arg):
return arg
tenant_id = str(uuid.uuid4())
tenant_id = "1234-abcd-5678"
result = random_func("test_arg", tenant_id=tenant_id)
assert (
call(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])
call(f"SELECT set_config('api.tenant_id', '{tenant_id}', TRUE);")
in mock_cursor.execute.mock_calls
)
assert result == "test_arg"
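Both sides of this hunk exercise a set_tenant decorator that pins the tenant id on the database session before the wrapped function runs, so Postgres row-level security policies can filter on current_setting('api.tenant_id'). The parameterized SET_CONFIG_QUERY form is the safer variant, since it never interpolates the tenant id into SQL. A rough sketch under those assumptions (not the project's exact implementation):

import functools

from django.db import connection

POSTGRES_TENANT_VAR = "api.tenant_id"
SET_CONFIG_QUERY = "SELECT set_config(%s, %s, TRUE)"

def set_tenant(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Pop tenant_id so the wrapped function keeps a clean signature.
        tenant_id = kwargs.pop("tenant_id")
        with connection.cursor() as cursor:
            cursor.execute(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])
        return func(*args, **kwargs)
    return wrapper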

View File

@@ -7,10 +7,9 @@ from api.models import Resource, ResourceTag
class TestResourceModel:
def test_setting_tags(self, providers_fixture):
provider, *_ = providers_fixture
tenant_id = provider.tenant_id
resource = Resource.objects.create(
tenant_id=tenant_id,
tenant_id=provider.tenant_id,
provider=provider,
uid="arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0",
name="My Instance 1",
@@ -21,12 +20,12 @@ class TestResourceModel:
tags = [
ResourceTag.objects.create(
tenant_id=tenant_id,
tenant_id=provider.tenant_id,
key="key",
value="value",
),
ResourceTag.objects.create(
tenant_id=tenant_id,
tenant_id=provider.tenant_id,
key="key2",
value="value2",
),
@@ -34,9 +33,9 @@ class TestResourceModel:
resource.upsert_or_delete_tags(tags)
assert len(tags) == len(resource.tags.filter(tenant_id=tenant_id))
assert len(tags) == len(resource.tags.all())
tags_dict = resource.get_tags(tenant_id=tenant_id)
tags_dict = resource.get_tags()
for tag in tags:
assert tag.key in tags_dict
@@ -44,51 +43,47 @@ class TestResourceModel:
def test_adding_tags(self, resources_fixture):
resource, *_ = resources_fixture
tenant_id = str(resource.tenant_id)
tags = [
ResourceTag.objects.create(
tenant_id=tenant_id,
tenant_id=resource.tenant_id,
key="env",
value="test",
),
]
before_count = len(resource.tags.filter(tenant_id=tenant_id))
before_count = len(resource.tags.all())
resource.upsert_or_delete_tags(tags)
assert before_count + 1 == len(resource.tags.filter(tenant_id=tenant_id))
assert before_count + 1 == len(resource.tags.all())
tags_dict = resource.get_tags(tenant_id=tenant_id)
tags_dict = resource.get_tags()
assert "env" in tags_dict
assert tags_dict["env"] == "test"
def test_adding_duplicate_tags(self, resources_fixture):
resource, *_ = resources_fixture
tenant_id = str(resource.tenant_id)
tags = resource.tags.filter(tenant_id=tenant_id)
tags = resource.tags.all()
before_count = len(resource.tags.filter(tenant_id=tenant_id))
before_count = len(resource.tags.all())
resource.upsert_or_delete_tags(tags)
# should be the same number of tags
assert before_count == len(resource.tags.filter(tenant_id=tenant_id))
assert before_count == len(resource.tags.all())
def test_add_tags_none(self, resources_fixture):
resource, *_ = resources_fixture
tenant_id = str(resource.tenant_id)
resource.upsert_or_delete_tags(None)
assert len(resource.tags.filter(tenant_id=tenant_id)) == 0
assert resource.get_tags(tenant_id=tenant_id) == {}
assert len(resource.tags.all()) == 0
assert resource.get_tags() == {}
def test_clear_tags(self, resources_fixture):
resource, *_ = resources_fixture
tenant_id = str(resource.tenant_id)
resource.clear_tags()
assert len(resource.tags.filter(tenant_id=tenant_id)) == 0
assert resource.get_tags(tenant_id=tenant_id) == {}
assert len(resource.tags.all()) == 0
assert resource.get_tags() == {}
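Taken together, these tests define upsert_or_delete_tags behaviorally: a list of tags replaces the current set, re-passing the existing tags is a no-op, and None clears everything, with get_tags exposing the result as a plain dict. An in-memory sketch matching those assertions (illustrative only; the real model persists ResourceTag rows):

from collections import namedtuple

Tag = namedtuple("Tag", ["key", "value"])

class TaggedResource:
    def __init__(self):
        self._tags = {}  # key -> value

    def upsert_or_delete_tags(self, tags):
        # None clears all tags; any list replaces the previous set,
        # which also makes re-applying the current tags a no-op.
        self._tags = {} if tags is None else {t.key: t.value for t in tags}

    def get_tags(self):
        return dict(self._tags)

resource = TaggedResource()
resource.upsert_or_delete_tags([Tag("env", "test")])
assert resource.get_tags() == {"env": "test"}
resource.upsert_or_delete_tags(None)
assert resource.get_tags() == {}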

File diff suppressed because it is too large

View File

@@ -14,24 +14,24 @@ from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.tokens import RefreshToken
from api.models import (
ComplianceOverview,
Finding,
Invitation,
InvitationRoleRelationship,
Membership,
Provider,
ProviderGroup,
ProviderGroupMembership,
ProviderSecret,
Resource,
ResourceTag,
Finding,
ProviderSecret,
Invitation,
InvitationRoleRelationship,
Role,
RoleProviderGroupRelationship,
UserRoleRelationship,
ComplianceOverview,
Scan,
StateChoices,
Task,
User,
UserRoleRelationship,
)
from api.rls import Tenant
@@ -445,12 +445,7 @@ class MembershipSerializer(serializers.ModelSerializer):
# Provider Groups
class ProviderGroupSerializer(RLSSerializer, BaseWriteSerializer):
providers = serializers.ResourceRelatedField(
queryset=Provider.objects.all(), many=True, required=False
)
roles = serializers.ResourceRelatedField(
queryset=Role.objects.all(), many=True, required=False
)
providers = serializers.ResourceRelatedField(many=True, read_only=True)
def validate(self, attrs):
if ProviderGroup.objects.filter(name=attrs.get("name")).exists():
@@ -480,93 +475,21 @@ class ProviderGroupSerializer(RLSSerializer, BaseWriteSerializer):
}
class ProviderGroupIncludedSerializer(ProviderGroupSerializer):
class ProviderGroupIncludedSerializer(RLSSerializer, BaseWriteSerializer):
class Meta:
model = ProviderGroup
fields = ["id", "name"]
class ProviderGroupCreateSerializer(ProviderGroupSerializer):
providers = serializers.ResourceRelatedField(
queryset=Provider.objects.all(), many=True, required=False
)
roles = serializers.ResourceRelatedField(
queryset=Role.objects.all(), many=True, required=False
)
class ProviderGroupUpdateSerializer(RLSSerializer, BaseWriteSerializer):
"""
Serializer for updating the ProviderGroup model.
Only allows "name" field to be updated.
"""
class Meta:
model = ProviderGroup
fields = [
"id",
"name",
"inserted_at",
"updated_at",
"providers",
"roles",
]
def create(self, validated_data):
providers = validated_data.pop("providers", [])
roles = validated_data.pop("roles", [])
tenant_id = self.context.get("tenant_id")
provider_group = ProviderGroup.objects.create(
tenant_id=tenant_id, **validated_data
)
through_model_instances = [
ProviderGroupMembership(
provider_group=provider_group,
provider=provider,
tenant_id=tenant_id,
)
for provider in providers
]
ProviderGroupMembership.objects.bulk_create(through_model_instances)
through_model_instances = [
RoleProviderGroupRelationship(
provider_group=provider_group,
role=role,
tenant_id=tenant_id,
)
for role in roles
]
RoleProviderGroupRelationship.objects.bulk_create(through_model_instances)
return provider_group
class ProviderGroupUpdateSerializer(ProviderGroupSerializer):
def update(self, instance, validated_data):
tenant_id = self.context.get("tenant_id")
if "providers" in validated_data:
providers = validated_data.pop("providers")
instance.providers.clear()
through_model_instances = [
ProviderGroupMembership(
provider_group=instance,
provider=provider,
tenant_id=tenant_id,
)
for provider in providers
]
ProviderGroupMembership.objects.bulk_create(through_model_instances)
if "roles" in validated_data:
roles = validated_data.pop("roles")
instance.roles.clear()
through_model_instances = [
RoleProviderGroupRelationship(
provider_group=instance,
role=role,
tenant_id=tenant_id,
)
for role in roles
]
RoleProviderGroupRelationship.objects.bulk_create(through_model_instances)
return super().update(instance, validated_data)
fields = ["id", "name"]
class ProviderResourceIdentifierSerializer(serializers.Serializer):
@@ -819,14 +742,6 @@ class ScanTaskSerializer(RLSSerializer):
]
class ScanReportSerializer(RLSSerializer):
class Meta:
model = Scan
fields = [
"id",
]
class ResourceTagSerializer(RLSSerializer):
"""
Serializer for the ResourceTag model
@@ -882,7 +797,7 @@ class ResourceSerializer(RLSSerializer):
}
)
def get_tags(self, obj):
return obj.get_tags(self.context.get("tenant_id"))
return obj.get_tags()
def get_fields(self):
"""`type` is a Python reserved keyword."""
@@ -913,7 +828,6 @@ class FindingSerializer(RLSSerializer):
"raw_result",
"inserted_at",
"updated_at",
"first_seen_at",
"url",
# Relationships
"scan",
@@ -926,7 +840,6 @@ class FindingSerializer(RLSSerializer):
}
# To be removed when the related endpoint is removed as well
class FindingDynamicFilterSerializer(serializers.Serializer):
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
@@ -935,19 +848,6 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
resource_name = "finding-dynamic-filters"
class FindingMetadataSerializer(serializers.Serializer):
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
resource_types = serializers.ListField(
child=serializers.CharField(), allow_empty=True
)
# Temporarily disabled until we implement tag filtering in the UI
# tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")
class Meta:
resource_name = "findings-metadata"
# Provider secrets
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
@staticmethod
@@ -1020,7 +920,7 @@ class KubernetesProviderSecret(serializers.Serializer):
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
role_arn = serializers.CharField()
external_id = serializers.CharField()
external_id = serializers.CharField(required=False)
role_session_name = serializers.CharField(required=False)
session_duration = serializers.IntegerField(
required=False, min_value=900, max_value=43200
@@ -1067,10 +967,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
"description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
"assumption.",
},
"external_id": {
"type": "string",
"description": "An identifier to enhance security for role assumption.",
},
"aws_access_key_id": {
"type": "string",
"description": "The AWS access key ID. Only required if the environment lacks pre-configured "
@@ -1092,6 +988,11 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
"default": 3600,
"description": "The duration (in seconds) for the role session.",
},
"external_id": {
"type": "string",
"description": "An optional identifier to enhance security for role assumption; may be "
"required by the role administrator.",
},
"role_session_name": {
"type": "string",
"description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
@@ -1105,7 +1006,7 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
"pattern": "^[a-zA-Z0-9=,.@_-]+$",
},
},
"required": ["role_arn", "external_id"],
"required": ["role_arn"],
},
{
"type": "object",
@@ -1255,12 +1156,6 @@ class InvitationSerializer(RLSSerializer):
roles = serializers.ResourceRelatedField(many=True, queryset=Role.objects.all())
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
tenant_id = self.context.get("tenant_id")
if tenant_id is not None:
self.fields["roles"].queryset = Role.objects.filter(tenant_id=tenant_id)
class Meta:
model = Invitation
fields = [
@@ -1280,12 +1175,6 @@ class InvitationSerializer(RLSSerializer):
class InvitationBaseWriteSerializer(BaseWriteSerializer):
roles = serializers.ResourceRelatedField(many=True, queryset=Role.objects.all())
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
tenant_id = self.context.get("tenant_id")
if tenant_id is not None:
self.fields["roles"].queryset = Role.objects.filter(tenant_id=tenant_id)
def validate_email(self, value):
user = User.objects.filter(email=value).first()
tenant_id = self.context["tenant_id"]
@@ -1346,10 +1235,6 @@ class InvitationCreateSerializer(InvitationBaseWriteSerializer, RLSSerializer):
class InvitationUpdateSerializer(InvitationBaseWriteSerializer):
roles = serializers.ResourceRelatedField(
required=False, many=True, queryset=Role.objects.all()
)
class Meta:
model = Invitation
fields = ["id", "email", "expires_at", "state", "token", "roles"]
@@ -1362,19 +1247,15 @@ class InvitationUpdateSerializer(InvitationBaseWriteSerializer):
}
def update(self, instance, validated_data):
roles = validated_data.pop("roles", [])
tenant_id = self.context.get("tenant_id")
if "roles" in validated_data:
roles = validated_data.pop("roles")
instance.roles.clear()
new_relationships = [
InvitationRoleRelationship(
role=r, invitation=instance, tenant_id=tenant_id
)
for r in roles
]
InvitationRoleRelationship.objects.bulk_create(new_relationships)
invitation = super().update(instance, validated_data)
if roles:
instance.roles.clear()
for role in roles:
InvitationRoleRelationship.objects.create(
role=role, invitation=invitation, tenant_id=tenant_id
)
return invitation
@@ -1393,27 +1274,30 @@ class InvitationAcceptSerializer(RLSSerializer):
class RoleSerializer(RLSSerializer, BaseWriteSerializer):
permission_state = serializers.SerializerMethodField()
users = serializers.ResourceRelatedField(
queryset=User.objects.all(), many=True, required=False
)
provider_groups = serializers.ResourceRelatedField(
queryset=ProviderGroup.objects.all(), many=True, required=False
many=True, queryset=ProviderGroup.objects.all()
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
tenant_id = self.context.get("tenant_id")
if tenant_id is not None:
self.fields["users"].queryset = User.objects.filter(
membership__tenant__id=tenant_id
)
self.fields["provider_groups"].queryset = ProviderGroup.objects.filter(
tenant_id=self.context.get("tenant_id")
)
permission_state = serializers.SerializerMethodField()
def get_permission_state(self, obj) -> str:
return obj.permission_state
def get_permission_state(self, obj):
permission_fields = [
"manage_users",
"manage_account",
"manage_billing",
"manage_providers",
"manage_integrations",
"manage_scans",
]
values = [getattr(obj, field) for field in permission_fields]
if all(values):
return "unlimited"
elif not any(values):
return "none"
else:
return "limited"
def validate(self, attrs):
if Role.objects.filter(name=attrs.get("name")).exists():
@@ -1439,11 +1323,9 @@ class RoleSerializer(RLSSerializer, BaseWriteSerializer):
"name",
"manage_users",
"manage_account",
# Disable for the first release
# "manage_billing",
# "manage_integrations",
# /Disable for the first release
"manage_billing",
"manage_providers",
"manage_integrations",
"manage_scans",
"permission_state",
"unlimited_visibility",
@@ -1458,18 +1340,12 @@ class RoleSerializer(RLSSerializer, BaseWriteSerializer):
"id": {"read_only": True},
"inserted_at": {"read_only": True},
"updated_at": {"read_only": True},
"users": {"read_only": True},
"url": {"read_only": True},
}
class RoleCreateSerializer(RoleSerializer):
provider_groups = serializers.ResourceRelatedField(
many=True, queryset=ProviderGroup.objects.all(), required=False
)
users = serializers.ResourceRelatedField(
many=True, queryset=User.objects.all(), required=False
)
def create(self, validated_data):
provider_groups = validated_data.pop("provider_groups", [])
users = validated_data.pop("users", [])
@@ -1488,7 +1364,7 @@ class RoleCreateSerializer(RoleSerializer):
through_model_instances = [
UserRoleRelationship(
role=role,
role=user,
user=user,
tenant_id=tenant_id,
)
@@ -1500,36 +1376,19 @@ class RoleCreateSerializer(RoleSerializer):
class RoleUpdateSerializer(RoleSerializer):
def update(self, instance, validated_data):
tenant_id = self.context.get("tenant_id")
if "provider_groups" in validated_data:
provider_groups = validated_data.pop("provider_groups")
instance.provider_groups.clear()
through_model_instances = [
RoleProviderGroupRelationship(
role=instance,
provider_group=provider_group,
tenant_id=tenant_id,
)
for provider_group in provider_groups
]
RoleProviderGroupRelationship.objects.bulk_create(through_model_instances)
if "users" in validated_data:
users = validated_data.pop("users")
instance.users.clear()
through_model_instances = [
UserRoleRelationship(
role=instance,
user=user,
tenant_id=tenant_id,
)
for user in users
]
UserRoleRelationship.objects.bulk_create(through_model_instances)
return super().update(instance, validated_data)
class Meta:
model = Role
fields = [
"id",
"name",
"manage_users",
"manage_account",
"manage_billing",
"manage_providers",
"manage_integrations",
"manage_scans",
"unlimited_visibility",
]
class ProviderGroupResourceIdentifierSerializer(serializers.Serializer):
@@ -1744,7 +1603,7 @@ class OverviewProviderSerializer(serializers.Serializer):
"properties": {
"pass": {"type": "integer"},
"fail": {"type": "integer"},
"muted": {"type": "integer"},
"manual": {"type": "integer"},
"total": {"type": "integer"},
},
}
@@ -1753,7 +1612,7 @@ class OverviewProviderSerializer(serializers.Serializer):
return {
"pass": obj["findings_passed"],
"fail": obj["findings_failed"],
"muted": obj["findings_muted"],
"manual": obj["findings_manual"],
"total": obj["total_findings"],
}
@@ -1813,24 +1672,6 @@ class OverviewSeveritySerializer(serializers.Serializer):
return {"version": "v1"}
class OverviewServiceSerializer(serializers.Serializer):
id = serializers.CharField(source="service")
total = serializers.IntegerField()
_pass = serializers.IntegerField()
fail = serializers.IntegerField()
muted = serializers.IntegerField()
class JSONAPIMeta:
resource_name = "services-overview"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["pass"] = self.fields.pop("_pass")
def get_root_meta(self, _resource, _many):
return {"version": "v1"}
# Schedules

View File

@@ -3,28 +3,28 @@ from drf_spectacular.views import SpectacularRedocView
from rest_framework_nested import routers
from api.v1.views import (
ComplianceOverviewViewSet,
CustomTokenObtainView,
CustomTokenRefreshView,
FindingViewSet,
InvitationAcceptViewSet,
InvitationViewSet,
MembershipViewSet,
OverviewViewSet,
ProviderGroupProvidersRelationshipView,
ProviderGroupViewSet,
ProviderGroupProvidersRelationshipView,
ProviderSecretViewSet,
InvitationViewSet,
InvitationAcceptViewSet,
RoleViewSet,
RoleProviderGroupRelationshipView,
UserRoleRelationshipView,
OverviewViewSet,
ComplianceOverviewViewSet,
ProviderViewSet,
ResourceViewSet,
RoleProviderGroupRelationshipView,
RoleViewSet,
ScanViewSet,
ScheduleViewSet,
SchemaView,
TaskViewSet,
TenantMembersViewSet,
TenantViewSet,
UserRoleRelationshipView,
UserViewSet,
)

File diff suppressed because it is too large

View File

@@ -1,21 +1,10 @@
from celery import Celery, Task
from config.env import env
BROKER_VISIBILITY_TIMEOUT = env.int("DJANGO_BROKER_VISIBILITY_TIMEOUT", default=86400)
celery_app = Celery("tasks")
celery_app.config_from_object("django.conf:settings", namespace="CELERY")
celery_app.conf.update(result_extended=True, result_expires=None)
celery_app.conf.broker_transport_options = {
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
celery_app.conf.result_backend_transport_options = {
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
celery_app.conf.visibility_timeout = BROKER_VISIBILITY_TIMEOUT
celery_app.autodiscover_tasks(["api"])
@@ -46,10 +35,10 @@ class RLSTask(Task):
**options,
)
task_result_instance = TaskResult.objects.get(task_id=result.task_id)
from api.db_utils import rls_transaction
from api.db_utils import tenant_transaction
tenant_id = kwargs.get("tenant_id")
with rls_transaction(tenant_id):
with tenant_transaction(tenant_id):
APITask.objects.create(
id=task_result_instance.task_id,
tenant_id=tenant_id,

View File

@@ -207,3 +207,6 @@ CACHE_STALE_WHILE_REVALIDATE = env.int("DJANGO_STALE_WHILE_REVALIDATE", 60)
TESTING = False
# Disable RBAC during tests/demos
DISABLE_RBAC = False

View File

@@ -1,6 +1,7 @@
from config.django.base import * # noqa
from config.env import env
DEBUG = env.bool("DJANGO_DEBUG", default=True)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])

View File

@@ -1,6 +1,7 @@
from config.django.base import * # noqa
from config.env import env
DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])

View File

@@ -1,6 +1,7 @@
from config.django.base import * # noqa
from config.env import env
DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
@@ -9,8 +10,8 @@ DATABASES = {
"default": {
"ENGINE": "psqlextra.backend",
"NAME": "prowler_db_test",
"USER": env("POSTGRES_USER", default="prowler_admin"),
"PASSWORD": env("POSTGRES_PASSWORD", default="postgres"),
"USER": env("POSTGRES_USER", default="prowler"),
"PASSWORD": env("POSTGRES_PASSWORD", default="S3cret"),
"HOST": env("POSTGRES_HOST", default="localhost"),
"PORT": env("POSTGRES_PORT", default="5432"),
},

View File

@@ -1,39 +1,38 @@
import logging
from datetime import datetime, timedelta, timezone
from unittest.mock import patch
import pytest
from django.conf import settings
from django.db import connection as django_connection
from django.db import connections as django_connections
from datetime import datetime, timezone, timedelta
from django.db import connections as django_connections, connection as django_connection
from django.urls import reverse
from django_celery_results.models import TaskResult
from prowler.lib.check.models import Severity
from prowler.lib.outputs.finding import Status
from rest_framework import status
from rest_framework.test import APIClient
from unittest.mock import patch
from api.db_utils import rls_transaction
from api.models import (
ComplianceOverview,
Finding,
Invitation,
Membership,
)
from api.models import (
User,
Provider,
ProviderGroup,
ProviderSecret,
Resource,
ResourceTag,
Role,
Scan,
ScanSummary,
StateChoices,
Task,
User,
Membership,
ProviderSecret,
Invitation,
ComplianceOverview,
UserRoleRelationship,
)
from api.rls import Tenant
from api.v1.serializers import TokenSerializer
from prowler.lib.check.models import Severity
from prowler.lib.outputs.finding import Status
API_JSON_CONTENT_TYPE = "application/vnd.api+json"
NO_TENANT_HTTP_STATUS = status.HTTP_401_UNAUTHORIZED
@@ -76,6 +75,16 @@ def disable_logging():
logging.disable(logging.CRITICAL)
@pytest.fixture(scope="function")
def patch_testing_flag():
"""
Fixture to patch the DISABLE_RBAC flag to True during tests.
"""
with patch("api.rbac.permissions.DISABLE_RBAC", True):
with patch("api.v1.views.DISABLE_RBAC", True):
yield
@pytest.fixture(scope="session", autouse=True)
def create_test_user(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
@@ -88,14 +97,16 @@ def create_test_user(django_db_setup, django_db_blocker):
@pytest.fixture(scope="function")
def create_test_user_rbac(django_db_setup, django_db_blocker, tenants_fixture):
def create_test_user_rbac(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
user = User.objects.create_user(
name="testing",
email="rbac@rbac.com",
password=TEST_PASSWORD,
)
tenant = tenants_fixture[0]
tenant = Tenant.objects.create(
name="Tenant Test",
)
Membership.objects.create(
user=user,
tenant=tenant,
@@ -120,24 +131,6 @@ def create_test_user_rbac(django_db_setup, django_db_blocker, tenants_fixture):
return user
@pytest.fixture(scope="function")
def create_test_user_rbac_no_roles(django_db_setup, django_db_blocker, tenants_fixture):
with django_db_blocker.unblock():
user = User.objects.create_user(
name="testing",
email="rbac_noroles@rbac.com",
password=TEST_PASSWORD,
)
tenant = tenants_fixture[0]
Membership.objects.create(
user=user,
tenant=tenant,
role=Membership.RoleChoices.OWNER,
)
return user
@pytest.fixture(scope="function")
def create_test_user_rbac_limited(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
@@ -176,32 +169,8 @@ def create_test_user_rbac_limited(django_db_setup, django_db_blocker):
@pytest.fixture
def authenticated_client_rbac(create_test_user_rbac, tenants_fixture, client):
client.user = create_test_user_rbac
tenant_id = tenants_fixture[0].id
serializer = TokenSerializer(
data={
"type": "tokens",
"email": "rbac@rbac.com",
"password": TEST_PASSWORD,
"tenant_id": tenant_id,
}
)
serializer.is_valid(raise_exception=True)
access_token = serializer.validated_data["access"]
client.defaults["HTTP_AUTHORIZATION"] = f"Bearer {access_token}"
return client
@pytest.fixture
def authenticated_client_rbac_noroles(
create_test_user_rbac_no_roles, tenants_fixture, client
):
client.user = create_test_user_rbac_no_roles
serializer = TokenSerializer(
data={
"type": "tokens",
"email": "rbac_noroles@rbac.com",
"password": TEST_PASSWORD,
}
data={"type": "tokens", "email": "rbac@rbac.com", "password": TEST_PASSWORD}
)
serializer.is_valid()
access_token = serializer.validated_data["access"]
@@ -228,9 +197,7 @@ def authenticated_client_no_permissions_rbac(
@pytest.fixture
def authenticated_client(
create_test_user, tenants_fixture, set_user_admin_roles_fixture, client
):
def authenticated_client(create_test_user, tenants_fixture, client):
client.user = create_test_user
serializer = TokenSerializer(
data={"type": "tokens", "email": TEST_USER, "password": TEST_PASSWORD}
@@ -279,33 +246,10 @@ def tenants_fixture(create_test_user):
return tenant1, tenant2, tenant3
@pytest.fixture
def set_user_admin_roles_fixture(create_test_user, tenants_fixture):
user = create_test_user
for tenant in tenants_fixture[:2]:
with rls_transaction(str(tenant.id)):
role = Role.objects.create(
name="admin",
tenant_id=tenant.id,
manage_users=True,
manage_account=True,
manage_billing=True,
manage_providers=True,
manage_integrations=True,
manage_scans=True,
unlimited_visibility=True,
)
UserRoleRelationship.objects.create(
user=user,
role=role,
tenant_id=tenant.id,
)
@pytest.fixture
def invitations_fixture(create_test_user, tenants_fixture):
user = create_test_user
tenant = tenants_fixture[0]
*_, tenant = tenants_fixture
valid_invitation = Invitation.objects.create(
email="testing@prowler.com",
state=Invitation.State.PENDING,
@@ -324,20 +268,6 @@ def invitations_fixture(create_test_user, tenants_fixture):
return valid_invitation, expired_invitation
@pytest.fixture
def users_fixture(django_user_model):
user1 = User.objects.create_user(
name="user1", email="test_unit0@prowler.com", password="S3cret"
)
user2 = User.objects.create_user(
name="user2", email="test_unit1@prowler.com", password="S3cret"
)
user3 = User.objects.create_user(
name="user3", email="test_unit2@prowler.com", password="S3cret"
)
return user1, user2, user3
@pytest.fixture
def providers_fixture(tenants_fixture):
tenant, *_ = tenants_fixture
@@ -395,23 +325,6 @@ def provider_groups_fixture(tenants_fixture):
return pgroup1, pgroup2, pgroup3
@pytest.fixture
def admin_role_fixture(tenants_fixture):
tenant, *_ = tenants_fixture
return Role.objects.get_or_create(
name="admin",
tenant_id=tenant.id,
manage_users=True,
manage_account=True,
manage_billing=True,
manage_providers=True,
manage_integrations=True,
manage_scans=True,
unlimited_visibility=True,
)[0]
@pytest.fixture
def roles_fixture(tenants_fixture):
tenant, *_ = tenants_fixture
@@ -448,19 +361,8 @@ def roles_fixture(tenants_fixture):
manage_scans=True,
unlimited_visibility=True,
)
role4 = Role.objects.create(
name="Role Four",
tenant_id=tenant.id,
manage_users=False,
manage_account=False,
manage_billing=False,
manage_providers=False,
manage_integrations=False,
manage_scans=False,
unlimited_visibility=False,
)
return role1, role2, role3, role4
return role1, role2, role3
@pytest.fixture
@@ -626,7 +528,6 @@ def findings_fixture(scans_fixture, resources_fixture):
"CheckId": "test_check_id",
"Description": "test description apple sauce",
},
first_seen_at="2024-01-02T00:00:00Z",
)
finding1.add_resources([resource1])
@@ -652,7 +553,6 @@ def findings_fixture(scans_fixture, resources_fixture):
"CheckId": "test_check_id",
"Description": "test description orange juice",
},
first_seen_at="2024-01-02T00:00:00Z",
)
finding2.add_resources([resource2])
@@ -792,107 +692,10 @@ def get_api_tokens(
data=json_body,
format="vnd.api+json",
)
return (
response.json()["data"]["attributes"]["access"],
response.json()["data"]["attributes"]["refresh"],
)
@pytest.fixture
def scan_summaries_fixture(tenants_fixture, providers_fixture):
tenant = tenants_fixture[0]
provider = providers_fixture[0]
scan = Scan.objects.create(
name="overview scan",
provider=provider,
trigger=Scan.TriggerChoices.MANUAL,
state=StateChoices.COMPLETED,
tenant=tenant,
)
ScanSummary.objects.create(
tenant=tenant,
check_id="check1",
service="service1",
severity="high",
region="region1",
_pass=1,
fail=0,
muted=0,
total=1,
new=1,
changed=0,
unchanged=0,
fail_new=0,
fail_changed=0,
pass_new=1,
pass_changed=0,
muted_new=0,
muted_changed=0,
scan=scan,
)
ScanSummary.objects.create(
tenant=tenant,
check_id="check1",
service="service1",
severity="high",
region="region2",
_pass=0,
fail=1,
muted=1,
total=2,
new=2,
changed=0,
unchanged=0,
fail_new=1,
fail_changed=0,
pass_new=0,
pass_changed=0,
muted_new=1,
muted_changed=0,
scan=scan,
)
ScanSummary.objects.create(
tenant=tenant,
check_id="check2",
service="service2",
severity="critical",
region="region1",
_pass=1,
fail=0,
muted=0,
total=1,
new=1,
changed=0,
unchanged=0,
fail_new=0,
fail_changed=0,
pass_new=1,
pass_changed=0,
muted_new=0,
muted_changed=0,
scan=scan,
)
return response.json()["data"]["attributes"]["access"], response.json()["data"][
"attributes"
]["refresh"]
def get_authorization_header(access_token: str) -> dict:
return {"Authorization": f"Bearer {access_token}"}
def pytest_collection_modifyitems(items):
"""Ensure test_rbac.py is executed first."""
items.sort(key=lambda item: 0 if "test_rbac.py" in item.nodeid else 1)
def pytest_configure(config):
# Apply the mock before the test session starts. This is necessary to avoid admin error when running the
# 0004_rbac_missing_admin_roles migration
patch("api.db_router.MainRouter.admin_db", new="default").start()
def pytest_unconfigure(config):
# Stop all patches after the test session ends. This is necessary to avoid admin error when running the
# 0004_rbac_missing_admin_roles migration
patch.stopall()

View File

@@ -5,14 +5,10 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
from rest_framework_json_api.serializers import ValidationError
from tasks.tasks import perform_scheduled_scan_task
from api.db_utils import rls_transaction
from api.models import Provider, Scan, StateChoices
from api.models import Provider
def schedule_provider_scan(provider_instance: Provider):
tenant_id = str(provider_instance.tenant_id)
provider_id = str(provider_instance.id)
schedule, _ = IntervalSchedule.objects.get_or_create(
every=24,
period=IntervalSchedule.HOURS,
@@ -21,9 +17,23 @@ def schedule_provider_scan(provider_instance: Provider):
# Create a unique name for the periodic task
task_name = f"scan-perform-scheduled-{provider_instance.id}"
if PeriodicTask.objects.filter(
interval=schedule, name=task_name, task="scan-perform-scheduled"
).exists():
# Schedule the task
_, created = PeriodicTask.objects.get_or_create(
interval=schedule,
name=task_name,
task="scan-perform-scheduled",
kwargs=json.dumps(
{
"tenant_id": str(provider_instance.tenant_id),
"provider_id": str(provider_instance.id),
}
),
one_off=False,
defaults={
"start_time": datetime.now(timezone.utc) + timedelta(hours=24),
},
)
if not created:
raise ValidationError(
[
{
@@ -35,36 +45,9 @@ def schedule_provider_scan(provider_instance: Provider):
]
)
with rls_transaction(tenant_id):
scheduled_scan = Scan.objects.create(
tenant_id=tenant_id,
name="Daily scheduled scan",
provider_id=provider_id,
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.AVAILABLE,
scheduled_at=datetime.now(timezone.utc),
)
# Schedule the task
periodic_task_instance = PeriodicTask.objects.create(
interval=schedule,
name=task_name,
task="scan-perform-scheduled",
kwargs=json.dumps(
{
"tenant_id": tenant_id,
"provider_id": provider_id,
}
),
one_off=False,
start_time=datetime.now(timezone.utc) + timedelta(hours=24),
)
scheduled_scan.scheduler_task_id = periodic_task_instance.id
scheduled_scan.save()
return perform_scheduled_scan_task.apply_async(
kwargs={
"tenant_id": str(provider_instance.tenant_id),
"provider_id": provider_id,
"provider_id": str(provider_instance.id),
},
)
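The rewritten scheduler above leans on get_or_create semantics: every keyword outside defaults is part of the lookup, so only start_time is create-time data, and the created flag is what distinguishes a fresh schedule from a duplicate. Condensed into one sketch (assumes a configured Django project with django-celery-beat; ValueError stands in for the DRF ValidationError raised in the file):

import json
from datetime import datetime, timedelta, timezone

from django_celery_beat.models import IntervalSchedule, PeriodicTask

def ensure_daily_scan_schedule(tenant_id: str, provider_id: str) -> PeriodicTask:
    schedule, _ = IntervalSchedule.objects.get_or_create(
        every=24, period=IntervalSchedule.HOURS
    )
    task, created = PeriodicTask.objects.get_or_create(
        interval=schedule,
        name=f"scan-perform-scheduled-{provider_id}",
        task="scan-perform-scheduled",
        kwargs=json.dumps({"tenant_id": tenant_id, "provider_id": provider_id}),
        one_off=False,
        # Applied only on insert; an existing row keeps its start_time.
        defaults={"start_time": datetime.now(timezone.utc) + timedelta(hours=24)},
    )
    if not created:
        raise ValueError(f"Provider {provider_id} already has a scheduled scan")
    return task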

View File

@@ -2,7 +2,7 @@ from celery.utils.log import get_task_logger
from django.db import transaction
from api.db_router import MainRouter
from api.db_utils import batch_delete, rls_transaction
from api.db_utils import batch_delete, tenant_transaction
from api.models import Finding, Provider, Resource, Scan, ScanSummary, Tenant
logger = get_task_logger(__name__)
@@ -66,7 +66,7 @@ def delete_tenant(pk: str):
deletion_summary = {}
for provider in Provider.objects.using(MainRouter.admin_db).filter(tenant_id=pk):
with rls_transaction(pk):
with tenant_transaction(pk):
summary = delete_provider(provider.id)
deletion_summary.update(summary)

View File

@@ -1,13 +1,8 @@
import os
import time
import zipfile
from copy import deepcopy
from datetime import datetime, timezone
import boto3
from botocore.exceptions import ClientError
from celery.utils.log import get_task_logger
from config.env import env
from config.settings.celery import CELERY_DEADLOCK_ATTEMPTS
from django.db import IntegrityError, OperationalError
from django.db.models import Case, Count, IntegerField, Sum, When
@@ -16,7 +11,7 @@ from api.compliance import (
PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE,
generate_scan_compliance,
)
from api.db_utils import rls_transaction
from api.db_utils import tenant_transaction
from api.models import (
ComplianceOverview,
Finding,
@@ -30,117 +25,15 @@ from api.models import (
from api.models import StatusChoices as FindingStatus
from api.utils import initialize_prowler_provider
from api.v1.serializers import ScanTaskSerializer
from prowler.config.config import (
csv_file_suffix,
html_file_suffix,
json_asff_file_suffix,
json_ocsf_file_suffix,
output_file_timestamp,
tmp_output_directory,
)
from prowler.lib.outputs.asff.asff import ASFF
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.finding import Finding as ProwlerFinding
from prowler.lib.outputs.html.html import HTML
from prowler.lib.outputs.ocsf.ocsf import OCSF
from prowler.lib.scan.scan import Scan as ProwlerScan
logger = get_task_logger(__name__)
# Predefined mapping for output formats and their configurations
OUTPUT_FORMATS_MAPPING = {
"csv": {
"class": CSV,
"suffix": csv_file_suffix,
"kwargs": {},
},
"json-asff": {"class": ASFF, "suffix": json_asff_file_suffix, "kwargs": {}},
"json-ocsf": {"class": OCSF, "suffix": json_ocsf_file_suffix, "kwargs": {}},
"html": {"class": HTML, "suffix": html_file_suffix, "kwargs": {"stats": {}}},
}
# Mapping provider types to their identity components for output paths
PROVIDER_IDENTITY_MAP = {
"aws": lambda p: p.identity.account,
"azure": lambda p: p.identity.tenant_domain,
"gcp": lambda p: p.identity.profile,
"kubernetes": lambda p: p.identity.context.replace(":", "_").replace("/", "_"),
}
def _compress_output_files(output_directory: str) -> str:
"""
Compress output files from all configured output formats into a ZIP archive.
Args:
output_directory (str): The directory where the output files are located.
The function looks up all known suffixes in OUTPUT_FORMATS_MAPPING
and compresses those files into a single ZIP.
Returns:
str: The full path to the newly created ZIP archive.
"""
zip_path = f"{output_directory}.zip"
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
for suffix in [config["suffix"] for config in OUTPUT_FORMATS_MAPPING.values()]:
zipf.write(
f"{output_directory}{suffix}",
f"artifacts/{output_directory.split('/')[-1]}{suffix}",
)
return zip_path
def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
"""
Upload the specified ZIP file to an S3 bucket.
If the S3 bucket environment variables are not configured,
the function returns None without performing an upload.
Args:
tenant_id (str): The tenant identifier, used as part of the S3 key prefix.
zip_path (str): The local file system path to the ZIP file to be uploaded.
scan_id (str): The scan identifier, used as part of the S3 key prefix.
Returns:
str: The S3 URI of the uploaded file (e.g., "s3://<bucket>/<key>") if successful.
None: If the required environment variables for the S3 bucket are not set.
Raises:
botocore.exceptions.ClientError: If the upload attempt to S3 fails for any reason.
"""
if not env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET", ""):
return
if env.str("ARTIFACTS_AWS_ACCESS_KEY_ID", ""):
s3 = boto3.client(
"s3",
aws_access_key_id=env.str("ARTIFACTS_AWS_ACCESS_KEY_ID"),
aws_secret_access_key=env.str("ARTIFACTS_AWS_SECRET_ACCESS_KEY"),
aws_session_token=env.str("ARTIFACTS_AWS_SESSION_TOKEN"),
region_name=env.str("ARTIFACTS_AWS_DEFAULT_REGION"),
)
else:
s3 = boto3.client("s3")
s3_key = f"{tenant_id}/{scan_id}/{os.path.basename(zip_path)}"
try:
s3.upload_file(
Filename=zip_path,
Bucket=env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET"),
Key=s3_key,
)
return f"s3://{env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET")}/{s3_key}"
except ClientError as e:
logger.error(f"S3 upload failed: {str(e)}")
raise e
def _create_finding_delta(
last_status: FindingStatus | None | str, new_status: FindingStatus | None
) -> Finding.DeltaChoices | None:
) -> Finding.DeltaChoices:
"""
Determine the delta status of a finding based on its previous and current status.
@@ -160,11 +53,7 @@ def _create_finding_delta(
def _store_resources(
finding: ProwlerFinding,
tenant_id: str,
provider_instance: Provider,
resource_cache: dict,
tag_cache: dict,
finding: ProwlerFinding, tenant_id: str, provider_instance: Provider
) -> tuple[Resource, tuple[str, str]]:
"""
Store resource information from a finding, including tags, in the database.
@@ -176,91 +65,40 @@ def _store_resources(
Returns:
tuple:
- Resource: The resource instance created or updated from the database.
- Resource: The resource instance created or retrieved from the database.
- tuple[str, str]: A tuple containing the resource UID and region.
"""
resource_uid = finding.resource_uid
with tenant_transaction(tenant_id):
resource_instance, created = Resource.objects.get_or_create(
tenant_id=tenant_id,
provider=provider_instance,
uid=finding.resource_uid,
defaults={
"region": finding.region,
"service": finding.service_name,
"type": finding.resource_type,
},
)
# Check cache or create/update resource
if resource_uid in resource_cache:
resource_instance = resource_cache[resource_uid]
update_fields = []
for field, value in [
("region", finding.region),
("service", finding.service_name),
("type", finding.resource_type),
("name", finding.resource_name),
]:
if getattr(resource_instance, field) != value:
setattr(resource_instance, field, value)
update_fields.append(field)
if update_fields:
with rls_transaction(tenant_id):
resource_instance.save(update_fields=update_fields)
else:
with rls_transaction(tenant_id):
resource_instance, _ = Resource.objects.update_or_create(
tenant_id=tenant_id,
provider=provider_instance,
uid=resource_uid,
defaults={
"region": finding.region,
"service": finding.service_name,
"type": finding.resource_type,
"name": finding.resource_name,
},
)
resource_cache[resource_uid] = resource_instance
# Process tags with caching
tags = []
for key, value in finding.resource_tags.items():
tag_key = (key, value)
if tag_key not in tag_cache:
with rls_transaction(tenant_id):
tag_instance, _ = ResourceTag.objects.get_or_create(
tenant_id=tenant_id, key=key, value=value
)
tag_cache[tag_key] = tag_instance
tags.append(tag_cache[tag_key])
with rls_transaction(tenant_id):
if not created:
resource_instance.region = finding.region
resource_instance.service = finding.service_name
resource_instance.type = finding.resource_type
resource_instance.save()
with tenant_transaction(tenant_id):
tags = [
ResourceTag.objects.get_or_create(
tenant_id=tenant_id, key=key, value=value
)[0]
for key, value in finding.resource_tags.items()
]
resource_instance.upsert_or_delete_tags(tags=tags)
return resource_instance, (resource_instance.uid, resource_instance.region)
def _generate_output_directory(
prowler_provider: object, tenant_id: str, scan_id: str
) -> str:
"""
Generate a dynamic output directory path based on the given provider type.
Args:
prowler_provider (object): An object that has a `type` attribute indicating
the provider type (e.g., "aws", "azure", etc.).
tenant_id (str): A unique identifier for the tenant. Used to build the output path.
scan_id (str): A unique identifier for the scan. Included in the output path.
Returns:
str: The complete path to the output directory, including the tenant ID, scan ID,
provider identity, and a timestamp.
"""
provider_type = prowler_provider.type
get_identity = PROVIDER_IDENTITY_MAP.get(provider_type, lambda _: "unknown")
return (
f"{tmp_output_directory}/{tenant_id}/{scan_id}/prowler-output-"
f"{get_identity(prowler_provider)}-{output_file_timestamp}"
)
def perform_prowler_scan(
tenant_id: str,
scan_id: str,
provider_id: str,
checks_to_execute: list[str] = None,
tenant_id: str, scan_id: str, provider_id: str, checks_to_execute: list[str] = None
):
"""
Perform a scan using Prowler and store the findings and resources in the database.
@@ -278,15 +116,13 @@ def perform_prowler_scan(
ValueError: If the provider cannot be connected.
"""
generate_compliance = False
check_status_by_region = {}
exception = None
unique_resources = set()
start_time = time.time()
resource_cache = {}
tag_cache = {}
last_status_cache = {}
with rls_transaction(tenant_id):
with tenant_transaction(tenant_id):
provider_instance = Provider.objects.get(pk=provider_id)
scan_instance = Scan.objects.get(pk=scan_id)
scan_instance.state = StateChoices.EXECUTING
@@ -294,8 +130,7 @@ def perform_prowler_scan(
scan_instance.save()
try:
# Provider initialization
with rls_transaction(tenant_id):
with tenant_transaction(tenant_id):
try:
prowler_provider = initialize_prowler_provider(provider_instance)
provider_instance.connected = True
@@ -310,66 +145,102 @@ def perform_prowler_scan(
)
provider_instance.save()
# Scan configuration
prowler_scan = ProwlerScan(
provider=prowler_provider, checks=checks_to_execute or []
)
output_directory = _generate_output_directory(
prowler_provider, tenant_id, scan_id
)
# Create the output directory
os.makedirs("/".join(output_directory.split("/")[:-1]), exist_ok=True)
generate_compliance = provider_instance.provider != Provider.ProviderChoices.GCP
prowler_scan = ProwlerScan(provider=prowler_provider, checks=checks_to_execute)
all_findings = []
for progress, findings, stats in prowler_scan.scan():
resource_cache = {}
tag_cache = {}
last_status_cache = {}
for progress, findings in prowler_scan.scan():
for finding in findings:
if finding is None:
logger.error(f"None finding detected on scan {scan_id}.")
continue
for attempt in range(CELERY_DEADLOCK_ATTEMPTS):
try:
resource_instance, resource_uid_region = _store_resources(
finding,
tenant_id,
provider_instance,
resource_cache,
tag_cache,
)
unique_resources.add(resource_uid_region)
break
with tenant_transaction(tenant_id):
# Process resource
resource_uid = finding.resource_uid
if resource_uid not in resource_cache:
# Get or create the resource
resource_instance, _ = Resource.objects.get_or_create(
tenant_id=tenant_id,
provider=provider_instance,
uid=resource_uid,
defaults={
"region": finding.region,
"service": finding.service_name,
"type": finding.resource_type,
"name": finding.resource_name,
},
)
resource_cache[resource_uid] = resource_instance
else:
resource_instance = resource_cache[resource_uid]
# Update resource fields if necessary
updated_fields = []
if resource_instance.region != finding.region:
resource_instance.region = finding.region
updated_fields.append("region")
if resource_instance.service != finding.service_name:
resource_instance.service = finding.service_name
updated_fields.append("service")
if resource_instance.type != finding.resource_type:
resource_instance.type = finding.resource_type
updated_fields.append("type")
if updated_fields:
with tenant_transaction(tenant_id):
resource_instance.save(update_fields=updated_fields)
except (OperationalError, IntegrityError) as db_err:
if attempt < CELERY_DEADLOCK_ATTEMPTS - 1:
logger.warning(
f"Database error ({type(db_err).__name__}) "
f"processing resource {finding.resource_uid}, retrying..."
f"{'Deadlock error' if isinstance(db_err, OperationalError) else 'Integrity error'} "
f"detected when processing resource {resource_uid} on scan {scan_id}. Retrying..."
)
time.sleep(0.1 * (2**attempt))
continue
else:
raise db_err
# Finding processing
with rls_transaction(tenant_id):
# Update tags
tags = []
with tenant_transaction(tenant_id):
for key, value in finding.resource_tags.items():
tag_key = (key, value)
if tag_key not in tag_cache:
tag_instance, _ = ResourceTag.objects.get_or_create(
tenant_id=tenant_id, key=key, value=value
)
tag_cache[tag_key] = tag_instance
else:
tag_instance = tag_cache[tag_key]
tags.append(tag_instance)
resource_instance.upsert_or_delete_tags(tags=tags)
unique_resources.add((resource_instance.uid, resource_instance.region))
# Process finding
with tenant_transaction(tenant_id):
finding_uid = finding.uid
if finding_uid not in last_status_cache:
most_recent = (
most_recent_finding = (
Finding.objects.filter(uid=finding_uid)
.order_by("-inserted_at")
.values("status", "first_seen_at")
.order_by("-id")
.values("status")
.first()
)
last_status, first_seen = (
(most_recent["status"], most_recent["first_seen_at"])
if most_recent
else (None, None)
last_status = (
most_recent_finding["status"]
if most_recent_finding
else None
)
last_status_cache[finding_uid] = (last_status, first_seen)
last_status_cache[finding_uid] = last_status
else:
last_status, first_seen = last_status_cache[finding_uid]
last_status = last_status_cache[finding_uid]
status = FindingStatus[finding.status]
delta = _create_finding_delta(last_status, status)
first_seen = first_seen or datetime.now(tz=timezone.utc)
# Create the finding
finding_instance = Finding.objects.create(
tenant_id=tenant_id,
uid=finding_uid,
@@ -382,96 +253,91 @@ def perform_prowler_scan(
raw_result=finding.raw,
check_id=finding.check_id,
scan=scan_instance,
first_seen_at=first_seen,
)
finding_instance.add_resources([resource_instance])
# Update compliance status
if finding.status.value != "MUTED":
region_data = check_status_by_region.setdefault(finding.region, {})
if region_data.get(finding.check_id) != "FAIL":
region_data[finding.check_id] = finding.status.value
# Update compliance data if applicable
if not generate_compliance or finding.status.value == "MUTED":
continue
# Progress updates and output generation
with rls_transaction(tenant_id):
region_dict = check_status_by_region.setdefault(finding.region, {})
current_status = region_dict.get(finding.check_id)
if current_status == "FAIL":
continue
region_dict[finding.check_id] = finding.status.value
# Update scan progress
with tenant_transaction(tenant_id):
scan_instance.progress = progress
scan_instance.save()
all_findings.extend(findings)
# Generate output files
for mode, config in OUTPUT_FORMATS_MAPPING.items():
kwargs = dict(config["kwargs"])
if mode == "html":
kwargs["provider"] = prowler_provider
kwargs["stats"] = stats
config["class"](
findings=all_findings,
create_file_descriptor=True,
file_path=output_directory,
file_extension=config["suffix"],
).batch_write_data_to_file(**kwargs)
scan_instance.state = StateChoices.COMPLETED
# Compress output files
zip_path = _compress_output_files(output_directory)
# Save to configured storage
_upload_to_s3(tenant_id, zip_path, scan_id)
except Exception as e:
logger.error(f"Scan {scan_id} failed: {str(e)}")
logger.error(f"Error performing scan {scan_id}: {e}")
exception = e
scan_instance.state = StateChoices.FAILED
finally:
# Final scan updates
with rls_transaction(tenant_id):
with tenant_transaction(tenant_id):
scan_instance.duration = time.time() - start_time
scan_instance.completed_at = datetime.now(tz=timezone.utc)
scan_instance.unique_resource_count = len(unique_resources)
scan_instance.save()
# Compliance processing
if not exception:
if exception is None and generate_compliance:
try:
regions = prowler_provider.get_regions()
except AttributeError:
regions = set()
compliance_template = PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE[
provider_instance.provider
]
compliance_overview = {
region: deepcopy(compliance_template)
for region in getattr(prowler_provider, "get_regions", lambda: set())()
compliance_overview_by_region = {
region: deepcopy(compliance_template) for region in regions
}
for region, checks in check_status_by_region.items():
for check_id, status in checks.items():
for region, check_status in check_status_by_region.items():
compliance_data = compliance_overview_by_region.setdefault(
region, deepcopy(compliance_template)
)
for check_name, status in check_status.items():
generate_scan_compliance(
compliance_overview.setdefault(
region, deepcopy(compliance_template)
),
compliance_data,
provider_instance.provider,
check_id,
check_name,
status,
)
ComplianceOverview.objects.bulk_create(
[
ComplianceOverview(
tenant_id=tenant_id,
scan=scan_instance,
region=region,
compliance_id=compliance_id,
**compliance_data,
# Prepare compliance overview objects
compliance_overview_objects = []
for region, compliance_data in compliance_overview_by_region.items():
for compliance_id, compliance in compliance_data.items():
compliance_overview_objects.append(
ComplianceOverview(
tenant_id=tenant_id,
scan=scan_instance,
region=region,
compliance_id=compliance_id,
framework=compliance["framework"],
version=compliance["version"],
description=compliance["description"],
requirements=compliance["requirements"],
requirements_passed=compliance["requirements_status"]["passed"],
requirements_failed=compliance["requirements_status"]["failed"],
requirements_manual=compliance["requirements_status"]["manual"],
total_requirements=compliance["total_requirements"],
)
)
for region, data in compliance_overview.items()
for compliance_id, compliance_data in data.items()
]
)
with tenant_transaction(tenant_id):
ComplianceOverview.objects.bulk_create(compliance_overview_objects)
if exception:
if exception is not None:
raise exception
return ScanTaskSerializer(instance=scan_instance).data
serializer = ScanTaskSerializer(instance=scan_instance)
return serializer.data
def aggregate_findings(tenant_id: str, scan_id: str):
@@ -502,8 +368,8 @@ def aggregate_findings(tenant_id: str, scan_id: str):
- muted_new: Muted findings with a delta of 'new'.
- muted_changed: Muted findings with a delta of 'changed'.
"""
with rls_transaction(tenant_id):
findings = Finding.objects.filter(tenant_id=tenant_id, scan_id=scan_id)
with tenant_transaction(tenant_id):
findings = Finding.objects.filter(scan_id=scan_id)
aggregation = findings.values(
"check_id",
@@ -598,28 +464,29 @@ def aggregate_findings(tenant_id: str, scan_id: str):
),
)
ScanSummary.objects.bulk_create(
[
ScanSummary(
tenant_id=tenant_id,
scan_id=scan_id,
check_id=agg["check_id"],
service=agg["resources__service"],
severity=agg["severity"],
region=agg["resources__region"],
**{
k: v or 0
for k, v in agg.items()
if k
not in {
"check_id",
"resources__service",
"severity",
"resources__region",
}
},
)
for agg in aggregation
],
batch_size=3000,
)
with tenant_transaction(tenant_id):
scan_aggregations = {
ScanSummary(
tenant_id=tenant_id,
scan_id=scan_id,
check_id=agg["check_id"],
service=agg["resources__service"],
severity=agg["severity"],
region=agg["resources__region"],
fail=agg["fail"],
_pass=agg["_pass"],
muted=agg["muted"],
total=agg["total"],
new=agg["new"],
changed=agg["changed"],
unchanged=agg["unchanged"],
fail_new=agg["fail_new"],
fail_changed=agg["fail_changed"],
pass_new=agg["pass_new"],
pass_changed=agg["pass_changed"],
muted_new=agg["muted_new"],
muted_changed=agg["muted_changed"],
)
for agg in aggregation
}
ScanSummary.objects.bulk_create(scan_aggregations, batch_size=3000)
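One side of the scan loop above retries resource writes that hit database contention, backing off exponentially (0.1 * 2**attempt seconds) and re-raising on the final attempt. Extracted into a reusable helper as a sketch (the attempts default is illustrative; the real code reads CELERY_DEADLOCK_ATTEMPTS from settings):

import time

from django.db import IntegrityError, OperationalError

def retry_on_db_contention(func, attempts=5, base_delay=0.1):
    # Retry transient deadlocks and uniqueness races with exponential
    # backoff; re-raise on the last attempt so failures stay visible.
    for attempt in range(attempts):
        try:
            return func()
        except (OperationalError, IntegrityError):
            if attempt == attempts - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))

# Usage, mirroring the per-finding resource writes:
# resource, uid_region = retry_on_db_contention(
#     lambda: store_resource(finding, tenant_id, provider_instance)
# )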

View File

@@ -1,14 +1,15 @@
from datetime import datetime, timedelta, timezone
from celery import shared_task
from config.celery import RLSTask
from django_celery_beat.models import PeriodicTask
from tasks.jobs.connection import check_provider_connection
from tasks.jobs.deletion import delete_provider, delete_tenant
from tasks.jobs.scan import aggregate_findings, perform_prowler_scan
from tasks.utils import get_next_execution_datetime
from api.db_utils import rls_transaction
from api.db_utils import tenant_transaction
from api.decorators import set_tenant
from api.models import Scan, StateChoices
from api.models import Provider, Scan
@shared_task(base=RLSTask, name="provider-connection-check")
@@ -98,43 +99,29 @@ def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
"""
task_id = self.request.id
with rls_transaction(tenant_id):
with tenant_transaction(tenant_id):
provider_instance = Provider.objects.get(pk=provider_id)
periodic_task_instance = PeriodicTask.objects.get(
name=f"scan-perform-scheduled-{provider_id}"
)
next_scan_datetime = get_next_execution_datetime(task_id, provider_id)
scan_instance, _ = Scan.objects.get_or_create(
next_scan_date = datetime.combine(
datetime.now(timezone.utc), periodic_task_instance.start_time.time()
) + timedelta(hours=24)
scan_instance = Scan.objects.create(
tenant_id=tenant_id,
provider_id=provider_id,
name="Daily scheduled scan",
provider=provider_instance,
trigger=Scan.TriggerChoices.SCHEDULED,
state__in=(StateChoices.SCHEDULED, StateChoices.AVAILABLE),
scheduler_task_id=periodic_task_instance.id,
defaults={"state": StateChoices.SCHEDULED},
next_scan_at=next_scan_date,
task_id=task_id,
)
scan_instance.task_id = task_id
scan_instance.save()
try:
result = perform_prowler_scan(
tenant_id=tenant_id,
scan_id=str(scan_instance.id),
provider_id=provider_id,
)
except Exception as e:
raise e
finally:
with rls_transaction(tenant_id):
Scan.objects.get_or_create(
tenant_id=tenant_id,
name="Daily scheduled scan",
provider_id=provider_id,
trigger=Scan.TriggerChoices.SCHEDULED,
state=StateChoices.SCHEDULED,
scheduled_at=next_scan_datetime,
scheduler_task_id=periodic_task_instance.id,
)
result = perform_prowler_scan(
tenant_id=tenant_id,
scan_id=str(scan_instance.id),
provider_id=provider_id,
)
perform_scan_summary_task.apply_async(
kwargs={
"tenant_id": tenant_id,

View File

@@ -6,8 +6,6 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
from rest_framework_json_api.serializers import ValidationError
from tasks.beat import schedule_provider_scan
from api.models import Scan
@pytest.mark.django_db
class TestScheduleProviderScan:
@@ -17,11 +15,9 @@ class TestScheduleProviderScan:
with patch(
"tasks.tasks.perform_scheduled_scan_task.apply_async"
) as mock_apply_async:
assert Scan.all_objects.count() == 0
result = schedule_provider_scan(provider_instance)
assert result is not None
assert Scan.all_objects.count() == 1
mock_apply_async.assert_called_once_with(
kwargs={

View File

@@ -1,3 +1,5 @@
from unittest.mock import patch
import pytest
from django.core.exceptions import ObjectDoesNotExist
from tasks.jobs.deletion import delete_provider, delete_tenant
@@ -22,6 +24,7 @@ class TestDeleteProvider:
delete_provider(non_existent_pk)
@patch("api.db_router.MainRouter.admin_db", new="default")
@pytest.mark.django_db
class TestDeleteTenant:
def test_delete_tenant_success(self, tenants_fixture, providers_fixture):

View File

@@ -1,4 +1,3 @@
import uuid
from unittest.mock import MagicMock, patch
import pytest
@@ -27,7 +26,7 @@ class TestPerformScan:
providers_fixture,
):
with (
patch("api.db_utils.rls_transaction"),
patch("api.db_utils.tenant_transaction"),
patch(
"tasks.jobs.scan.initialize_prowler_provider"
) as mock_initialize_prowler_provider,
@@ -166,10 +165,10 @@ class TestPerformScan:
"tasks.jobs.scan.initialize_prowler_provider",
side_effect=Exception("Connection error"),
)
@patch("api.db_utils.rls_transaction")
@patch("api.db_utils.tenant_transaction")
def test_perform_prowler_scan_no_connection(
self,
mock_rls_transaction,
mock_tenant_transaction,
mock_initialize_prowler_provider,
mock_prowler_scan_class,
tenants_fixture,
@@ -206,14 +205,14 @@ class TestPerformScan:
@patch("api.models.ResourceTag.objects.get_or_create")
@patch("api.models.Resource.objects.get_or_create")
@patch("api.db_utils.rls_transaction")
@patch("api.db_utils.tenant_transaction")
def test_store_resources_new_resource(
self,
mock_rls_transaction,
mock_tenant_transaction,
mock_get_or_create_resource,
mock_get_or_create_tag,
):
tenant_id = uuid.uuid4()
tenant_id = "tenant123"
provider_instance = MagicMock()
provider_instance.id = "provider456"
@@ -254,14 +253,14 @@ class TestPerformScan:
@patch("api.models.ResourceTag.objects.get_or_create")
@patch("api.models.Resource.objects.get_or_create")
@patch("api.db_utils.rls_transaction")
@patch("api.db_utils.tenant_transaction")
def test_store_resources_existing_resource(
self,
mock_rls_transaction,
mock_tenant_transaction,
mock_get_or_create_resource,
mock_get_or_create_tag,
):
tenant_id = uuid.uuid4()
tenant_id = "tenant123"
provider_instance = MagicMock()
provider_instance.id = "provider456"
@@ -311,14 +310,14 @@ class TestPerformScan:
@patch("api.models.ResourceTag.objects.get_or_create")
@patch("api.models.Resource.objects.get_or_create")
@patch("api.db_utils.rls_transaction")
@patch("api.db_utils.tenant_transaction")
def test_store_resources_with_tags(
self,
mock_rls_transaction,
mock_tenant_transaction,
mock_get_or_create_resource,
mock_get_or_create_tag,
):
tenant_id = uuid.uuid4()
tenant_id = "tenant123"
provider_instance = MagicMock()
provider_instance.id = "provider456"

View File

@@ -1,76 +0,0 @@
from datetime import datetime, timedelta, timezone
from unittest.mock import patch
import pytest
from django_celery_beat.models import IntervalSchedule, PeriodicTask
from django_celery_results.models import TaskResult
from tasks.utils import get_next_execution_datetime
@pytest.mark.django_db
class TestGetNextExecutionDatetime:
@pytest.fixture
def setup_periodic_task(self, db):
# Create a periodic task with an hourly interval
interval = IntervalSchedule.objects.create(
every=1, period=IntervalSchedule.HOURS
)
periodic_task = PeriodicTask.objects.create(
name="scan-perform-scheduled-123",
task="scan-perform-scheduled",
interval=interval,
)
return periodic_task
@pytest.fixture
def setup_task_result(self, db):
# Create a task result record
task_result = TaskResult.objects.create(
task_id="abc123",
task_name="scan-perform-scheduled",
status="SUCCESS",
date_created=datetime.now(timezone.utc) - timedelta(hours=1),
result="Success",
)
return task_result
def test_get_next_execution_datetime_success(
self, setup_task_result, setup_periodic_task
):
task_result = setup_task_result
periodic_task = setup_periodic_task
# Mock periodic_task_name on TaskResult
with patch.object(
TaskResult, "periodic_task_name", return_value=periodic_task.name
):
next_execution = get_next_execution_datetime(
task_id=task_result.task_id, provider_id="123"
)
expected_time = task_result.date_created + timedelta(hours=1)
assert next_execution == expected_time
def test_get_next_execution_datetime_fallback_to_provider_id(
self, setup_task_result, setup_periodic_task
):
task_result = setup_task_result
# Simulate the case where `periodic_task_name` is missing
with patch.object(TaskResult, "periodic_task_name", return_value=None):
next_execution = get_next_execution_datetime(
task_id=task_result.task_id, provider_id="123"
)
expected_time = task_result.date_created + timedelta(hours=1)
assert next_execution == expected_time
def test_get_next_execution_datetime_periodic_task_does_not_exist(
self, setup_task_result
):
task_result = setup_task_result
with pytest.raises(PeriodicTask.DoesNotExist):
get_next_execution_datetime(
task_id=task_result.task_id, provider_id="nonexistent"
)

View File

@@ -1,26 +0,0 @@
from datetime import datetime, timedelta, timezone
from django_celery_beat.models import PeriodicTask
from django_celery_results.models import TaskResult
def get_next_execution_datetime(task_id: str, provider_id: str) -> datetime:
task_instance = TaskResult.objects.get(task_id=task_id)
try:
periodic_task_instance = PeriodicTask.objects.get(
name=task_instance.periodic_task_name
)
except PeriodicTask.DoesNotExist:
periodic_task_instance = PeriodicTask.objects.get(
name=f"scan-perform-scheduled-{provider_id}"
)
interval = periodic_task_instance.interval
current_scheduled_time = datetime.combine(
datetime.now(timezone.utc).date(),
task_instance.date_created.time(),
tzinfo=timezone.utc,
)
return current_scheduled_time + timedelta(**{interval.period: interval.every})

View File

@@ -1,301 +0,0 @@
# AWS SSO to Prowler Automation Script
## Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Setup](#setup)
- [Script Overview](#script-overview)
- [Usage](#usage)
- [Troubleshooting](#troubleshooting)
- [Customization](#customization)
- [Security Considerations](#security-considerations)
- [License](#license)
## Introduction
This repository provides a Bash script that automates the process of logging into AWS Single Sign-On (SSO), extracting temporary AWS credentials, and running **Prowler**, a security tool that performs AWS security best practices assessments, inside a Docker container using those credentials.
By following this guide, you can streamline your AWS security assessments, ensuring that you consistently apply best practices across your AWS accounts.
## Prerequisites
Before you begin, ensure that you have the following tools installed and properly configured on your system:
1. **AWS CLI v2**
- AWS SSO support is available from AWS CLI version 2 onwards.
- [Installation Guide](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
2. **jq**
- A lightweight and flexible command-line JSON processor.
- **macOS (Homebrew):**
```bash
brew install jq
```
- **Ubuntu/Debian:**
```bash
sudo apt-get update
sudo apt-get install -y jq
```
- **Windows:**
- [Download jq](https://stedolan.github.io/jq/download/)
3. **Docker**
- Ensure Docker is installed and running on your system.
- [Docker Installation Guide](https://docs.docker.com/get-docker/)
4. **AWS SSO Profile Configuration**
- Ensure that you have configured an AWS CLI profile with SSO.
- [Configuring AWS CLI with SSO](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html)
## Setup
1. **Clone the Repository**
```bash
git clone https://github.com/your-username/aws-sso-prowler-automation.git
cd aws-sso-prowler-automation
```
2. **Create the Automation Script**
Create a new Bash script named `run_prowler_sso.sh` and make it executable.
```bash
nano run_prowler_sso.sh
chmod +x run_prowler_sso.sh
```
3. **Add the Script Content**
Paste the script content into `run_prowler_sso.sh`. The complete script ships with this repository and is reproduced in full later in this document.
4. **Configure AWS SSO Profile**
Ensure that your AWS CLI profile (`twodragon` in this case) is correctly configured for SSO.
```bash
aws configure sso --profile twodragon
```
**Example Configuration Prompts:**
```
SSO session name (Recommended): [twodragon]
SSO start URL [None]: https://twodragon.awsapps.com/start
SSO region [None]: ap-northeast-2
SSO account ID [None]: 123456789012
SSO role name [None]: ReadOnlyAccess
CLI default client region [None]: ap-northeast-2
CLI default output format [None]: json
CLI profile name [twodragon]: twodragon
```
## Script Overview
The `run_prowler_sso.sh` script performs the following actions (a condensed sketch of the core flow follows this list):
1. **AWS SSO Login:**
- Initiates AWS SSO login for the specified profile.
- Opens the SSO authorization page in the default browser for user authentication.
2. **Extract Temporary Credentials:**
- Locates the most recent SSO cache file containing the `accessToken`.
- Uses `jq` to parse and extract the `accessToken` from the cache file.
- Retrieves the `sso_role_name` and `sso_account_id` from the AWS CLI configuration.
- Obtains temporary AWS credentials (`AccessKeyId`, `SecretAccessKey`, `SessionToken`) using the extracted `accessToken`.
3. **Set Environment Variables:**
- Exports the extracted AWS credentials as environment variables to be used by the Docker container.
4. **Run Prowler:**
- Executes the **Prowler** Docker container, passing the AWS credentials as environment variables for security assessments.
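For orientation, the core of steps 1 through 3 condenses to the sketch below. The profile name is assumed to be `twodragon`, and error handling is omitted; the full script, with checks and report hosting, appears later in this document:
```bash
# Log in, read the newest SSO cache file, and mint temporary credentials.
aws sso login --profile twodragon
CACHE_FILE=$(ls -t ~/.aws/sso/cache/*.json | head -n 1)
ACCESS_TOKEN=$(jq -r '.accessToken' "$CACHE_FILE")
ROLE_NAME=$(aws configure get sso_role_name --profile twodragon)
ACCOUNT_ID=$(aws configure get sso_account_id --profile twodragon)
TEMP_CREDS=$(aws sso get-role-credentials \
    --role-name "$ROLE_NAME" \
    --account-id "$ACCOUNT_ID" \
    --access-token "$ACCESS_TOKEN" \
    --profile twodragon)
export AWS_ACCESS_KEY_ID=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.accessKeyId')
export AWS_SECRET_ACCESS_KEY=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.secretAccessKey')
export AWS_SESSION_TOKEN=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.sessionToken')
```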
## Usage
1. **Make the Script Executable**
Ensure the script has execute permissions.
```bash
chmod +x run_prowler_sso.sh
```
2. **Run the Script**
Execute the script to start the AWS SSO login process and run Prowler.
```bash
./run_prowler_sso.sh
```
3. **Follow the Prompts**
- A browser window will open prompting you to authenticate via AWS SSO.
- Complete the authentication process in the browser.
- Upon successful login, the script will extract temporary credentials and run Prowler.
4. **Review Prowler Output**
- Prowler will analyze your AWS environment based on the specified checks and output the results directly in the terminal.
## Troubleshooting
If you encounter issues during the script execution, follow these steps to diagnose and resolve them.
### 1. Verify AWS CLI Version
Ensure you are using AWS CLI version 2 or later.
```bash
aws --version
```
**Expected Output:**
```
aws-cli/2.11.10 Python/3.9.12 Darwin/20.3.0 exe/x86_64 prompt/off
```
If you are not using version 2, [install or update AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html).
### 2. Confirm AWS SSO Profile Configuration
Check that the `twodragon` profile is correctly configured.
```bash
aws configure list-profiles
```
**Expected Output:**
```
default
twodragon
```
Review the profile details:
```bash
aws configure get sso_start_url --profile twodragon
aws configure get sso_region --profile twodragon
aws configure get sso_account_id --profile twodragon
aws configure get sso_role_name --profile twodragon
```
Ensure all fields return the correct values.
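To verify all four values in one pass, a small loop over `aws configure get` works (an illustrative helper, not part of the script):
```bash
for key in sso_start_url sso_region sso_account_id sso_role_name; do
    printf '%-16s %s\n' "$key" "$(aws configure get "$key" --profile twodragon)"
done
```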
### 3. Check SSO Cache File
Ensure that the SSO cache file contains a valid `accessToken`.
```bash
cat ~/.aws/sso/cache/*.json
```
**Example Content:**
```json
{
"accessToken": "eyJz93a...k4laUWw",
"expiresAt": "2024-12-22T14:07:55Z",
"clientId": "example-client-id",
"clientSecret": "example-client-secret",
"startUrl": "https://twodragon.awsapps.com/start#"
}
```
If `accessToken` is `null` or missing, retry the AWS SSO login:
```bash
aws sso login --profile twodragon
```
### 4. Validate `jq` Installation
Ensure that `jq` is installed and functioning correctly.
```bash
jq --version
```
**Expected Output:**
```
jq-1.6
```
If `jq` is not installed, install it using the instructions in the [Prerequisites](#prerequisites) section.
### 5. Test Docker Environment Variables
Verify that the Docker container receives the AWS credentials correctly.
```bash
docker run --platform linux/amd64 \
-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
-e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \
toniblyx/prowler /bin/bash -c 'echo $AWS_ACCESS_KEY_ID; echo $AWS_SECRET_ACCESS_KEY; echo $AWS_SESSION_TOKEN'
```
**Expected Output:**
```
ASIA...
wJalrFEMI/K7MDENG/bPxRfiCY...
IQoJb3JpZ2luX2VjEHwaCXVz...
```
Ensure that none of the environment variables are empty.
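You can also assert locally that each variable is non-empty before starting the container; this sketch relies on Bash indirect expansion (`${!v}`):
```bash
for v in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN; do
    [ -n "${!v}" ] || { echo "Error: $v is empty" >&2; exit 1; }
done
```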
### 6. Review Script Output
Run the script with debugging enabled to get detailed output.
1. **Enable Debugging in Script**
Add `set -x` for verbose output.
```bash
#!/bin/bash
set -e
set -x
# ... rest of the script ...
```
2. **Run the Script**
```bash
./run_prowler_sso.sh
```
3. **Analyze Output**
Look for any errors or unexpected values in the output to identify where the script is failing.
## Customization
You can modify the script to suit your specific needs, such as:
- **Changing the AWS Profile Name:**
Update the `PROFILE` variable at the top of the script.
```bash
PROFILE="your-profile-name"
```
- **Adding Prowler Options:**
Pass additional options to Prowler for customized checks or output formats.
```bash
docker run --platform linux/amd64 \
-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
-e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \
toniblyx/prowler -c check123 -M json
```
## Security Considerations
- **Handle Credentials Securely:**
- Avoid sharing or exposing your AWS credentials.
- Do not include sensitive information in logs or version control.
- **Script Permissions:**
- Ensure the script file has appropriate permissions to prevent unauthorized access.
```bash
chmod 700 run_prowler_sso.sh
```
- **Environment Variables:**
- Be cautious when exporting credentials as environment variables.
- Consider using more secure methods for credential management if necessary; at minimum, clear the variables when you are done (see the one-liner below).
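For example, a quick cleanup once Prowler finishes:
```bash
unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
```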
## License
This project is licensed under the [MIT License](LICENSE).

View File

@@ -1,136 +0,0 @@
#!/bin/bash
set -e
# Set the profile name
PROFILE="twodragon"
# Set the Prowler output directory
OUTPUT_DIR=~/prowler-output
mkdir -p "$OUTPUT_DIR"
# Set the port for the local web server
WEB_SERVER_PORT=8000
# ----------------------------------------------
# Functions
# ----------------------------------------------
# Function to open the HTML report in the default browser
open_report() {
local report_path="$1"
if [[ "$OSTYPE" == "darwin"* ]]; then
open "$report_path"
elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
xdg-open "$report_path"
elif [[ "$OSTYPE" == "msys" ]]; then
start "" "$report_path"
else
echo "Automatic method to open Prowler HTML report is not supported on this OS."
echo "Please open the report manually at: $report_path"
fi
}
# Function to start a simple HTTP server to host the Prowler reports
start_web_server() {
local directory="$1"
local port="$2"
echo "Starting local web server to host Prowler reports at http://localhost:$port"
echo "Press Ctrl+C to stop the web server."
# Change to the output directory
cd "$directory"
# Start the HTTP server in the foreground
# Python 3 is required
python3 -m http.server "$port"
}
# ----------------------------------------------
# Main Script
# ----------------------------------------------
# AWS SSO Login
echo "Logging into AWS SSO..."
aws sso login --profile "$PROFILE"
# Extract temporary credentials
echo "Extracting temporary credentials..."
# Find the most recently modified SSO cache file
CACHE_FILE=$(ls -t ~/.aws/sso/cache/*.json 2>/dev/null | head -n 1)
echo "Cache File: $CACHE_FILE"
if [ -z "$CACHE_FILE" ]; then
echo "SSO cache file not found. Please ensure AWS SSO login was successful."
exit 1
fi
# Extract accessToken using jq
ACCESS_TOKEN=$(jq -r '.accessToken' "$CACHE_FILE")
echo "Access Token: $ACCESS_TOKEN"
if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" == "null" ]; then
echo "Unable to extract accessToken. Please check your SSO login and cache file."
exit 1
fi
# Extract role name and account ID from AWS CLI configuration
ROLE_NAME=$(aws configure get sso_role_name --profile "$PROFILE")
ACCOUNT_ID=$(aws configure get sso_account_id --profile "$PROFILE")
echo "Role Name: $ROLE_NAME"
echo "Account ID: $ACCOUNT_ID"
if [ -z "$ROLE_NAME" ] || [ -z "$ACCOUNT_ID" ]; then
echo "Unable to extract sso_role_name or sso_account_id. Please check your profile configuration."
exit 1
fi
# Obtain temporary credentials using AWS SSO
TEMP_CREDS=$(aws sso get-role-credentials \
--role-name "$ROLE_NAME" \
--account-id "$ACCOUNT_ID" \
--access-token "$ACCESS_TOKEN" \
--profile "$PROFILE")
echo "TEMP_CREDS: $TEMP_CREDS"
# Extract credentials from the JSON response
AWS_ACCESS_KEY_ID=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.accessKeyId')
AWS_SECRET_ACCESS_KEY=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.secretAccessKey')
AWS_SESSION_TOKEN=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.sessionToken')
# Verify that all credentials were extracted successfully
if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ] || [ -z "$AWS_SESSION_TOKEN" ]; then
echo "Unable to extract temporary credentials."
exit 1
fi
# Export AWS credentials as environment variables
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY
export AWS_SESSION_TOKEN
echo "AWS credentials have been set."
# Run Prowler in Docker container
echo "Running Prowler Docker container..."
docker run --platform linux/amd64 \
-e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
-e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
-e AWS_SESSION_TOKEN="$AWS_SESSION_TOKEN" \
-v "$OUTPUT_DIR":/home/prowler/output \
toniblyx/prowler -M html -M csv -M json-ocsf --output-directory /home/prowler/output --output-filename prowler-output
echo "Prowler has finished running. Reports are saved in $OUTPUT_DIR."
# Open the HTML report in the default browser
REPORT_PATH="$OUTPUT_DIR/prowler-output.html"
echo "Opening Prowler HTML report..."
open_report "$REPORT_PATH" &
# Start the local web server to host the Prowler dashboard
# This will run in the foreground. To run it in the background, append an ampersand (&) at the end of the command.
start_web_server "$OUTPUT_DIR" "$WEB_SERVER_PORT"

View File

@@ -1,24 +0,0 @@
apiVersion: v2
name: prowler-api
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "5.1.1"

View File

@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-api.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-api.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-api.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-api.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-api.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-api.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "prowler-api.labels" -}}
helm.sh/chart: {{ include "prowler-api.chart" . }}
{{ include "prowler-api.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "prowler-api.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-api.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-api.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-api.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "prowler-api.fullname" . }}-config
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
data:
config.yaml: |-
{{- toYaml .Values.mainConfig | nindent 4 }}

View File

@@ -1,85 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "prowler-api.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler-api.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler-api.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
{{- range $name,$config := .Values.containers }}
{{- if $config.enabled }}
- name: {{ $name }}
securityContext:
{{- toYaml $config.securityContext | nindent 12 }}
image: "{{ $config.image.repository }}:{{ $config.image.tag | default $.Chart.AppVersion }}"
imagePullPolicy: {{ $config.image.pullPolicy }}
envFrom:
- secretRef:
name: {{ include "prowler-api.fullname" $ }}
command:
{{- toYaml $config.command | nindent 12 }}
{{- if $config.ports }}
ports:
{{- toYaml $config.ports | nindent 12 }}
{{- end }}
livenessProbe:
{{- toYaml $config.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml $config.readinessProbe | nindent 12 }}
resources:
{{- toYaml $config.resources | nindent 12 }}
volumeMounts:
- name: {{ include "prowler-api.fullname" $ }}-config
mountPath: {{ $.Values.releaseConfigRoot }}{{ $.Values.releaseConfigPath }}
subPath: config.yaml
{{- with .volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}
volumes:
- name: {{ include "prowler-api.fullname" . }}-config
configMap:
name: {{ include "prowler-api.fullname" . }}-config
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,43 +0,0 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- with .pathType }}
pathType: {{ . }}
{{- end }}
backend:
service:
name: {{ include "prowler-api.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $k, $v := .Values.secrets }}
{{ $k }}: {{ $v | toString | b64enc | quote }}
{{- end }}

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
{{- range $name,$config := .Values.containers }}
{{- if $config.ports }}
{{- range $p := $config.ports }}
- port: {{ $p.containerPort }}
targetPort: {{ $p.containerPort }}
protocol: TCP
name: {{ $p.name }}
{{- end }}
{{- end }}
{{- end }}
selector:
{{- include "prowler-api.selectorLabels" . | nindent 4 }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler-api.serviceAccountName" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -1,625 +0,0 @@
# Default values for prowler-api.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This will set the replicaset count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
containers:
prowler-api:
enabled: true
image:
repository: prowlercloud/prowler-api
pullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
protocol: TCP
command: ["/home/prowler/docker-entrypoint.sh", "prod"]
worker:
enabled: true
image:
repository: prowlercloud/prowler-api
pullPolicy: IfNotPresent
command: ["/home/prowler/docker-entrypoint.sh", "worker"]
worker-beat:
enabled: true
image:
repository: prowlercloud/prowler-api
pullPolicy: IfNotPresent
command: ["../docker-entrypoint.sh", "beat"]
secrets:
POSTGRES_HOST:
POSTGRES_PORT: 5432
POSTGRES_ADMIN_USER:
POSTGRES_ADMIN_PASSWORD:
POSTGRES_USER:
POSTGRES_PASSWORD:
POSTGRES_DB:
# Valkey settings
VALKEY_HOST: valkey-headless
VALKEY_PORT: "6379"
VALKEY_DB: "0"
# Django settings
DJANGO_ALLOWED_HOSTS: localhost,127.0.0.1,prowler-api
DJANGO_BIND_ADDRESS: 0.0.0.0
DJANGO_PORT: "8080"
DJANGO_DEBUG: False
DJANGO_SETTINGS_MODULE: config.django.production
# Select one of [ndjson|human_readable]
DJANGO_LOGGING_FORMATTER: human_readable
# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
# Applies to both Django and Celery Workers
DJANGO_LOGGING_LEVEL: INFO
# Defaults to the maximum available based on CPU cores if not set.
DJANGO_WORKERS: 2
# Token lifetime is in minutes
DJANGO_ACCESS_TOKEN_LIFETIME: "30"
# Token lifetime is in minutes
DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
DJANGO_CACHE_MAX_AGE: "3600"
DJANGO_STALE_WHILE_REVALIDATE: "60"
DJANGO_MANAGE_DB_PARTITIONS: "False"
# openssl genrsa -out private.pem 2048
DJANGO_TOKEN_SIGNING_KEY:
# openssl rsa -in private.pem -pubout -out public.pem
DJANGO_TOKEN_VERIFYING_KEY:
# openssl rand -base64 32
DJANGO_SECRETS_ENCRYPTION_KEY:
DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400
releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
releaseConfigPath: prowler/config/config.yaml
mainConfig:
# AWS Configuration
aws:
# AWS Global Configuration
# aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
mute_non_default_regions: False
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
# Mutelist:
# Accounts:
# "*":
# Checks:
# "*":
# Regions:
# - "ap-southeast-1"
# - "ap-southeast-2"
# Resources:
# - "*"
# AWS IAM Configuration
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
max_unused_access_keys_days: 45
# aws.iam_user_console_access_unused --> CIS recommends 45 days
max_console_access_days: 45
# AWS EC2 Configuration
# aws.ec2_elastic_ip_shodan
# TODO: create common config
shodan_api_key: null
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
# allowed network interface types for security groups open to the Internet
ec2_allowed_interface_types:
[
"api_gateway_managed",
"vpc_endpoint",
]
# allowed network interface owners for security groups open to the Internet
ec2_allowed_instance_owners:
[
"amazon-elb"
]
# aws.ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports
ec2_high_risk_ports:
[
25,
110,
135,
143,
445,
3000,
4333,
5000,
5500,
8080,
8088,
]
# AWS ECS Configuration
# aws.ecs_service_fargate_latest_platform_version
fargate_linux_latest_version: "1.4.0"
fargate_windows_latest_version: "1.0.0"
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# AWS SSM Configuration (aws.ssm_documents_set_as_public)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
# Multi account environment: Any additional trusted account numbers should be added to the list, e.g.
# trusted_account_ids : ["123456789012", "098765432109", "678901234567"]
trusted_account_ids: []
# AWS Cloudwatch Configuration
# aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
log_group_retention_days: 365
# AWS CloudFormation Configuration
# cloudformation_stack_cdktoolkit_bootstrap_version --> by default is 21
recommended_cdk_bootstrap_version: 21
# AWS AppStream Session Configuration
# aws.appstream_fleet_session_idle_disconnect_timeout
max_idle_disconnect_timeout_in_seconds: 600 # 10 Minutes
# aws.appstream_fleet_session_disconnect_timeout
max_disconnect_timeout_in_seconds: 300 # 5 Minutes
# aws.appstream_fleet_maximum_session_duration
max_session_duration_seconds: 36000 # 10 Hours
# AWS Lambda Configuration
# aws.awslambda_function_using_supported_runtimes
obsolete_lambda_runtimes:
[
"java8",
"go1.x",
"provided",
"python3.6",
"python2.7",
"python3.7",
"nodejs4.3",
"nodejs4.3-edge",
"nodejs6.10",
"nodejs",
"nodejs8.10",
"nodejs10.x",
"nodejs12.x",
"nodejs14.x",
"nodejs16.x",
"dotnet5.0",
"dotnet7",
"dotnetcore1.0",
"dotnetcore2.0",
"dotnetcore2.1",
"dotnetcore3.1",
"ruby2.5",
"ruby2.7",
]
# aws.awslambda_function_vpc_is_in_multi_azs
lambda_min_azs: 2
# AWS Organizations
# aws.organizations_scp_check_deny_regions
# aws.organizations_enabled_regions: [
# "eu-central-1",
# "eu-west-1",
# "us-east-1"
# ]
organizations_enabled_regions: []
organizations_trusted_delegated_administrators: []
# AWS ECR
# aws.ecr_repositories_scan_vulnerabilities_in_latest_image
# CRITICAL
# HIGH
# MEDIUM
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
# AWS Trusted Advisor
# aws.trustedadvisor_premium_support_plan_subscribed
verify_premium_support_plans: True
# AWS CloudTrail Configuration
# aws.cloudtrail_threat_detection_privilege_escalation
threat_detection_privilege_escalation_threshold: 0.2 # Percentage of actions found to decide if it is a privilege escalation attack event, by default is 0.2 (20%)
threat_detection_privilege_escalation_minutes: 1440 # Past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
threat_detection_privilege_escalation_actions:
[
"AddPermission",
"AddRoleToInstanceProfile",
"AddUserToGroup",
"AssociateAccessPolicy",
"AssumeRole",
"AttachGroupPolicy",
"AttachRolePolicy",
"AttachUserPolicy",
"ChangePassword",
"CreateAccessEntry",
"CreateAccessKey",
"CreateDevEndpoint",
"CreateEventSourceMapping",
"CreateFunction",
"CreateGroup",
"CreateJob",
"CreateKeyPair",
"CreateLoginProfile",
"CreatePipeline",
"CreatePolicyVersion",
"CreateRole",
"CreateStack",
"DeleteRolePermissionsBoundary",
"DeleteRolePolicy",
"DeleteUserPermissionsBoundary",
"DeleteUserPolicy",
"DetachRolePolicy",
"DetachUserPolicy",
"GetCredentialsForIdentity",
"GetId",
"GetPolicyVersion",
"GetUserPolicy",
"Invoke",
"ModifyInstanceAttribute",
"PassRole",
"PutGroupPolicy",
"PutPipelineDefinition",
"PutRolePermissionsBoundary",
"PutRolePolicy",
"PutUserPermissionsBoundary",
"PutUserPolicy",
"ReplaceIamInstanceProfileAssociation",
"RunInstances",
"SetDefaultPolicyVersion",
"UpdateAccessKey",
"UpdateAssumeRolePolicy",
"UpdateDevEndpoint",
"UpdateEventSourceMapping",
"UpdateFunctionCode",
"UpdateJob",
"UpdateLoginProfile",
]
# aws.cloudtrail_threat_detection_enumeration
threat_detection_enumeration_threshold: 0.3 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
threat_detection_enumeration_minutes: 1440 # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
threat_detection_enumeration_actions:
[
"DescribeAccessEntry",
"DescribeAccountAttributes",
"DescribeAvailabilityZones",
"DescribeBundleTasks",
"DescribeCarrierGateways",
"DescribeClientVpnRoutes",
"DescribeCluster",
"DescribeDhcpOptions",
"DescribeFlowLogs",
"DescribeImages",
"DescribeInstanceAttribute",
"DescribeInstanceInformation",
"DescribeInstanceTypes",
"DescribeInstances",
"DescribeInstances",
"DescribeKeyPairs",
"DescribeLogGroups",
"DescribeLogStreams",
"DescribeOrganization",
"DescribeRegions",
"DescribeSecurityGroups",
"DescribeSnapshotAttribute",
"DescribeSnapshotTierStatus",
"DescribeSubscriptionFilters",
"DescribeTransitGatewayMulticastDomains",
"DescribeVolumes",
"DescribeVolumesModifications",
"DescribeVpcEndpointConnectionNotifications",
"DescribeVpcs",
"GetAccount",
"GetAccountAuthorizationDetails",
"GetAccountSendingEnabled",
"GetBucketAcl",
"GetBucketLogging",
"GetBucketPolicy",
"GetBucketReplication",
"GetBucketVersioning",
"GetCallerIdentity",
"GetCertificate",
"GetConsoleScreenshot",
"GetCostAndUsage",
"GetDetector",
"GetEbsDefaultKmsKeyId",
"GetEbsEncryptionByDefault",
"GetFindings",
"GetFlowLogsIntegrationTemplate",
"GetIdentityVerificationAttributes",
"GetInstances",
"GetIntrospectionSchema",
"GetLaunchTemplateData",
"GetLaunchTemplateData",
"GetLogRecord",
"GetParameters",
"GetPolicyVersion",
"GetPublicAccessBlock",
"GetQueryResults",
"GetRegions",
"GetSMSAttributes",
"GetSMSSandboxAccountStatus",
"GetSendQuota",
"GetTransitGatewayRouteTableAssociations",
"GetUserPolicy",
"HeadObject",
"ListAccessKeys",
"ListAccounts",
"ListAllMyBuckets",
"ListAssociatedAccessPolicies",
"ListAttachedUserPolicies",
"ListClusters",
"ListDetectors",
"ListDomains",
"ListFindings",
"ListHostedZones",
"ListIPSets",
"ListIdentities",
"ListInstanceProfiles",
"ListObjects",
"ListOrganizationalUnitsForParent",
"ListOriginationNumbers",
"ListPolicyVersions",
"ListRoles",
"ListRoles",
"ListRules",
"ListServiceQuotas",
"ListSubscriptions",
"ListTargetsByRule",
"ListTopics",
"ListUsers",
"LookupEvents",
"Search",
]
# aws.cloudtrail_threat_detection_llm_jacking
threat_detection_llm_jacking_threshold: 0.4 # Percentage of actions found to decide if it is an LLM Jacking attack event, by default is 0.4 (40%)
threat_detection_llm_jacking_minutes: 1440 # Past minutes to search from now for LLM Jacking attacks, by default is 1440 minutes (24 hours)
threat_detection_llm_jacking_actions:
[
"PutUseCaseForModelAccess", # Submits a use case for model access, providing justification (Write).
"PutFoundationModelEntitlement", # Grants entitlement for accessing a foundation model (Write).
"PutModelInvocationLoggingConfiguration", # Configures logging for model invocations (Write).
"CreateFoundationModelAgreement", # Creates a new agreement to use a foundation model (Write).
"InvokeModel", # Invokes a specified Bedrock model for inference using provided prompt and parameters (Read).
"InvokeModelWithResponseStream", # Invokes a Bedrock model for inference with real-time token streaming (Read).
"GetUseCaseForModelAccess", # Retrieves an existing use case for model access (Read).
"GetModelInvocationLoggingConfiguration", # Fetches the logging configuration for model invocations (Read).
"GetFoundationModelAvailability", # Checks the availability of a foundation model for use (Read).
"ListFoundationModelAgreementOffers", # Lists available agreement offers for accessing foundation models (List).
"ListFoundationModels", # Lists the available foundation models in Bedrock (List).
"ListProvisionedModelThroughputs", # Lists the provisioned throughput for previously created models (List).
]
# AWS RDS Configuration
# aws.rds_instance_backup_enabled
# Whether to check RDS instance replicas or not
check_rds_instance_replicas: False
# AWS ACM Configuration
# aws.acm_certificates_expiration_check
days_to_expire_threshold: 7
# aws.acm_certificates_with_secure_key_algorithms
insecure_key_algorithms:
[
"RSA-1024",
"P-192",
"SHA-1",
]
# AWS EKS Configuration
# aws.eks_control_plane_logging_all_types_enabled
# EKS control plane logging types that must be enabled
eks_required_log_types:
[
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler",
]
# aws.eks_cluster_uses_a_supported_version
# EKS clusters must be version 1.28 or higher
eks_cluster_oldest_version_supported: "1.28"
# AWS CodeBuild Configuration
# aws.codebuild_project_no_secrets_in_variables
# CodeBuild sensitive variables that are excluded from the check
excluded_sensitive_environment_variables:
[
]
# AWS ELB Configuration
# aws.elb_is_in_multiple_az
# Minimum number of Availability Zones that a CLB must be in
elb_min_azs: 2
# AWS ELBv2 Configuration
# aws.elbv2_is_in_multiple_az
# Minimum number of Availability Zones that an ELBv2 must be in
elbv2_min_azs: 2
# AWS Secrets Configuration
# Patterns to ignore in the secrets checks
secrets_ignore_patterns: []
# AWS Secrets Manager Configuration
# aws.secretsmanager_secret_unused
# Maximum number of days a secret can be unused
max_days_secret_unused: 90
# aws.secretsmanager_secret_rotated_periodically
# Maximum number of days before a secret should be rotated
max_days_secret_unrotated: 90
# AWS Kinesis Configuration
# Minimum retention period in hours for Kinesis streams
min_kinesis_stream_retention_hours: 168 # 7 days
# Azure Configuration
azure:
# Azure Network Configuration
# azure.network_public_ip_shodan
# TODO: create common config
shodan_api_key: null
# Azure App Service
# azure.app_ensure_php_version_is_latest
php_latest_version: "8.2"
# azure.app_ensure_python_version_is_latest
python_latest_version: "3.12"
# azure.app_ensure_java_version_is_latest
java_latest_version: "17"
# Azure SQL Server
# azure.sqlserver_minimal_tls_version
recommended_minimal_tls_versions:
[
"1.2",
"1.3",
]
# GCP Configuration
gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
# Kubernetes Configuration
kubernetes:
# Kubernetes API Server
# kubernetes.apiserver_audit_log_maxbackup_set
audit_log_maxbackup: 10
# kubernetes.apiserver_audit_log_maxsize_set
audit_log_maxsize: 100
# kubernetes.apiserver_audit_log_maxage_set
audit_log_maxage: 30
# kubernetes.apiserver_strong_ciphers_only
apiserver_strong_ciphers:
[
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
]
# Kubelet
# kubernetes.kubelet_strong_ciphers_only
kubelet_strong_ciphers:
[
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
]
# This is for the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 80
# This block is for setting up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
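# A minimal sketch of installing this chart with a few overrides. The chart
# path, release name, and values below are illustrative assumptions, not part
# of the chart itself:
#   helm install prowler-api ./prowler-api \
#     --set secrets.POSTGRES_HOST=postgres-db \
#     --set secrets.POSTGRES_PASSWORD=changeme \
#     --set containers.prowler-api.image.tag=5.1.1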

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,6 +0,0 @@
apiVersion: v2
name: prowler-ui
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "5.1.1"

View File

@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-ui.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-ui.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "prowler-ui.labels" -}}
helm.sh/chart: {{ include "prowler-ui.chart" . }}
{{ include "prowler-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "prowler-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "prowler-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler-ui.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler-ui.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
envFrom:
- secretRef:
name: {{ include "prowler-ui.fullname" $ }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,43 +0,0 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- with .pathType }}
pathType: {{ . }}
{{- end }}
backend:
service:
name: {{ include "prowler-ui.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $k, $v := .Values.secrets }}
{{ $k }}: {{ $v | toString | b64enc | quote }}
{{- end }}

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "prowler-ui.selectorLabels" . | nindent 4 }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler-ui.serviceAccountName" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -1,132 +0,0 @@
# Default values for prowler-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This will set the replicaset count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: prowlercloud/prowler-ui
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
# This is for the secrets for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
secrets:
SITE_URL: http://localhost:3000
API_BASE_URL: http://prowler-api:8080/api/v1
NEXT_PUBLIC_API_DOCS_URL: http://prowler-api:8080/api/v1/docs
AUTH_TRUST_HOST: True
UI_PORT: 3000
# openssl rand -base64 32
AUTH_SECRET:
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 3000
# This block is for setting up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -39,3 +39,4 @@ spec:
path: {{ $value }}
{{- end }}
{{- end }}

Some files were not shown because too many files have changed in this diff.