Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-02-08 04:57:03 +00:00)
Compare commits: review_met... → 5.4.0 (301 commits; first 926f449ae6, last fef332a591)
34 .env
@@ -3,7 +3,7 @@
# For production, it is recommended to use a secure method to store these variables and change the default secret keys.

#### Prowler UI Configuration ####
PROWLER_UI_VERSION="latest"
PROWLER_UI_VERSION="stable"
SITE_URL=http://localhost:3000
API_BASE_URL=http://prowler-api:8080/api/v1
NEXT_PUBLIC_API_DOCS_URL=http://prowler-api:8080/api/v1/docs
@@ -30,6 +30,30 @@ VALKEY_HOST=valkey
VALKEY_PORT=6379
VALKEY_DB=0

# API scan settings

# The path to the directory where scan output should be stored
DJANGO_TMP_OUTPUT_DIRECTORY = "/tmp/prowler_api_output"

# The maximum number of findings to process in a single batch
DJANGO_FINDINGS_BATCH_SIZE = 1000

# The AWS access key to be used when uploading scan output to an S3 bucket
# If left empty, default AWS credentials resolution behavior will be used
DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID=""

# The AWS secret key to be used when uploading scan output to an S3 bucket
DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY=""

# An optional AWS session token
DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN=""

# The AWS region where your S3 bucket is located (e.g., "us-east-1")
DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION=""

# The name of the S3 bucket where scan output should be stored
DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET=""

# Django settings
DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1,prowler-api
DJANGO_BIND_ADDRESS=0.0.0.0
@@ -92,3 +116,11 @@ jQIDAQAB
# openssl rand -base64 32
DJANGO_SECRETS_ENCRYPTION_KEY="oE/ltOhp/n1TdbHjVmzcjDPLcLA41CVI/4Rk+UB5ESc="
DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
DJANGO_SENTRY_DSN=

# Sentry settings
SENTRY_ENVIRONMENT=local
SENTRY_RELEASE=local

#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.4.0
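The new DJANGO_OUTPUT_S3_* variables control where the API uploads its scan report output. A minimal sketch of how such settings could be consumed (boto3 and the helper below are illustrative assumptions, not the API's actual upload code): explicit credentials are only passed when set, otherwise the default AWS credential resolution chain is used, matching the comment in the file.

```python
import os

import boto3  # assumed dependency for this sketch


def upload_scan_output(local_path: str, key: str) -> None:
    """Upload a scan report to S3, honoring the DJANGO_OUTPUT_S3_* settings."""
    bucket = os.environ["DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET"]
    # Only keep the variables that are actually set, so empty values fall back
    # to the default AWS credential resolution behavior.
    session_kwargs = {
        "aws_access_key_id": os.environ.get("DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID") or None,
        "aws_secret_access_key": os.environ.get("DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY") or None,
        "aws_session_token": os.environ.get("DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN") or None,
        "region_name": os.environ.get("DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION") or None,
    }
    session = boto3.session.Session(**{k: v for k, v in session_kwargs.items() if v})
    session.client("s3").upload_file(local_path, bucket, key)
```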
10 .github/labeler.yml (vendored)
@@ -92,3 +92,13 @@ component/api:
component/ui:
  - changed-files:
      - any-glob-to-any-file: "ui/**"

compliance:
  - changed-files:
      - any-glob-to-any-file: "prowler/compliance/**"
      - any-glob-to-any-file: "prowler/lib/outputs/compliance/**"
      - any-glob-to-any-file: "tests/lib/outputs/compliance/**"

review-django-migrations:
  - changed-files:
      - any-glob-to-any-file: "api/src/backend/api/migrations/**"
8 .github/pull_request_template.md (vendored)
@@ -15,7 +15,13 @@ Please include a summary of the change and which issue is fixed. List any depend
- [ ] Review if the code is being covered by tests.
- [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
- [ ] Review if backport is needed.
- [ ] Review if is needed to change the [Readme.md](https://github.com/prowler-cloud/prowler/blob/master/README.md)

#### API
- [ ] Verify if API specs need to be regenerated.
- [ ] Check if version updates are required (e.g., specs, Poetry, etc.).
- [ ] Ensure new entries are added to [CHANGELOG.md](https://github.com/prowler-cloud/prowler/blob/master/api/CHANGELOG.md), if applicable.

### License

By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
@@ -63,6 +63,12 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set short git commit SHA
        id: vars
        run: |
          shortSha=$(git rev-parse --short ${{ github.sha }})
          echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
@@ -82,6 +88,7 @@ jobs:
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.SHORT_SHA }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

@@ -96,3 +103,12 @@ jobs:
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Trigger deployment
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: prowler-api-deploy
          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ env.SHORT_SHA }}"}'
20 .github/workflows/api-pull-request.yml (vendored)
@@ -6,6 +6,7 @@ on:
      - "master"
      - "v5.*"
    paths:
      - ".github/workflows/api-pull-request.yml"
      - "api/**"
  pull_request:
    branches:
@@ -14,7 +15,6 @@ on:
    paths:
      - "api/**"


env:
  POSTGRES_HOST: localhost
  POSTGRES_PORT: 5432
@@ -26,7 +26,8 @@ env:
  VALKEY_HOST: localhost
  VALKEY_PORT: 6379
  VALKEY_DB: 0

  API_WORKING_DIR: ./api
  IMAGE_NAME: prowler-api

jobs:
  test:
@@ -171,3 +172,18 @@
        CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      with:
        flags: api
  test-container-build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build Container
        uses: docker/build-push-action@v6
        with:
          context: ${{ env.API_WORKING_DIR }}
          push: false
          tags: ${{ env.IMAGE_NAME }}:latest
          outputs: type=docker
          cache-from: type=gha
          cache-to: type=gha,mode=max
23 .github/workflows/conventional-commit.yml (vendored, new file)
@@ -0,0 +1,23 @@
name: Prowler - Conventional Commit

on:
  pull_request:
    types:
      - "opened"
      - "edited"
      - "synchronize"
    branches:
      - "master"
      - "v3"
      - "v4.*"
      - "v5.*"

jobs:
  conventional-commit-check:
    runs-on: ubuntu-latest
    steps:
      - name: conventional-commit-check
        id: conventional-commit-check
        uses: agenthunt/conventional-commit-checker-action@v2.0.0
        with:
          pr-title-regex: '^([^\s(]+)(?:\(([^)]+)\))?: (.+)'
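The pr-title-regex above enforces a Conventional Commit style title: a type, an optional parenthesized scope, a colon and space, then a description. A quick sketch of how the pattern behaves (illustrative only; the workflow itself applies the regex through the action):

```python
import re

# Same pattern as pr-title-regex in the workflow above.
PR_TITLE = re.compile(r"^([^\s(]+)(?:\(([^)]+)\))?: (.+)")

for title in (
    "feat(api): add scan report output",
    "fix: correct UUIDv7 range filter",
    "Add scan reports",
):
    match = PR_TITLE.match(title)
    if match:
        commit_type, scope, description = match.groups()
        print(f"OK   type={commit_type!r} scope={scope!r} description={description!r}")
    else:
        print(f"FAIL {title!r} does not follow the conventional commit format")
```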
2 .github/workflows/find-secrets.yml (vendored)
@@ -11,7 +11,7 @@ jobs:
        with:
          fetch-depth: 0
      - name: TruffleHog OSS
        uses: trufflesecurity/trufflehog@v3.88.2
        uses: trufflesecurity/trufflehog@v3.88.14
        with:
          path: ./
          base: ${{ github.event.repository.default_branch }}
2 .github/workflows/sdk-pull-request.yml (vendored)
@@ -39,6 +39,8 @@ jobs:
            .backportrc.json
            .env
            docker-compose*
            examples/**
            .gitignore

      - name: Install poetry
        if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
@@ -63,6 +63,12 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set short git commit SHA
        id: vars
        run: |
          shortSha=$(git rev-parse --short ${{ github.sha }})
          echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
@@ -78,10 +84,13 @@ jobs:
        uses: docker/build-push-action@v6
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ env.SHORT_SHA }}
          # Set push: false for testing
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.SHORT_SHA }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

@@ -90,9 +99,20 @@ jobs:
        uses: docker/build-push-action@v6
        with:
          context: ${{ env.WORKING_DIRECTORY }}
          build-args: |
            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${{ env.RELEASE_TAG }}
          push: true
          tags: |
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Trigger deployment
        if: github.event_name == 'push'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
          repository: ${{ secrets.CLOUD_DISPATCH }}
          event-type: prowler-ui-deploy
          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ env.SHORT_SHA }}"}'
21 .github/workflows/ui-pull-request.yml (vendored)
@@ -6,6 +6,7 @@ on:
      - "master"
      - "v5.*"
    paths:
      - ".github/workflows/ui-pull-request.yml"
      - "ui/**"
  pull_request:
    branches:
@@ -13,6 +14,9 @@ on:
      - "v5.*"
    paths:
      - 'ui/**'
env:
  UI_WORKING_DIR: ./ui
  IMAGE_NAME: prowler-ui

jobs:
  test-and-coverage:
@@ -39,3 +43,20 @@ jobs:
      - name: Build the application
        working-directory: ./ui
        run: npm run build
  test-container-build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build Container
        uses: docker/build-push-action@v6
        with:
          context: ${{ env.UI_WORKING_DIR }}
          # Always build using `prod` target
          target: prod
          push: false
          tags: ${{ env.IMAGE_NAME }}:latest
          outputs: type=docker
          build-args: |
            NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX
3 .gitignore (vendored)
@@ -31,7 +31,7 @@ tags
*.DS_Store

# Prowler output
output/
/output

# Prowler found secrets
secrets-*/
@@ -45,6 +45,7 @@ junit-reports/
# Terraform
.terraform*
*.tfstate
*.tfstate.*

# .env
ui/.env*
@@ -27,6 +27,7 @@ repos:
    hooks:
      - id: shellcheck
        exclude: contrib

## PYTHON
  - repo: https://github.com/myint/autoflake
    rev: v2.3.1
@@ -61,8 +62,25 @@ repos:
    rev: 1.8.0
    hooks:
      - id: poetry-check
        name: API - poetry-check
        args: ["--directory=./api"]
        pass_filenames: false

      - id: poetry-lock
        args: ["--no-update"]
        name: API - poetry-lock
        args: ["--no-update", "--directory=./api"]
        pass_filenames: false

      - id: poetry-check
        name: SDK - poetry-check
        args: ["--directory=./"]
        pass_filenames: false

      - id: poetry-lock
        name: SDK - poetry-lock
        args: ["--no-update", "--directory=./"]
        pass_filenames: false


  - repo: https://github.com/hadolint/hadolint
    rev: v2.13.0-beta
@@ -1,4 +1,4 @@
FROM python:3.12.8-alpine3.20
FROM python:3.12.9-alpine3.20

LABEL maintainer="https://github.com/prowler-cloud/prowler"
34 README.md
@@ -71,10 +71,13 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe

| Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
|---|---|---|---|---|
| AWS | 561 | 81 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
| GCP | 77 | 13 -> `prowler gcp --list-services` | 4 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 139 | 18 -> `prowler azure --list-services` | 4 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
| AWS | 564 | 82 | 33 | 10 |
| GCP | 77 | 13 | 5 | 3 |
| Azure | 140 | 18 | 6 | 3 |
| Kubernetes | 83 | 7 | 2 | 7 |
| Microsoft365 | 5 | 2 | 1 | 0 |

> You can list the checks, services, compliance frameworks and categories with `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.

# 💻 Installation

@@ -116,7 +119,7 @@ docker compose up -d
git clone https://github.com/prowler-cloud/prowler
cd prowler/api
poetry install
poetry shell
eval $(poetry env activate)
set -a
source .env
docker compose up postgres valkey -d
@@ -124,6 +127,11 @@ cd src/backend
python manage.py migrate --database admin
gunicorn -c config/guniconf.py config.wsgi:application
```
> [!IMPORTANT]
> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
>
> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment

> Now, you can access the API documentation at http://localhost:8080/api/v1/docs.

@@ -133,7 +141,7 @@ gunicorn -c config/guniconf.py config.wsgi:application
git clone https://github.com/prowler-cloud/prowler
cd prowler/api
poetry install
poetry shell
eval $(poetry env activate)
set -a
source .env
cd src/backend
@@ -146,7 +154,7 @@ python -m celery -A config.celery worker -l info -E
git clone https://github.com/prowler-cloud/prowler
cd prowler/api
poetry install
poetry shell
eval $(poetry env activate)
set -a
source .env
cd src/backend
@@ -167,7 +175,7 @@ npm start

## Prowler CLI
### Pip package
Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python >= 3.9, < 3.13:
Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python > 3.9.1, < 3.13:

```console
pip install prowler
@@ -197,15 +205,21 @@ The container images are available here:

### From GitHub

Python >= 3.9, < 3.13 is required with pip and poetry:
Python > 3.9.1, < 3.13 is required with pip and poetry:

``` console
git clone https://github.com/prowler-cloud/prowler
cd prowler
poetry shell
eval $(poetry env activate)
poetry install
python prowler.py -v
```
> [!IMPORTANT]
> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
>
> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment

> If you want to clone Prowler from Windows, use `git config core.longpaths true` to allow long file paths.
# 📐✏️ High level architecture
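The README change tightens the supported interpreter range to Python > 3.9.1 and < 3.13. A tiny hypothetical pre-flight check mirroring that constraint (not part of Prowler itself):

```python
import sys

# Fail fast if the running interpreter is outside the supported range.
if not ((3, 9, 1) < sys.version_info[:3] < (3, 13, 0)):
    raise SystemExit(f"Unsupported Python {sys.version.split()[0]}; need >3.9.1 and <3.13")
print("Python version OK for `pip install prowler`")
```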
@@ -23,6 +23,7 @@ DJANGO_SECRETS_ENCRYPTION_KEY=""
DJANGO_MANAGE_DB_PARTITIONS=[True|False]
DJANGO_CELERY_DEADLOCK_ATTEMPTS=5
DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
DJANGO_SENTRY_DSN=

# PostgreSQL settings
# If running django and celery on host, use 'localhost', else use 'postgres-db'
@@ -39,3 +40,16 @@ POSTGRES_DB=prowler_db
VALKEY_HOST=[localhost|valkey]
VALKEY_PORT=6379
VALKEY_DB=0

# Sentry settings
SENTRY_ENVIRONMENT=local
SENTRY_RELEASE=local

# Social login credentials
DJANGO_GOOGLE_OAUTH_CLIENT_ID=""
DJANGO_GOOGLE_OAUTH_CLIENT_SECRET=""
DJANGO_GOOGLE_OAUTH_CALLBACK_URL=""

DJANGO_GITHUB_OAUTH_CLIENT_ID=""
DJANGO_GITHUB_OAUTH_CLIENT_SECRET=""
DJANGO_GITHUB_OAUTH_CALLBACK_URL=""
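The new social login variables feed the Google and GitHub providers used by the API's social login (see api/src/backend/api/adapters.py further down). A sketch of how they might be wired into django-allauth settings; the exact settings layout used by the project is an assumption:

```python
import os

# Hypothetical settings.py excerpt: map the DJANGO_*_OAUTH_* variables onto
# django-allauth provider configuration.
SOCIALACCOUNT_PROVIDERS = {
    "google": {
        "APP": {
            "client_id": os.environ.get("DJANGO_GOOGLE_OAUTH_CLIENT_ID", ""),
            "secret": os.environ.get("DJANGO_GOOGLE_OAUTH_CLIENT_SECRET", ""),
            "key": "",
        },
    },
    "github": {
        "APP": {
            "client_id": os.environ.get("DJANGO_GITHUB_OAUTH_CLIENT_ID", ""),
            "secret": os.environ.get("DJANGO_GITHUB_OAUTH_CLIENT_SECRET", ""),
            "key": "",
        },
    },
}

# Callback URLs registered with each provider (consumed by the login views).
GOOGLE_OAUTH_CALLBACK_URL = os.environ.get("DJANGO_GOOGLE_OAUTH_CALLBACK_URL", "")
GITHUB_OAUTH_CALLBACK_URL = os.environ.get("DJANGO_GITHUB_OAUTH_CALLBACK_URL", "")
```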
27 api/CHANGELOG.md (new file)
@@ -0,0 +1,27 @@
# Prowler API Changelog

All notable changes to the **Prowler API** are documented in this file.

---

## [v1.5.0] (Prowler v5.4.0)

### Added
- Social login integration with Google and GitHub [(#6906)](https://github.com/prowler-cloud/prowler/pull/6906)
- Add API scan report system, now all scans launched from the API will generate a compressed file with the report in OCSF, CSV and HTML formats [(#6878)](https://github.com/prowler-cloud/prowler/pull/6878).
- Configurable Sentry integration [(#6874)](https://github.com/prowler-cloud/prowler/pull/6874)

### Changed
- Optimized `GET /findings` endpoint to improve response time and size [(#7019)](https://github.com/prowler-cloud/prowler/pull/7019).

---

## [v1.4.0] (Prowler v5.3.0)

### Changed
- Daily scheduled scan instances are now created beforehand with `SCHEDULED` state [(#6700)](https://github.com/prowler-cloud/prowler/pull/6700).
- Findings endpoints now require at least one date filter [(#6800)](https://github.com/prowler-cloud/prowler/pull/6800).
- Findings metadata endpoint received a performance improvement [(#6863)](https://github.com/prowler-cloud/prowler/pull/6863).
- Increase the allowed length of the provider UID for Kubernetes providers [(#6869)](https://github.com/prowler-cloud/prowler/pull/6869).

---
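The scan report system added in v1.5.0 bundles the OCSF, CSV and HTML outputs of a scan into a single compressed file. A minimal sketch of that packaging step (the file layout, extensions and function name are assumptions, not the API's actual implementation):

```python
import zipfile
from pathlib import Path


def compress_scan_output(output_dir: str, scan_id: str) -> Path:
    """Zip every report generated for a scan (OCSF/CSV/HTML) into one archive."""
    output_path = Path(output_dir)  # e.g. DJANGO_TMP_OUTPUT_DIRECTORY
    archive = output_path / f"{scan_id}.zip"
    with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        for report in output_path.glob(f"{scan_id}*"):
            # Assumed mapping: OCSF as .json, plus .csv and .html reports.
            if report.suffix in {".json", ".csv", ".html"}:
                zf.write(report, arcname=report.name)
    return archive
```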
@@ -269,3 +269,66 @@ poetry shell
cd src/backend
pytest
```

# Custom commands

Django provides a way to create custom commands that can be run from the command line.

> These commands can be found in: ```prowler/api/src/backend/api/management/commands```

To run a custom command, you need to be in the `prowler/api/src/backend` directory and run:

```console
poetry shell
python manage.py <command_name>
```

## Generate dummy data

```console
python manage.py findings --tenant <TENANT_ID> --findings <NUM_FINDINGS> --resources <NUM_RESOURCES> --batch <TRANSACTION_BATCH_SIZE> --alias <ALIAS>
```

This command creates, for a given tenant, a provider, scan and a set of findings and resources related altogether.

> Scan progress and state are updated in real time.
> - 0-33%: Create resources.
> - 33-66%: Create findings.
> - 66%: Create resource-finding mapping.
>
> The last step is required to access the findings details, since the UI needs that to print all the information.

### Example

```console
~/backend $ poetry run python manage.py findings --tenant fffb1893-3fc7-4623-a5d9-fae47da1c528 --findings 25000 --resources 1000 --batch 5000 --alias test-script

Starting data population
Tenant: fffb1893-3fc7-4623-a5d9-fae47da1c528
Alias: test-script
Resources: 1000
Findings: 25000
Batch size: 5000


Creating resources...
100%|███████████████████████| 1/1 [00:00<00:00, 7.72it/s]
Resources created successfully.


Creating findings...
100%|███████████████████████| 5/5 [00:05<00:00, 1.09s/it]
Findings created successfully.


Creating resource-finding mappings...
100%|███████████████████████| 5/5 [00:02<00:00, 1.81it/s]
Resource-finding mappings created successfully.


Successfully populated test data.
```
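The same dummy-data command can also be driven from Python, for example in a shell or a test fixture. A sketch using Django's call_command; the keyword names are assumed to mirror the CLI flags shown above:

```python
from django.core.management import call_command

# Assumed keyword names matching the flags of `manage.py findings`.
call_command(
    "findings",
    tenant="fffb1893-3fc7-4623-a5d9-fae47da1c528",
    findings=25000,
    resources=1000,
    batch=5000,
    alias="test-script",
)
```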
@@ -28,7 +28,7 @@ start_prod_server() {

start_worker() {
    echo "Starting the worker..."
    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E
    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion -E --max-tasks-per-child 1
}

start_worker_beat() {
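The worker now also consumes the scan-reports and deletion queues and recycles each child process after a single task (--max-tasks-per-child 1), which keeps memory from long report generations bounded. A sketch of sending work to one of those queues, assuming the Celery instance in config.celery is exposed as `app`; the task name and arguments are illustrative:

```python
from config.celery import app  # the Celery app referenced by `-A config.celery`

# Illustrative only: route a hypothetical report-generation task to the
# dedicated queue consumed by the worker command above.
app.send_task(
    "generate_scan_report",
    kwargs={"tenant_id": "fffb1893-3fc7-4623-a5d9-fae47da1c528", "scan_id": "..."},
    queue="scan-reports",
)
```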
1563 api/poetry.lock (generated)
File diff suppressed because it is too large
@@ -8,11 +8,12 @@ description = "Prowler's API (Django/DRF)"
license = "Apache-2.0"
name = "prowler-api"
package-mode = false
version = "1.1.0"
version = "1.5.0"

[tool.poetry.dependencies]
celery = {extras = ["pytest"], version = "^5.4.0"}
django = "5.1.4"
dj-rest-auth = {extras = ["with_social", "jwt"], version = "7.0.1"}
django = "5.1.5"
django-celery-beat = "^2.7.0"
django-celery-results = "^2.5.1"
django-cors-headers = "4.4.0"
@@ -27,16 +28,18 @@ drf-nested-routers = "^0.94.1"
drf-spectacular = "0.27.2"
drf-spectacular-jsonapi = "0.5.1"
gunicorn = "23.0.0"
prowler = "^5.0"
prowler = {git = "https://github.com/prowler-cloud/prowler.git", branch = "v5.4"}
psycopg2-binary = "2.9.9"
pytest-celery = {extras = ["redis"], version = "^1.0.1"}
# Needed for prowler compatibility
python = ">=3.11,<3.13"
sentry-sdk = {extras = ["django"], version = "^2.20.0"}
uuid6 = "2024.7.10"

[tool.poetry.group.dev.dependencies]
bandit = "1.7.9"
coverage = "7.5.4"
django-silk = "5.3.2"
docker = "7.1.0"
freezegun = "1.5.1"
mypy = "1.10.1"
@@ -49,6 +52,7 @@ pytest-randomly = "3.15.0"
pytest-xdist = "3.6.1"
ruff = "0.5.0"
safety = "3.2.9"
tqdm = "4.67.1"
vulture = "2.14"

[tool.poetry.scripts]
57 api/src/backend/api/adapters.py (new file)
@@ -0,0 +1,57 @@
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from django.db import transaction

from api.db_router import MainRouter
from api.db_utils import rls_transaction
from api.models import Membership, Role, Tenant, User, UserRoleRelationship


class ProwlerSocialAccountAdapter(DefaultSocialAccountAdapter):
    @staticmethod
    def get_user_by_email(email: str):
        try:
            return User.objects.get(email=email)
        except User.DoesNotExist:
            return None

    def pre_social_login(self, request, sociallogin):
        # Link existing accounts with the same email address
        email = sociallogin.account.extra_data.get("email")
        if email:
            existing_user = self.get_user_by_email(email)
            if existing_user:
                sociallogin.connect(request, existing_user)

    def save_user(self, request, sociallogin, form=None):
        """
        Called after the user data is fully populated from the provider
        and is about to be saved to the DB for the first time.
        """
        with transaction.atomic(using=MainRouter.admin_db):
            user = super().save_user(request, sociallogin, form)
            user.save(using=MainRouter.admin_db)

            tenant = Tenant.objects.using(MainRouter.admin_db).create(
                name=f"{user.email.split('@')[0]} default tenant"
            )
            with rls_transaction(str(tenant.id)):
                Membership.objects.using(MainRouter.admin_db).create(
                    user=user, tenant=tenant, role=Membership.RoleChoices.OWNER
                )
                role = Role.objects.using(MainRouter.admin_db).create(
                    name="admin",
                    tenant_id=tenant.id,
                    manage_users=True,
                    manage_account=True,
                    manage_billing=True,
                    manage_providers=True,
                    manage_integrations=True,
                    manage_scans=True,
                    unlimited_visibility=True,
                )
                UserRoleRelationship.objects.using(MainRouter.admin_db).create(
                    user=user,
                    role=role,
                    tenant_id=tenant.id,
                )
            return user
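For this adapter to run on social sign-ins it has to be registered in the Django settings. A sketch of the expected wiring; SOCIALACCOUNT_ADAPTER is the standard django-allauth setting, the rest is an assumption about this project:

```python
# settings.py (sketch)
SOCIALACCOUNT_ADAPTER = "api.adapters.ProwlerSocialAccountAdapter"

# On the first Google/GitHub login the adapter links the social account to an
# existing user with the same email, or creates the user together with a
# default tenant, an OWNER membership, and an "admin" role with full permissions.
```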
@@ -1,18 +1,29 @@
ALLOWED_APPS = ("django", "socialaccount", "account", "authtoken", "silk")


class MainRouter:
    default_db = "default"
    admin_db = "admin"

    def db_for_read(self, model, **hints):  # noqa: F841
        model_table_name = model._meta.db_table
        if model_table_name.startswith("django_"):
        if model_table_name.startswith("django_") or any(
            model_table_name.startswith(f"{app}_") for app in ALLOWED_APPS
        ):
            return self.admin_db
        return None

    def db_for_write(self, model, **hints):  # noqa: F841
        model_table_name = model._meta.db_table
        if model_table_name.startswith("django_"):
        if any(model_table_name.startswith(f"{app}_") for app in ALLOWED_APPS):
            return self.admin_db
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):  # noqa: F841
        return db == self.admin_db

    def allow_relation(self, obj1, obj2, **hints):  # noqa: F841
        # Allow relations if both objects are in either "default" or "admin" db connectors
        if {obj1._state.db, obj2._state.db} <= {self.default_db, self.admin_db}:
            return True
        return None
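MainRouter sends Django's own tables plus the allauth, authtoken and silk apps to the privileged admin connection, while tenant data stays on the RLS-restricted default connection. A sketch of the settings that would activate it; the connection details below are placeholders, only the router path is taken from the imports above:

```python
# settings.py (sketch)
DATABASES = {
    "default": {  # RLS-restricted application user
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "prowler_db",
        "USER": "prowler_user",
    },
    "admin": {  # privileged connection used for Django/allauth tables and migrations
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "prowler_db",
        "USER": "prowler_admin",
    },
}

DATABASE_ROUTERS = ["api.db_router.MainRouter"]
```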
@@ -7,7 +7,7 @@ from rest_framework_json_api.serializers import ValidationError
from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY


def set_tenant(func):
def set_tenant(func=None, *, keep_tenant=False):
    """
    Decorator to set the tenant context for a Celery task based on the provided tenant_id.

@@ -40,20 +40,29 @@ def set_tenant(func):
    # The tenant context will be set before the task logic executes.
    """

    @wraps(func)
    @transaction.atomic
    def wrapper(*args, **kwargs):
        try:
            tenant_id = kwargs.pop("tenant_id")
        except KeyError:
            raise KeyError("This task requires the tenant_id")
        try:
            uuid.UUID(tenant_id)
        except ValueError:
            raise ValidationError("Tenant ID must be a valid UUID")
        with connection.cursor() as cursor:
            cursor.execute(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])
    def decorator(func):
        @wraps(func)
        @transaction.atomic
        def wrapper(*args, **kwargs):
            try:
                if not keep_tenant:
                    tenant_id = kwargs.pop("tenant_id")
                else:
                    tenant_id = kwargs["tenant_id"]
            except KeyError:
                raise KeyError("This task requires the tenant_id")
            try:
                uuid.UUID(tenant_id)
            except ValueError:
                raise ValidationError("Tenant ID must be a valid UUID")
            with connection.cursor() as cursor:
                cursor.execute(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])

            return func(*args, **kwargs)
        return func(*args, **kwargs)

        return wrapper
    return wrapper

    if func is None:
        return decorator
    else:
        return decorator(func)
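With the new signature the decorator works both bare and parametrized: @set_tenant keeps the old behaviour and pops tenant_id before calling the task, while @set_tenant(keep_tenant=True) sets the Postgres tenant variable but still passes tenant_id through to the task body. A usage sketch; the module path, task names and bodies are illustrative:

```python
from celery import shared_task

from api.decorators import set_tenant  # module path assumed from the diff context


@shared_task
@set_tenant
def scan_provider(provider_id: str):
    # tenant_id was consumed by the decorator; the RLS context is already set.
    ...


@shared_task
@set_tenant(keep_tenant=True)
def generate_scan_report(scan_id: str, tenant_id: str):
    # tenant_id is still available because keep_tenant=True left it in kwargs.
    ...


# Callers always pass tenant_id as a keyword argument, e.g.:
# scan_provider.delay(provider_id="...", tenant_id="fffb1893-3fc7-4623-a5d9-fae47da1c528")
```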
@@ -1,4 +1,4 @@
|
||||
from datetime import date, datetime, timezone
|
||||
from datetime import date, datetime, timedelta, timezone
|
||||
|
||||
from django.conf import settings
|
||||
from django.db.models import Q
|
||||
@@ -319,13 +319,41 @@ class FindingFilter(FilterSet):
|
||||
field_name="resources__type", lookup_expr="icontains"
|
||||
)
|
||||
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# resource_tag_key = CharFilter(field_name="resources__tags__key")
|
||||
# resource_tag_key__in = CharInFilter(
|
||||
# field_name="resources__tags__key", lookup_expr="in"
|
||||
# )
|
||||
# resource_tag_key__icontains = CharFilter(
|
||||
# field_name="resources__tags__key", lookup_expr="icontains"
|
||||
# )
|
||||
# resource_tag_value = CharFilter(field_name="resources__tags__value")
|
||||
# resource_tag_value__in = CharInFilter(
|
||||
# field_name="resources__tags__value", lookup_expr="in"
|
||||
# )
|
||||
# resource_tag_value__icontains = CharFilter(
|
||||
# field_name="resources__tags__value", lookup_expr="icontains"
|
||||
# )
|
||||
# resource_tags = CharInFilter(
|
||||
# method="filter_resource_tag",
|
||||
# lookup_expr="in",
|
||||
# help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
|
||||
# "separated by commas.",
|
||||
# )
|
||||
|
||||
scan = UUIDFilter(method="filter_scan_id")
|
||||
scan__in = UUIDInFilter(method="filter_scan_id_in")
|
||||
|
||||
inserted_at = DateFilter(method="filter_inserted_at", lookup_expr="date")
|
||||
inserted_at__date = DateFilter(method="filter_inserted_at", lookup_expr="date")
|
||||
inserted_at__gte = DateFilter(method="filter_inserted_at_gte")
|
||||
inserted_at__lte = DateFilter(method="filter_inserted_at_lte")
|
||||
inserted_at__gte = DateFilter(
|
||||
method="filter_inserted_at_gte",
|
||||
help_text=f"Maximum date range is {settings.FINDINGS_MAX_DAYS_IN_RANGE} days.",
|
||||
)
|
||||
inserted_at__lte = DateFilter(
|
||||
method="filter_inserted_at_lte",
|
||||
help_text=f"Maximum date range is {settings.FINDINGS_MAX_DAYS_IN_RANGE} days.",
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Finding
|
||||
@@ -353,6 +381,52 @@ class FindingFilter(FilterSet):
|
||||
},
|
||||
}
|
||||
|
||||
def filter_queryset(self, queryset):
|
||||
if not (self.data.get("scan") or self.data.get("scan__in")) and not (
|
||||
self.data.get("inserted_at")
|
||||
or self.data.get("inserted_at__date")
|
||||
or self.data.get("inserted_at__gte")
|
||||
or self.data.get("inserted_at__lte")
|
||||
):
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
"detail": "At least one date filter is required: filter[inserted_at], filter[inserted_at.gte], "
|
||||
"or filter[inserted_at.lte].",
|
||||
"status": 400,
|
||||
"source": {"pointer": "/data/attributes/inserted_at"},
|
||||
"code": "required",
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
gte_date = (
|
||||
datetime.strptime(self.data.get("inserted_at__gte"), "%Y-%m-%d").date()
|
||||
if self.data.get("inserted_at__gte")
|
||||
else datetime.now(timezone.utc).date()
|
||||
)
|
||||
lte_date = (
|
||||
datetime.strptime(self.data.get("inserted_at__lte"), "%Y-%m-%d").date()
|
||||
if self.data.get("inserted_at__lte")
|
||||
else datetime.now(timezone.utc).date()
|
||||
)
|
||||
|
||||
if abs(lte_date - gte_date) > timedelta(
|
||||
days=settings.FINDINGS_MAX_DAYS_IN_RANGE
|
||||
):
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
"detail": f"The date range cannot exceed {settings.FINDINGS_MAX_DAYS_IN_RANGE} days.",
|
||||
"status": 400,
|
||||
"source": {"pointer": "/data/attributes/inserted_at"},
|
||||
"code": "invalid",
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
return super().filter_queryset(queryset)
|
||||
|
||||
# Convert filter values to UUIDv7 values for use with partitioning
|
||||
def filter_scan_id(self, queryset, name, value):
|
||||
try:
|
||||
@@ -373,9 +447,7 @@ class FindingFilter(FilterSet):
|
||||
)
|
||||
|
||||
return (
|
||||
queryset.filter(id__gte=start)
|
||||
.filter(id__lt=end)
|
||||
.filter(scan__id=value_uuid)
|
||||
queryset.filter(id__gte=start).filter(id__lt=end).filter(scan_id=value_uuid)
|
||||
)
|
||||
|
||||
def filter_scan_id_in(self, queryset, name, value):
|
||||
@@ -400,31 +472,42 @@ class FindingFilter(FilterSet):
|
||||
]
|
||||
)
|
||||
if start == end:
|
||||
return queryset.filter(id__gte=start).filter(scan__id__in=uuid_list)
|
||||
return queryset.filter(id__gte=start).filter(scan_id__in=uuid_list)
|
||||
else:
|
||||
return (
|
||||
queryset.filter(id__gte=start)
|
||||
.filter(id__lt=end)
|
||||
.filter(scan__id__in=uuid_list)
|
||||
.filter(scan_id__in=uuid_list)
|
||||
)
|
||||
|
||||
def filter_inserted_at(self, queryset, name, value):
|
||||
value = self.maybe_date_to_datetime(value)
|
||||
start = uuid7_start(datetime_to_uuid7(value))
|
||||
datetime_value = self.maybe_date_to_datetime(value)
|
||||
start = uuid7_start(datetime_to_uuid7(datetime_value))
|
||||
end = uuid7_start(datetime_to_uuid7(datetime_value + timedelta(days=1)))
|
||||
|
||||
return queryset.filter(id__gte=start).filter(inserted_at__date=value)
|
||||
return queryset.filter(id__gte=start, id__lt=end)
|
||||
|
||||
def filter_inserted_at_gte(self, queryset, name, value):
|
||||
value = self.maybe_date_to_datetime(value)
|
||||
start = uuid7_start(datetime_to_uuid7(value))
|
||||
datetime_value = self.maybe_date_to_datetime(value)
|
||||
start = uuid7_start(datetime_to_uuid7(datetime_value))
|
||||
|
||||
return queryset.filter(id__gte=start).filter(inserted_at__gte=value)
|
||||
return queryset.filter(id__gte=start)
|
||||
|
||||
def filter_inserted_at_lte(self, queryset, name, value):
|
||||
value = self.maybe_date_to_datetime(value)
|
||||
end = uuid7_start(datetime_to_uuid7(value))
|
||||
datetime_value = self.maybe_date_to_datetime(value)
|
||||
end = uuid7_start(datetime_to_uuid7(datetime_value + timedelta(days=1)))
|
||||
|
||||
return queryset.filter(id__lte=end).filter(inserted_at__lte=value)
|
||||
return queryset.filter(id__lt=end)
|
||||
|
||||
def filter_resource_tag(self, queryset, name, value):
|
||||
overall_query = Q()
|
||||
for key_value_pair in value:
|
||||
tag_key, tag_value = key_value_pair.split(":", 1)
|
||||
overall_query |= Q(
|
||||
resources__tags__key__icontains=tag_key,
|
||||
resources__tags__value__icontains=tag_value,
|
||||
)
|
||||
return queryset.filter(overall_query).distinct()
|
||||
|
||||
@staticmethod
|
||||
def maybe_date_to_datetime(value):
|
||||
|
||||
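The rewritten filter_inserted_at, filter_inserted_at_gte and filter_inserted_at_lte methods above translate date filters into UUIDv7 bounds on the primary key, so Postgres can prune partitions by id range instead of scanning inserted_at. A sketch of the idea behind the project's datetime_to_uuid7/uuid7_start helpers; this construction follows the UUIDv7 layout (48-bit Unix-millisecond timestamp in the most significant bits) and is an assumption, not the project's exact code:

```python
import uuid
from datetime import datetime, timedelta, timezone


def uuid7_lower_bound(dt: datetime) -> uuid.UUID:
    """Smallest 128-bit value whose UUIDv7 timestamp prefix matches dt (rest zero)."""
    unix_ms = int(dt.timestamp() * 1000)
    return uuid.UUID(int=unix_ms << 80)  # 48-bit ms timestamp occupies the top bits


day = datetime(2024, 10, 18, tzinfo=timezone.utc)
start = uuid7_lower_bound(day)
end = uuid7_lower_bound(day + timedelta(days=1))

# Equivalent of filter_inserted_at: every finding inserted on that day has a
# UUIDv7 id with start <= id < end, so the ORM filter becomes
# Finding.objects.filter(id__gte=start, id__lt=end)
print(start, end)
```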
@@ -122,6 +122,22 @@
|
||||
"scanner_args": {}
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.provider",
|
||||
"pk": "7791914f-d646-4fe2-b2ed-73f2c6499a36",
|
||||
"fields": {
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:45:26.352Z",
|
||||
"updated_at": "2024-10-18T11:16:23.533Z",
|
||||
"provider": "kubernetes",
|
||||
"uid": "gke_lucky-coast-419309_us-central1_autopilot-cluster-2",
|
||||
"alias": "k8s_testing_2",
|
||||
"connected": true,
|
||||
"connection_last_checked_at": "2024-10-18T11:16:23.503Z",
|
||||
"metadata": {},
|
||||
"scanner_args": {}
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.providersecret",
|
||||
"pk": "11491b47-75ae-4f71-ad8d-3e630a72182e",
|
||||
|
||||
@@ -11,9 +11,7 @@
|
||||
"unique_resource_count": 1,
|
||||
"duration": 5,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"accessanalyzer_enabled"
|
||||
]
|
||||
"checks_to_execute": ["accessanalyzer_enabled"]
|
||||
},
|
||||
"inserted_at": "2024-09-01T17:25:27.050Z",
|
||||
"started_at": "2024-09-01T17:25:27.050Z",
|
||||
@@ -33,9 +31,7 @@
|
||||
"unique_resource_count": 1,
|
||||
"duration": 20,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"accessanalyzer_enabled"
|
||||
]
|
||||
"checks_to_execute": ["accessanalyzer_enabled"]
|
||||
},
|
||||
"inserted_at": "2024-09-02T17:24:27.050Z",
|
||||
"started_at": "2024-09-02T17:24:27.050Z",
|
||||
@@ -55,9 +51,7 @@
|
||||
"unique_resource_count": 10,
|
||||
"duration": 10,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"cloudsql_instance_automated_backups"
|
||||
]
|
||||
"checks_to_execute": ["cloudsql_instance_automated_backups"]
|
||||
},
|
||||
"inserted_at": "2024-09-02T19:26:27.050Z",
|
||||
"started_at": "2024-09-02T19:26:27.050Z",
|
||||
@@ -77,9 +71,7 @@
|
||||
"unique_resource_count": 1,
|
||||
"duration": 35,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"accessanalyzer_enabled"
|
||||
]
|
||||
"checks_to_execute": ["accessanalyzer_enabled"]
|
||||
},
|
||||
"inserted_at": "2024-09-02T19:27:27.050Z",
|
||||
"started_at": "2024-09-02T19:27:27.050Z",
|
||||
@@ -97,9 +89,7 @@
|
||||
"name": "test scheduled aws scan",
|
||||
"state": "available",
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"cloudformation_stack_outputs_find_secrets"
|
||||
]
|
||||
"checks_to_execute": ["cloudformation_stack_outputs_find_secrets"]
|
||||
},
|
||||
"scheduled_at": "2030-09-02T19:20:27.050Z",
|
||||
"inserted_at": "2024-09-02T19:24:27.050Z",
|
||||
@@ -178,9 +168,7 @@
|
||||
"unique_resource_count": 19,
|
||||
"progress": 100,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"accessanalyzer_enabled"
|
||||
]
|
||||
"checks_to_execute": ["accessanalyzer_enabled"]
|
||||
},
|
||||
"duration": 7,
|
||||
"scheduled_at": null,
|
||||
@@ -190,6 +178,56 @@
|
||||
"completed_at": "2024-10-18T10:46:05.127Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.scan",
|
||||
"pk": "6dd8925f-a52d-48de-a546-d2d90db30ab1",
|
||||
"fields": {
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"name": "real scan azure",
|
||||
"provider": "1b59e032-3eb6-4694-93a5-df84cd9b3ce2",
|
||||
"trigger": "manual",
|
||||
"state": "completed",
|
||||
"unique_resource_count": 20,
|
||||
"progress": 100,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"accessanalyzer_enabled",
|
||||
"account_security_contact_information_is_registered"
|
||||
]
|
||||
},
|
||||
"duration": 4,
|
||||
"scheduled_at": null,
|
||||
"inserted_at": "2024-10-18T11:16:21.358Z",
|
||||
"updated_at": "2024-10-18T11:16:26.060Z",
|
||||
"started_at": "2024-10-18T11:16:21.593Z",
|
||||
"completed_at": "2024-10-18T11:16:26.060Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.scan",
|
||||
"pk": "4ca7ce89-3236-41a8-a369-8937bc152af5",
|
||||
"fields": {
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"name": "real scan k8s",
|
||||
"provider": "7791914f-d646-4fe2-b2ed-73f2c6499a36",
|
||||
"trigger": "manual",
|
||||
"state": "completed",
|
||||
"unique_resource_count": 20,
|
||||
"progress": 100,
|
||||
"scanner_args": {
|
||||
"checks_to_execute": [
|
||||
"accessanalyzer_enabled",
|
||||
"account_security_contact_information_is_registered"
|
||||
]
|
||||
},
|
||||
"duration": 4,
|
||||
"scheduled_at": null,
|
||||
"inserted_at": "2024-10-18T11:16:21.358Z",
|
||||
"updated_at": "2024-10-18T11:16:26.060Z",
|
||||
"started_at": "2024-10-18T11:16:21.593Z",
|
||||
"completed_at": "2024-10-18T11:16:26.060Z"
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "api.scan",
|
||||
"pk": "01929f57-c0ee-7553-be0b-cbde006fb6f7",
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.823Z",
|
||||
"updated_at": "2024-10-18T10:46:04.841Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.823Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -61,6 +62,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.855Z",
|
||||
"updated_at": "2024-10-18T10:46:04.858Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.855Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -116,6 +118,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.869Z",
|
||||
"updated_at": "2024-10-18T10:46:04.876Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.869Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -171,6 +174,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.888Z",
|
||||
"updated_at": "2024-10-18T10:46:04.892Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.888Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -226,6 +230,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.901Z",
|
||||
"updated_at": "2024-10-18T10:46:04.905Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.901Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -281,6 +286,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.915Z",
|
||||
"updated_at": "2024-10-18T10:46:04.919Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.915Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -336,6 +342,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.929Z",
|
||||
"updated_at": "2024-10-18T10:46:04.934Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.929Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -391,6 +398,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.944Z",
|
||||
"updated_at": "2024-10-18T10:46:04.947Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.944Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -446,6 +454,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.957Z",
|
||||
"updated_at": "2024-10-18T10:46:04.962Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.957Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
|
||||
"delta": "new",
|
||||
"status": "PASS",
|
||||
@@ -501,6 +510,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.971Z",
|
||||
"updated_at": "2024-10-18T10:46:04.975Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.971Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -556,6 +566,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.984Z",
|
||||
"updated_at": "2024-10-18T10:46:04.989Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.984Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -611,6 +622,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.999Z",
|
||||
"updated_at": "2024-10-18T10:46:05.003Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.999Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -666,6 +678,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.013Z",
|
||||
"updated_at": "2024-10-18T10:46:05.018Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.013Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -721,6 +734,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.029Z",
|
||||
"updated_at": "2024-10-18T10:46:05.033Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.029Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -776,6 +790,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.045Z",
|
||||
"updated_at": "2024-10-18T10:46:05.050Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.045Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -831,6 +846,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.061Z",
|
||||
"updated_at": "2024-10-18T10:46:05.065Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.061Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -886,6 +902,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.080Z",
|
||||
"updated_at": "2024-10-18T10:46:05.085Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.080Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -941,6 +958,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.099Z",
|
||||
"updated_at": "2024-10-18T10:46:05.104Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.099Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -996,6 +1014,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.115Z",
|
||||
"updated_at": "2024-10-18T10:46:05.121Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.115Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -1051,6 +1070,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.489Z",
|
||||
"updated_at": "2024-10-18T11:16:24.506Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.823Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1106,6 +1126,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.518Z",
|
||||
"updated_at": "2024-10-18T11:16:24.521Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.855Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1161,6 +1182,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.526Z",
|
||||
"updated_at": "2024-10-18T11:16:24.529Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.869Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1216,6 +1238,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.535Z",
|
||||
"updated_at": "2024-10-18T11:16:24.538Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.888Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1271,6 +1294,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.544Z",
|
||||
"updated_at": "2024-10-18T11:16:24.546Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.901Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1326,6 +1350,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.551Z",
|
||||
"updated_at": "2024-10-18T11:16:24.554Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.915Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1381,6 +1406,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.560Z",
|
||||
"updated_at": "2024-10-18T11:16:24.562Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.929Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1436,6 +1462,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.567Z",
|
||||
"updated_at": "2024-10-18T11:16:24.569Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.944Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1491,6 +1518,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.573Z",
|
||||
"updated_at": "2024-10-18T11:16:24.575Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.957Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
|
||||
"delta": null,
|
||||
"status": "PASS",
|
||||
@@ -1546,6 +1574,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.580Z",
|
||||
"updated_at": "2024-10-18T11:16:24.582Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.971Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1601,6 +1630,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.587Z",
|
||||
"updated_at": "2024-10-18T11:16:24.589Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.984Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1656,6 +1686,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.595Z",
|
||||
"updated_at": "2024-10-18T11:16:24.597Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.999Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1711,6 +1742,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.602Z",
|
||||
"updated_at": "2024-10-18T11:16:24.604Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.013Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1766,6 +1798,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.610Z",
|
||||
"updated_at": "2024-10-18T11:16:24.612Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.029Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1821,6 +1854,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.617Z",
|
||||
"updated_at": "2024-10-18T11:16:24.620Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.045Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1876,6 +1910,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.625Z",
|
||||
"updated_at": "2024-10-18T11:16:24.627Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.061Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1931,6 +1966,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.632Z",
|
||||
"updated_at": "2024-10-18T11:16:24.634Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.080Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1986,6 +2022,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.639Z",
|
||||
"updated_at": "2024-10-18T11:16:24.642Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.099Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -2041,6 +2078,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.646Z",
|
||||
"updated_at": "2024-10-18T11:16:24.648Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.115Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -2096,6 +2134,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:26.033Z",
|
||||
"updated_at": "2024-10-18T11:16:26.045Z",
|
||||
"first_seen_at": "2024-10-18T11:16:26.033Z",
|
||||
"uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "MANUAL",
|
||||
|
||||
237
api/src/backend/api/management/commands/findings.py
Normal file
@@ -0,0 +1,237 @@
|
||||
import random
|
||||
from datetime import datetime, timezone
|
||||
from math import ceil
|
||||
from uuid import uuid4
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
from tqdm import tqdm
|
||||
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import (
|
||||
Finding,
|
||||
Provider,
|
||||
Resource,
|
||||
ResourceFindingMapping,
|
||||
Scan,
|
||||
StatusChoices,
|
||||
)
|
||||
from prowler.lib.check.models import CheckMetadata
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "Populates the database with test data for performance testing."
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
"--tenant",
|
||||
type=str,
|
||||
required=True,
|
||||
help="Tenant id for which the data will be populated.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--resources",
|
||||
type=int,
|
||||
required=True,
|
||||
help="The number of resources to create.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--findings",
|
||||
type=int,
|
||||
required=True,
|
||||
help="The number of findings to create.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--batch", type=int, required=True, help="The batch size for bulk creation."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--alias",
|
||||
type=str,
|
||||
required=False,
|
||||
help="Optional alias for the provider and scan",
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
tenant_id = options["tenant"]
|
||||
num_resources = options["resources"]
|
||||
num_findings = options["findings"]
|
||||
batch_size = options["batch"]
|
||||
alias = options["alias"] or "Testing"
|
||||
uid_token = str(uuid4())
|
||||
|
||||
self.stdout.write(self.style.NOTICE("Starting data population"))
|
||||
self.stdout.write(self.style.NOTICE(f"\tTenant: {tenant_id}"))
|
||||
self.stdout.write(self.style.NOTICE(f"\tAlias: {alias}"))
|
||||
self.stdout.write(self.style.NOTICE(f"\tResources: {num_resources}"))
|
||||
self.stdout.write(self.style.NOTICE(f"\tFindings: {num_findings}"))
|
||||
self.stdout.write(self.style.NOTICE(f"\tBatch size: {batch_size}\n\n"))
|
||||
|
||||
# Resource metadata
|
||||
possible_regions = [
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
"us-west-1",
|
||||
"us-west-2",
|
||||
"ca-central-1",
|
||||
"eu-central-1",
|
||||
"eu-west-1",
|
||||
"eu-west-2",
|
||||
"eu-west-3",
|
||||
"ap-southeast-1",
|
||||
"ap-southeast-2",
|
||||
"ap-northeast-1",
|
||||
"ap-northeast-2",
|
||||
"ap-south-1",
|
||||
"sa-east-1",
|
||||
]
|
||||
possible_services = []
|
||||
possible_types = []
|
||||
|
||||
bulk_check_metadata = CheckMetadata.get_bulk(provider="aws")
|
||||
for check_metadata in bulk_check_metadata.values():
|
||||
if check_metadata.ServiceName not in possible_services:
|
||||
possible_services.append(check_metadata.ServiceName)
|
||||
if (
|
||||
check_metadata.ResourceType
|
||||
and check_metadata.ResourceType not in possible_types
|
||||
):
|
||||
possible_types.append(check_metadata.ResourceType)
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
provider, _ = Provider.all_objects.get_or_create(
|
||||
tenant_id=tenant_id,
|
||||
provider="aws",
|
||||
connected=True,
|
||||
uid=str(random.randint(100000000000, 999999999999)),
|
||||
defaults={
|
||||
"alias": alias,
|
||||
},
|
||||
)
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
scan = Scan.all_objects.create(
|
||||
tenant_id=tenant_id,
|
||||
provider=provider,
|
||||
name=alias,
|
||||
trigger="manual",
|
||||
state="executing",
|
||||
progress=0,
|
||||
started_at=datetime.now(timezone.utc),
|
||||
)
|
||||
scan_state = "completed"
|
||||
|
||||
try:
|
||||
# Create resources
|
||||
resources = []
|
||||
|
||||
for i in range(num_resources):
|
||||
resources.append(
|
||||
Resource(
|
||||
tenant_id=tenant_id,
|
||||
provider_id=provider.id,
|
||||
uid=f"testing-{uid_token}-{i}",
|
||||
name=f"Testing {uid_token}-{i}",
|
||||
region=random.choice(possible_regions),
|
||||
service=random.choice(possible_services),
|
||||
type=random.choice(possible_types),
|
||||
)
|
||||
)
|
||||
|
||||
num_batches = ceil(len(resources) / batch_size)
|
||||
self.stdout.write(self.style.WARNING("Creating resources..."))
|
||||
for i in tqdm(range(0, len(resources), batch_size), total=num_batches):
|
||||
with rls_transaction(tenant_id):
|
||||
Resource.all_objects.bulk_create(resources[i : i + batch_size])
|
||||
self.stdout.write(self.style.SUCCESS("Resources created successfully.\n\n"))
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
scan.progress = 33
|
||||
scan.save()
|
||||
|
||||
# Create Findings
|
||||
findings = []
|
||||
possible_deltas = ["new", "changed", None]
|
||||
possible_severities = ["critical", "high", "medium", "low"]
|
||||
findings_resources_mapping = []
|
||||
|
||||
for i in range(num_findings):
|
||||
severity = random.choice(possible_severities)
|
||||
check_id = random.randint(1, 1000)
|
||||
assigned_resource_num = random.randint(0, len(resources) - 1)
|
||||
assigned_resource = resources[assigned_resource_num]
|
||||
findings_resources_mapping.append(assigned_resource_num)
|
||||
|
||||
findings.append(
|
||||
Finding(
|
||||
tenant_id=tenant_id,
|
||||
scan=scan,
|
||||
uid=f"testing-{uid_token}-{i}",
|
||||
delta=random.choice(possible_deltas),
|
||||
check_id=f"check-{check_id}",
|
||||
status=random.choice(list(StatusChoices)),
|
||||
severity=severity,
|
||||
impact=severity,
|
||||
raw_result={},
|
||||
check_metadata={
|
||||
"checktitle": f"Test title for check {check_id}",
|
||||
"risk": f"Testing risk {uid_token}-{i}",
|
||||
"provider": "aws",
|
||||
"severity": severity,
|
||||
"categories": ["category1", "category2", "category3"],
|
||||
"description": "This is a random description that should not matter for testing purposes.",
|
||||
"servicename": assigned_resource.service,
|
||||
"resourcetype": assigned_resource.type,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
num_batches = ceil(len(findings) / batch_size)
|
||||
self.stdout.write(self.style.WARNING("Creating findings..."))
|
||||
for i in tqdm(range(0, len(findings), batch_size), total=num_batches):
|
||||
with rls_transaction(tenant_id):
|
||||
Finding.all_objects.bulk_create(findings[i : i + batch_size])
|
||||
self.stdout.write(self.style.SUCCESS("Findings created successfully.\n\n"))
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
scan.progress = 66
|
||||
scan.save()
|
||||
|
||||
# Create ResourceFindingMapping
|
||||
mappings = []
|
||||
for index, f in enumerate(findings):
|
||||
mappings.append(
|
||||
ResourceFindingMapping(
|
||||
tenant_id=tenant_id,
|
||||
resource=resources[findings_resources_mapping[index]],
|
||||
finding=f,
|
||||
)
|
||||
)
|
||||
|
||||
num_batches = ceil(len(mappings) / batch_size)
|
||||
self.stdout.write(
|
||||
self.style.WARNING("Creating resource-finding mappings...")
|
||||
)
|
||||
for i in tqdm(range(0, len(mappings), batch_size), total=num_batches):
|
||||
with rls_transaction(tenant_id):
|
||||
ResourceFindingMapping.objects.bulk_create(
|
||||
mappings[i : i + batch_size]
|
||||
)
|
||||
self.stdout.write(
|
||||
self.style.SUCCESS(
|
||||
"Resource-finding mappings created successfully.\n\n"
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
self.stdout.write(self.style.ERROR(f"Failed to populate test data: {e}"))
|
||||
scan_state = "failed"
|
||||
finally:
|
||||
scan.completed_at = datetime.now(timezone.utc)
|
||||
scan.duration = int(
|
||||
(datetime.now(timezone.utc) - scan.started_at).total_seconds()
|
||||
)
|
||||
scan.progress = 100
|
||||
scan.state = scan_state
|
||||
scan.unique_resource_count = num_resources
|
||||
with rls_transaction(tenant_id):
|
||||
scan.save()
|
||||
|
||||
self.stdout.write(self.style.NOTICE("Successfully populated test data."))
|
||||
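This management command registers under the name "findings" (from its module path under management/commands). A minimal invocation sketch, assuming it is run from the API backend project and that the tenant UUID refers to an existing tenant; all values are illustrative:

from django.core.management import call_command

# Illustrative values only; the tenant UUID must belong to an existing tenant.
call_command(
    "findings",
    tenant="12646005-9067-4d2a-a098-8bb378604362",
    resources=1_000,    # --resources
    findings=50_000,    # --findings
    batch=500,          # --batch
    alias="Perf test",  # --alias (optional, defaults to "Testing")
)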
15
api/src/backend/api/migrations/0006_findings_first_seen.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0005_rbac_missing_admin_roles"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="finding",
|
||||
name="first_seen_at",
|
||||
field=models.DateTimeField(editable=False, null=True),
|
||||
),
|
||||
]
|
||||
25
api/src/backend/api/migrations/0007_scan_and_scan_summaries_indexes.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# Generated by Django 5.1.5 on 2025-01-28 15:03
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0006_findings_first_seen"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddIndex(
|
||||
model_name="scan",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "provider_id", "state", "inserted_at"],
|
||||
name="scans_prov_state_insert_idx",
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="scansummary",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "scan_id"], name="scan_summaries_tenant_scan_idx"
|
||||
),
|
||||
),
|
||||
]
|
||||
64
api/src/backend/api/migrations/0008_daily_scheduled_tasks_update.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import json
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
import django.db.models.deletion
|
||||
from django.db import migrations, models
|
||||
from django_celery_beat.models import PeriodicTask
|
||||
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Scan, StateChoices
|
||||
|
||||
|
||||
def migrate_daily_scheduled_scan_tasks(apps, schema_editor):
|
||||
for daily_scheduled_scan_task in PeriodicTask.objects.filter(
|
||||
task="scan-perform-scheduled"
|
||||
):
|
||||
task_kwargs = json.loads(daily_scheduled_scan_task.kwargs)
|
||||
tenant_id = task_kwargs["tenant_id"]
|
||||
provider_id = task_kwargs["provider_id"]
|
||||
|
||||
current_time = datetime.now(timezone.utc)
|
||||
scheduled_time_today = datetime.combine(
|
||||
current_time.date(),
|
||||
daily_scheduled_scan_task.start_time.time(),
|
||||
tzinfo=timezone.utc,
|
||||
)
|
||||
|
||||
if current_time < scheduled_time_today:
|
||||
next_scan_date = scheduled_time_today
|
||||
else:
|
||||
next_scan_date = scheduled_time_today + timedelta(days=1)
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
Scan.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Daily scheduled scan",
|
||||
provider_id=provider_id,
|
||||
trigger=Scan.TriggerChoices.SCHEDULED,
|
||||
state=StateChoices.SCHEDULED,
|
||||
scheduled_at=next_scan_date,
|
||||
scheduler_task_id=daily_scheduled_scan_task.id,
|
||||
)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
atomic = False
|
||||
|
||||
dependencies = [
|
||||
("api", "0007_scan_and_scan_summaries_indexes"),
|
||||
("django_celery_beat", "0019_alter_periodictasks_options"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="scan",
|
||||
name="scheduler_task",
|
||||
field=models.ForeignKey(
|
||||
blank=True,
|
||||
null=True,
|
||||
on_delete=django.db.models.deletion.CASCADE,
|
||||
to="django_celery_beat.periodictask",
|
||||
),
|
||||
),
|
||||
migrations.RunPython(migrate_daily_scheduled_scan_tasks),
|
||||
]
|
||||
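A quick worked example of the rescheduling rule in migrate_daily_scheduled_scan_tasks above; this is a sketch with made-up times, not part of the migration itself:

from datetime import datetime, timedelta, timezone

# If today's scheduled time has already passed, the next scan falls on tomorrow.
current_time = datetime(2025, 2, 7, 15, 0, tzinfo=timezone.utc)
scheduled_time_today = datetime(2025, 2, 7, 9, 0, tzinfo=timezone.utc)
next_scan_date = (
    scheduled_time_today
    if current_time < scheduled_time_today
    else scheduled_time_today + timedelta(days=1)
)
assert next_scan_date == datetime(2025, 2, 8, 9, 0, tzinfo=timezone.utc)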
22
api/src/backend/api/migrations/0009_increase_provider_uid_maximum_length.py
Normal file
@@ -0,0 +1,22 @@
|
||||
# Generated by Django 5.1.5 on 2025-02-07 09:42
|
||||
|
||||
import django.core.validators
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0008_daily_scheduled_tasks_update"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name="provider",
|
||||
name="uid",
|
||||
field=models.CharField(
|
||||
max_length=250,
|
||||
validators=[django.core.validators.MinLengthValidator(3)],
|
||||
verbose_name="Unique identifier for the provider, set by the provider",
|
||||
),
|
||||
),
|
||||
]
|
||||
109
api/src/backend/api/migrations/0010_findings_performance_indexes_partitions.py
Normal file
@@ -0,0 +1,109 @@
|
||||
from functools import partial
|
||||
|
||||
from django.db import connection, migrations
|
||||
|
||||
|
||||
def create_index_on_partitions(
|
||||
apps, schema_editor, parent_table: str, index_name: str, index_details: str
|
||||
):
|
||||
with connection.cursor() as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT inhrelid::regclass::text
|
||||
FROM pg_inherits
|
||||
WHERE inhparent = %s::regclass;
|
||||
""",
|
||||
[parent_table],
|
||||
)
|
||||
partitions = [row[0] for row in cursor.fetchall()]
|
||||
# Iterate over partitions and create index concurrently.
|
||||
# Note: PostgreSQL does not allow CONCURRENTLY inside a transaction,
|
||||
# so we need atomic = False for this migration.
|
||||
for partition in partitions:
|
||||
sql = (
|
||||
f"CREATE INDEX CONCURRENTLY IF NOT EXISTS {partition.replace('.', '_')}_{index_name} ON {partition} "
|
||||
f"{index_details};"
|
||||
)
|
||||
schema_editor.execute(sql)
|
||||
|
||||
|
||||
def drop_index_on_partitions(apps, schema_editor, parent_table: str, index_name: str):
|
||||
with schema_editor.connection.cursor() as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT inhrelid::regclass::text
|
||||
FROM pg_inherits
|
||||
WHERE inhparent = %s::regclass;
|
||||
""",
|
||||
[parent_table],
|
||||
)
|
||||
partitions = [row[0] for row in cursor.fetchall()]
|
||||
|
||||
# Iterate over partitions and drop index concurrently.
|
||||
for partition in partitions:
|
||||
partition_index = f"{partition.replace('.', '_')}_{index_name}"
|
||||
sql = f"DROP INDEX CONCURRENTLY IF EXISTS {partition_index};"
|
||||
schema_editor.execute(sql)
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
atomic = False
|
||||
|
||||
dependencies = [
|
||||
("api", "0009_increase_provider_uid_maximum_length"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.RunPython(
|
||||
partial(
|
||||
create_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="findings_tenant_and_id_idx",
|
||||
index_details="(tenant_id, id)",
|
||||
),
|
||||
reverse_code=partial(
|
||||
drop_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="findings_tenant_and_id_idx",
|
||||
),
|
||||
),
|
||||
migrations.RunPython(
|
||||
partial(
|
||||
create_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="find_tenant_scan_idx",
|
||||
index_details="(tenant_id, scan_id)",
|
||||
),
|
||||
reverse_code=partial(
|
||||
drop_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="find_tenant_scan_idx",
|
||||
),
|
||||
),
|
||||
migrations.RunPython(
|
||||
partial(
|
||||
create_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="find_tenant_scan_id_idx",
|
||||
index_details="(tenant_id, scan_id, id)",
|
||||
),
|
||||
reverse_code=partial(
|
||||
drop_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="find_tenant_scan_id_idx",
|
||||
),
|
||||
),
|
||||
migrations.RunPython(
|
||||
partial(
|
||||
create_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="find_delta_new_idx",
|
||||
index_details="(tenant_id, id) where delta = 'new'",
|
||||
),
|
||||
reverse_code=partial(
|
||||
drop_index_on_partitions,
|
||||
parent_table="findings",
|
||||
index_name="find_delta_new_idx",
|
||||
),
|
||||
),
|
||||
]
|
||||
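For reference, a sketch of the SQL that create_index_on_partitions above emits for a single partition; the partition name findings_default is made up for illustration:

partition = "findings_default"  # hypothetical partition name
index_name = "findings_tenant_and_id_idx"
index_details = "(tenant_id, id)"
sql = (
    f"CREATE INDEX CONCURRENTLY IF NOT EXISTS {partition.replace('.', '_')}_{index_name} "
    f"ON {partition} {index_details};"
)
# -> CREATE INDEX CONCURRENTLY IF NOT EXISTS findings_default_findings_tenant_and_id_idx
#    ON findings_default (tenant_id, id);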
49
api/src/backend/api/migrations/0011_findings_performance_indexes_parent.py
Normal file
@@ -0,0 +1,49 @@
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0010_findings_performance_indexes_partitions"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddIndex(
|
||||
model_name="finding",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "id"], name="findings_tenant_and_id_idx"
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="finding",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "scan_id"], name="find_tenant_scan_idx"
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="finding",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "scan_id", "id"], name="find_tenant_scan_id_idx"
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="finding",
|
||||
index=models.Index(
|
||||
condition=models.Q(("delta", "new")),
|
||||
fields=["tenant_id", "id"],
|
||||
name="find_delta_new_idx",
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="resourcetagmapping",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "resource_id"], name="resource_tag_tenant_idx"
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="resource",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "service", "region", "type"],
|
||||
name="resource_tenant_metadata_idx",
|
||||
),
|
||||
),
|
||||
]
|
||||
15
api/src/backend/api/migrations/0012_scan_report_output.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0011_findings_performance_indexes_parent"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="scan",
|
||||
name="output_location",
|
||||
field=models.CharField(blank=True, max_length=200, null=True),
|
||||
),
|
||||
]
|
||||
@@ -11,6 +11,7 @@ from django.core.validators import MinLengthValidator
|
||||
from django.db import models
|
||||
from django.db.models import Q
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django_celery_beat.models import PeriodicTask
|
||||
from django_celery_results.models import TaskResult
|
||||
from psqlextra.manager import PostgresManager
|
||||
from psqlextra.models import PostgresPartitionedModel
|
||||
@@ -226,13 +227,13 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
@staticmethod
|
||||
def validate_kubernetes_uid(value):
|
||||
if not re.match(
|
||||
r"(^[a-z0-9]([-a-z0-9]{1,61}[a-z0-9])?$)|(^arn:aws(-cn|-us-gov|-iso|-iso-b)?:[a-zA-Z0-9\-]+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:[a-zA-Z0-9\-_\/:\.\*]+(:\d+)?$)",
|
||||
r"^[a-z0-9][A-Za-z0-9_.:\/-]{1,250}$",
|
||||
value,
|
||||
):
|
||||
raise ModelValidationError(
|
||||
detail="The value must either be a valid Kubernetes UID (up to 63 characters, "
|
||||
"starting and ending with a lowercase letter or number, containing only "
|
||||
"lowercase alphanumeric characters and hyphens) or a valid EKS ARN.",
|
||||
"lowercase alphanumeric characters and hyphens) or a valid AWS EKS Cluster ARN, GCP GKE Context Name or Azure AKS Cluster Name.",
|
||||
code="kubernetes-uid",
|
||||
pointer="/data/attributes/uid",
|
||||
)
|
||||
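The relaxed pattern above is meant to accept plain Kubernetes context names as well as EKS ARNs and GKE context names. A quick sanity sketch, reusing UID examples that appear in the test suite of this change set:

import re

pattern = r"^[a-z0-9][A-Za-z0-9_.:\/-]{1,250}$"
for uid in (
    "kubernetes-test-123456789",
    "arn:aws:eks:us-east-1:111122223333:cluster/test-cluster-long-name-123456789",
    "gke_aaaa-dev_europe-test1_dev-aaaa-test-cluster-long-name-123456789",
):
    assert re.match(pattern, uid) is not None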
@@ -246,7 +247,7 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
)
|
||||
uid = models.CharField(
|
||||
"Unique identifier for the provider, set by the provider",
|
||||
max_length=63,
|
||||
max_length=250,
|
||||
blank=False,
|
||||
validators=[MinLengthValidator(3)],
|
||||
)
|
||||
@@ -410,6 +411,10 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
started_at = models.DateTimeField(null=True, blank=True)
|
||||
completed_at = models.DateTimeField(null=True, blank=True)
|
||||
next_scan_at = models.DateTimeField(null=True, blank=True)
|
||||
scheduler_task = models.ForeignKey(
|
||||
PeriodicTask, on_delete=models.CASCADE, null=True, blank=True
|
||||
)
|
||||
output_location = models.CharField(blank=True, null=True, max_length=200)
|
||||
# TODO: mutelist foreign key
|
||||
|
||||
class Meta(RowLevelSecurityProtectedModel.Meta):
|
||||
@@ -428,6 +433,10 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
fields=["provider", "state", "trigger", "scheduled_at"],
|
||||
name="scans_prov_state_trig_sche_idx",
|
||||
),
|
||||
models.Index(
|
||||
fields=["tenant_id", "provider_id", "state", "inserted_at"],
|
||||
name="scans_prov_state_insert_idx",
|
||||
),
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
@@ -544,6 +553,10 @@ class Resource(RowLevelSecurityProtectedModel):
|
||||
fields=["uid", "region", "service", "name"],
|
||||
name="resource_uid_reg_serv_name_idx",
|
||||
),
|
||||
models.Index(
|
||||
fields=["tenant_id", "service", "region", "type"],
|
||||
name="resource_tenant_metadata_idx",
|
||||
),
|
||||
GinIndex(fields=["text_search"], name="gin_resources_search_idx"),
|
||||
]
|
||||
|
||||
@@ -591,6 +604,12 @@ class ResourceTagMapping(RowLevelSecurityProtectedModel):
|
||||
),
|
||||
]
|
||||
|
||||
indexes = [
|
||||
models.Index(
|
||||
fields=["tenant_id", "resource_id"], name="resource_tag_tenant_idx"
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
|
||||
"""
|
||||
@@ -615,6 +634,7 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
|
||||
id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
|
||||
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
|
||||
updated_at = models.DateTimeField(auto_now=True, editable=False)
|
||||
first_seen_at = models.DateTimeField(editable=False, null=True)
|
||||
|
||||
uid = models.CharField(max_length=300)
|
||||
delta = FindingDeltaEnumField(
|
||||
@@ -688,7 +708,17 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
|
||||
],
|
||||
name="findings_filter_idx",
|
||||
),
|
||||
models.Index(fields=["tenant_id", "id"], name="findings_tenant_and_id_idx"),
|
||||
GinIndex(fields=["text_search"], name="gin_findings_search_idx"),
|
||||
models.Index(fields=["tenant_id", "scan_id"], name="find_tenant_scan_idx"),
|
||||
models.Index(
|
||||
fields=["tenant_id", "scan_id", "id"], name="find_tenant_scan_id_idx"
|
||||
),
|
||||
models.Index(
|
||||
fields=["tenant_id", "id"],
|
||||
condition=Q(delta="new"),
|
||||
name="find_delta_new_idx",
|
||||
),
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
@@ -1099,6 +1129,12 @@ class ScanSummary(RowLevelSecurityProtectedModel):
|
||||
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
|
||||
),
|
||||
]
|
||||
indexes = [
|
||||
models.Index(
|
||||
fields=["tenant_id", "scan_id"],
|
||||
name="scan_summaries_tenant_scan_idx",
|
||||
)
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
resource_name = "scan-summaries"
|
||||
|
||||
File diff suppressed because it is too large
@@ -3,6 +3,8 @@ from conftest import TEST_PASSWORD, get_api_tokens, get_authorization_header
|
||||
from django.urls import reverse
|
||||
from rest_framework.test import APIClient
|
||||
|
||||
from api.models import Membership, User
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_basic_authentication():
|
||||
@@ -177,3 +179,122 @@ def test_user_me_when_inviting_users(create_test_user, tenants_fixture, roles_fi
|
||||
user2_me = client.get(reverse("user-me"), headers=user2_headers)
|
||||
assert user2_me.status_code == 200
|
||||
assert user2_me.json()["data"]["attributes"]["email"] == user2_email
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestTokenSwitchTenant:
|
||||
def test_switch_tenant_with_valid_token(self, tenants_fixture, providers_fixture):
|
||||
client = APIClient()
|
||||
|
||||
test_user = "test_email@prowler.com"
|
||||
test_password = "test_password"
|
||||
|
||||
# Check that we can create a new user without any kind of authentication
|
||||
user_creation_response = client.post(
|
||||
reverse("user-list"),
|
||||
data={
|
||||
"data": {
|
||||
"type": "users",
|
||||
"attributes": {
|
||||
"name": "test",
|
||||
"email": test_user,
|
||||
"password": test_password,
|
||||
},
|
||||
}
|
||||
},
|
||||
format="vnd.api+json",
|
||||
)
|
||||
assert user_creation_response.status_code == 201
|
||||
|
||||
# Create a new relationship between this user and another tenant
|
||||
tenant_id = tenants_fixture[0].id
|
||||
user_instance = User.objects.get(email=test_user)
|
||||
Membership.objects.create(user=user_instance, tenant_id=tenant_id)
|
||||
|
||||
# Check that using our new user's credentials we can authenticate and get the providers
|
||||
access_token, _ = get_api_tokens(client, test_user, test_password)
|
||||
auth_headers = get_authorization_header(access_token)
|
||||
|
||||
user_me_response = client.get(
|
||||
reverse("user-me"),
|
||||
headers=auth_headers,
|
||||
)
|
||||
assert user_me_response.status_code == 200
|
||||
# Assert this user belongs to two tenants
|
||||
assert (
|
||||
user_me_response.json()["data"]["relationships"]["memberships"]["meta"][
|
||||
"count"
|
||||
]
|
||||
== 2
|
||||
)
|
||||
|
||||
provider_response = client.get(
|
||||
reverse("provider-list"),
|
||||
headers=auth_headers,
|
||||
)
|
||||
assert provider_response.status_code == 200
|
||||
# Empty response since there are no providers in this tenant
|
||||
assert not provider_response.json()["data"]
|
||||
|
||||
switch_tenant_response = client.post(
|
||||
reverse("token-switch"),
|
||||
data={
|
||||
"data": {
|
||||
"type": "tokens-switch-tenant",
|
||||
"attributes": {"tenant_id": tenant_id},
|
||||
}
|
||||
},
|
||||
headers=auth_headers,
|
||||
)
|
||||
assert switch_tenant_response.status_code == 200
|
||||
new_access_token = switch_tenant_response.json()["data"]["attributes"]["access"]
|
||||
new_auth_headers = get_authorization_header(new_access_token)
|
||||
|
||||
provider_response = client.get(
|
||||
reverse("provider-list"),
|
||||
headers=new_auth_headers,
|
||||
)
|
||||
assert provider_response.status_code == 200
|
||||
# Now it must be data because we switched to another tenant with providers
|
||||
assert provider_response.json()["data"]
|
||||
|
||||
def test_switch_tenant_with_invalid_token(self, create_test_user, tenants_fixture):
|
||||
client = APIClient()
|
||||
|
||||
access_token, refresh_token = get_api_tokens(
|
||||
client, create_test_user.email, TEST_PASSWORD
|
||||
)
|
||||
auth_headers = get_authorization_header(access_token)
|
||||
|
||||
invalid_token_response = client.post(
|
||||
reverse("token-switch"),
|
||||
data={
|
||||
"data": {
|
||||
"type": "tokens-switch-tenant",
|
||||
"attributes": {"tenant_id": "invalid_tenant_id"},
|
||||
}
|
||||
},
|
||||
headers=auth_headers,
|
||||
)
|
||||
assert invalid_token_response.status_code == 400
|
||||
assert invalid_token_response.json()["errors"][0]["code"] == "invalid"
|
||||
assert (
|
||||
invalid_token_response.json()["errors"][0]["detail"]
|
||||
== "Must be a valid UUID."
|
||||
)
|
||||
|
||||
invalid_tenant_response = client.post(
|
||||
reverse("token-switch"),
|
||||
data={
|
||||
"data": {
|
||||
"type": "tokens-switch-tenant",
|
||||
"attributes": {"tenant_id": tenants_fixture[-1].id},
|
||||
}
|
||||
},
|
||||
headers=auth_headers,
|
||||
)
|
||||
assert invalid_tenant_response.status_code == 400
|
||||
assert invalid_tenant_response.json()["errors"][0]["code"] == "invalid"
|
||||
assert invalid_tenant_response.json()["errors"][0]["detail"] == (
|
||||
"Tenant does not exist or user is not a " "member."
|
||||
)
|
||||
|
||||
@@ -274,9 +274,10 @@ class TestValidateInvitation:
|
||||
expired_time = datetime.now(timezone.utc) - timedelta(days=1)
|
||||
invitation.expires_at = expired_time
|
||||
|
||||
with patch("api.utils.Invitation.objects.using") as mock_using, patch(
|
||||
"api.utils.datetime"
|
||||
) as mock_datetime:
|
||||
with (
|
||||
patch("api.utils.Invitation.objects.using") as mock_using,
|
||||
patch("api.utils.datetime") as mock_datetime,
|
||||
):
|
||||
mock_db = mock_using.return_value
|
||||
mock_db.get.return_value = invitation
|
||||
mock_datetime.now.return_value = datetime.now(timezone.utc)
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
import glob
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from unittest.mock import ANY, Mock, patch
|
||||
|
||||
import jwt
|
||||
import pytest
|
||||
from botocore.exceptions import NoCredentialsError
|
||||
from conftest import API_JSON_CONTENT_TYPE, TEST_PASSWORD, TEST_USER
|
||||
from django.conf import settings
|
||||
from django.urls import reverse
|
||||
from rest_framework import status
|
||||
|
||||
@@ -19,6 +24,7 @@ from api.models import (
|
||||
RoleProviderGroupRelationship,
|
||||
Scan,
|
||||
StateChoices,
|
||||
Task,
|
||||
User,
|
||||
UserRoleRelationship,
|
||||
)
|
||||
@@ -27,6 +33,12 @@ from api.rls import Tenant
|
||||
TODAY = str(datetime.today().date())
|
||||
|
||||
|
||||
def today_after_n_days(n_days: int) -> str:
|
||||
return datetime.strftime(
|
||||
datetime.today().date() + timedelta(days=n_days), "%Y-%m-%d"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestUserViewSet:
|
||||
def test_users_list(self, authenticated_client, create_test_user):
|
||||
@@ -261,6 +273,16 @@ class TestUserViewSet:
|
||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||
assert not User.objects.filter(id=create_test_user.id).exists()
|
||||
|
||||
def test_users_destroy_other_user(
|
||||
self, authenticated_client, create_test_user, users_fixture
|
||||
):
|
||||
user = users_fixture[2]
|
||||
response = authenticated_client.delete(
|
||||
reverse("user-detail", kwargs={"pk": str(user.id)})
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert User.objects.filter(id=create_test_user.id).exists()
|
||||
|
||||
def test_users_destroy_invalid_user(self, authenticated_client, create_test_user):
|
||||
another_user = User.objects.create_user(
|
||||
password="otherpassword", email="other@example.com"
|
||||
@@ -268,7 +290,7 @@ class TestUserViewSet:
|
||||
response = authenticated_client.delete(
|
||||
reverse("user-detail", kwargs={"pk": another_user.id})
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert User.objects.filter(id=another_user.id).exists()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -868,6 +890,16 @@ class TestProviderViewSet:
|
||||
"uid": "kubernetes-test-123456789",
|
||||
"alias": "test",
|
||||
},
|
||||
{
|
||||
"provider": "kubernetes",
|
||||
"uid": "arn:aws:eks:us-east-1:111122223333:cluster/test-cluster-long-name-123456789",
|
||||
"alias": "EKS",
|
||||
},
|
||||
{
|
||||
"provider": "kubernetes",
|
||||
"uid": "gke_aaaa-dev_europe-test1_dev-aaaa-test-cluster-long-name-123456789",
|
||||
"alias": "GKE",
|
||||
},
|
||||
{
|
||||
"provider": "azure",
|
||||
"uid": "8851db6b-42e5-4533-aa9e-30a32d67e875",
|
||||
@@ -2052,9 +2084,9 @@ class TestScanViewSet:
|
||||
("started_at.gte", "2024-01-01", 3),
|
||||
("started_at.lte", "2024-01-01", 0),
|
||||
("trigger", Scan.TriggerChoices.MANUAL, 1),
|
||||
("state", StateChoices.AVAILABLE, 2),
|
||||
("state", StateChoices.AVAILABLE, 1),
|
||||
("state", StateChoices.FAILED, 1),
|
||||
("state.in", f"{StateChoices.FAILED},{StateChoices.AVAILABLE}", 3),
|
||||
("state.in", f"{StateChoices.FAILED},{StateChoices.AVAILABLE}", 2),
|
||||
("trigger", Scan.TriggerChoices.MANUAL, 1),
|
||||
]
|
||||
),
|
||||
@@ -2129,6 +2161,159 @@ class TestScanViewSet:
|
||||
response = authenticated_client.get(reverse("scan-list"), {"sort": "invalid"})
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
|
||||
def test_report_executing(self, authenticated_client, scans_fixture):
|
||||
"""
|
||||
When the scan is still executing (state == EXECUTING), the view should return
|
||||
the task data with HTTP 202 and a Content-Location header.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
scan.state = StateChoices.EXECUTING
|
||||
scan.save()
|
||||
|
||||
task = Task.objects.create(tenant_id=scan.tenant_id)
|
||||
dummy_task_data = {"id": str(task.id), "state": StateChoices.EXECUTING}
|
||||
|
||||
scan.task = task
|
||||
scan.save()
|
||||
|
||||
with patch(
|
||||
"api.v1.views.TaskSerializer",
|
||||
return_value=type("DummySerializer", (), {"data": dummy_task_data}),
|
||||
):
|
||||
url = reverse("scan-report", kwargs={"pk": scan.id})
|
||||
response = authenticated_client.get(url)
|
||||
assert response.status_code == status.HTTP_202_ACCEPTED
|
||||
assert "Content-Location" in response
|
||||
assert dummy_task_data["id"] in response["Content-Location"]
|
||||
|
||||
def test_report_celery_task_executing(self, authenticated_client, scans_fixture):
|
||||
"""
|
||||
When the scan is not executing but a related celery task exists and is running,
|
||||
the view should return that task data with HTTP 202.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
scan.state = StateChoices.COMPLETED
|
||||
scan.output_location = "dummy"
|
||||
scan.save()
|
||||
|
||||
dummy_task = Task.objects.create(tenant_id=scan.tenant_id)
|
||||
dummy_task.id = "dummy-task-id"
|
||||
dummy_task_data = {"id": dummy_task.id, "state": StateChoices.EXECUTING}
|
||||
|
||||
with patch("api.v1.views.Task.objects.get", return_value=dummy_task), patch(
|
||||
"api.v1.views.TaskSerializer",
|
||||
return_value=type("DummySerializer", (), {"data": dummy_task_data}),
|
||||
):
|
||||
url = reverse("scan-report", kwargs={"pk": scan.id})
|
||||
response = authenticated_client.get(url)
|
||||
assert response.status_code == status.HTTP_202_ACCEPTED
|
||||
assert "Content-Location" in response
|
||||
assert dummy_task_data["id"] in response["Content-Location"]
|
||||
|
||||
def test_report_no_output_location(self, authenticated_client, scans_fixture):
|
||||
"""
|
||||
If the scan does not have an output_location, the view should return a 404.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
scan.state = StateChoices.COMPLETED
|
||||
scan.output_location = ""
|
||||
scan.save()
|
||||
|
||||
url = reverse("scan-report", kwargs={"pk": scan.id})
|
||||
response = authenticated_client.get(url)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
assert response.json()["errors"]["detail"] == "The scan has no reports."
|
||||
|
||||
def test_report_s3_no_credentials(
|
||||
self, authenticated_client, scans_fixture, monkeypatch
|
||||
):
|
||||
"""
|
||||
When output_location is an S3 URL and get_s3_client() raises a credentials exception,
|
||||
the view should return HTTP 403 with the proper error message.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
bucket = "test-bucket"
|
||||
key = "report.zip"
|
||||
scan.output_location = f"s3://{bucket}/{key}"
|
||||
scan.state = StateChoices.COMPLETED
|
||||
scan.save()
|
||||
|
||||
def fake_get_s3_client():
|
||||
raise NoCredentialsError()
|
||||
|
||||
monkeypatch.setattr("api.v1.views.get_s3_client", fake_get_s3_client)
|
||||
|
||||
url = reverse("scan-report", kwargs={"pk": scan.id})
|
||||
response = authenticated_client.get(url)
|
||||
assert response.status_code == status.HTTP_403_FORBIDDEN
|
||||
assert (
|
||||
response.json()["errors"]["detail"]
|
||||
== "There is a problem with credentials."
|
||||
)
|
||||
|
||||
def test_report_s3_success(self, authenticated_client, scans_fixture, monkeypatch):
|
||||
"""
|
||||
When output_location is an S3 URL and the S3 client returns the file successfully,
|
||||
the view should return the ZIP file with HTTP 200 and proper headers.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
bucket = "test-bucket"
|
||||
key = "report.zip"
|
||||
scan.output_location = f"s3://{bucket}/{key}"
|
||||
scan.state = StateChoices.COMPLETED
|
||||
scan.save()
|
||||
|
||||
monkeypatch.setattr(
|
||||
"api.v1.views.env", type("env", (), {"str": lambda self, key: bucket})()
|
||||
)
|
||||
|
||||
class FakeS3Client:
|
||||
def get_object(self, Bucket, Key):
|
||||
assert Bucket == bucket
|
||||
assert Key == key
|
||||
return {"Body": io.BytesIO(b"s3 zip content")}
|
||||
|
||||
monkeypatch.setattr("api.v1.views.get_s3_client", lambda: FakeS3Client())
|
||||
|
||||
url = reverse("scan-report", kwargs={"pk": scan.id})
|
||||
response = authenticated_client.get(url)
|
||||
assert response.status_code == 200
|
||||
expected_filename = os.path.basename("report.zip")
|
||||
content_disposition = response.get("Content-Disposition")
|
||||
assert content_disposition.startswith('attachment; filename="')
|
||||
assert f'filename="{expected_filename}"' in content_disposition
|
||||
assert response.content == b"s3 zip content"
|
||||
|
||||
def test_report_local_file(
|
||||
self, authenticated_client, scans_fixture, tmp_path, monkeypatch
|
||||
):
|
||||
"""
|
||||
When output_location is a local file path, the view should read the file from disk
|
||||
and return it with proper headers.
|
||||
"""
|
||||
scan = scans_fixture[0]
|
||||
file_content = b"local zip file content"
|
||||
file_path = tmp_path / "report.zip"
|
||||
file_path.write_bytes(file_content)
|
||||
|
||||
scan.output_location = str(file_path)
|
||||
scan.state = StateChoices.COMPLETED
|
||||
scan.save()
|
||||
|
||||
monkeypatch.setattr(
|
||||
glob,
|
||||
"glob",
|
||||
lambda pattern: [str(file_path)] if pattern == str(file_path) else [],
|
||||
)
|
||||
|
||||
url = reverse("scan-report", kwargs={"pk": scan.id})
|
||||
response = authenticated_client.get(url)
|
||||
assert response.status_code == 200
|
||||
assert response.content == file_content
|
||||
content_disposition = response.get("Content-Disposition")
|
||||
assert content_disposition.startswith('attachment; filename="')
|
||||
assert f'filename="{file_path.name}"' in content_disposition
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestTaskViewSet:
|
||||
@@ -2369,12 +2554,33 @@ class TestResourceViewSet:
|
||||
@pytest.mark.django_db
|
||||
class TestFindingViewSet:
|
||||
def test_findings_list_none(self, authenticated_client):
|
||||
response = authenticated_client.get(reverse("finding-list"))
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"), {"filter[inserted_at]": TODAY}
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert len(response.json()["data"]) == 0
|
||||
|
||||
def test_findings_list(self, authenticated_client, findings_fixture):
|
||||
def test_findings_list_no_date_filter(self, authenticated_client):
|
||||
response = authenticated_client.get(reverse("finding-list"))
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert response.json()["errors"][0]["code"] == "required"
|
||||
|
||||
def test_findings_date_range_too_large(self, authenticated_client):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"),
|
||||
{
|
||||
"filter[inserted_at.lte]": today_after_n_days(
|
||||
-(settings.FINDINGS_MAX_DAYS_IN_RANGE + 1)
|
||||
),
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert response.json()["errors"][0]["code"] == "invalid"
|
||||
|
||||
def test_findings_list(self, authenticated_client, findings_fixture):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"), {"filter[inserted_at]": TODAY}
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert len(response.json()["data"]) == len(findings_fixture)
|
||||
assert (
|
||||
@@ -2387,14 +2593,15 @@ class TestFindingViewSet:
|
||||
[
|
||||
("resources", ["resources"]),
|
||||
("scan", ["scans"]),
|
||||
("resources.provider,scan", ["resources", "scans", "providers"]),
|
||||
("resources,scan.provider", ["resources", "scans", "providers"]),
|
||||
],
|
||||
)
|
||||
def test_findings_list_include(
|
||||
self, include_values, expected_resources, authenticated_client, findings_fixture
|
||||
):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"), {"include": include_values}
|
||||
reverse("finding-list"),
|
||||
{"include": include_values, "filter[inserted_at]": TODAY},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert len(response.json()["data"]) == len(findings_fixture)
|
||||
@@ -2429,21 +2636,31 @@ class TestFindingViewSet:
|
||||
("service.icontains", "ec", 1),
|
||||
("inserted_at", "2024-01-01", 0),
|
||||
("inserted_at.date", "2024-01-01", 0),
|
||||
("inserted_at.gte", "2024-01-01", 2),
|
||||
("inserted_at.gte", today_after_n_days(-1), 2),
|
||||
(
|
||||
"inserted_at.lte",
|
||||
"2028-12-31",
|
||||
today_after_n_days(1),
|
||||
2,
|
||||
), # TODO: To avoid having to modify this value and to ensure that the tests always work, we should set the time before the fixtures are inserted
|
||||
("updated_at.lte", "2024-01-01", 0),
|
||||
),
|
||||
("updated_at.lte", today_after_n_days(-1), 0),
|
||||
("resource_type.icontains", "prowler", 2),
|
||||
# full text search on finding
|
||||
("search", "dev-qa", 1),
|
||||
("search", "orange juice", 1),
|
||||
# full text search on resource
|
||||
("search", "ec2", 2),
|
||||
# full text search on finding tags
|
||||
("search", "value2", 2),
|
||||
# full text search on finding tags (disabled for now)
|
||||
# ("search", "value2", 2),
|
||||
# Temporary disabled until we implement tag filtering in the UI
|
||||
# ("resource_tag_key", "key", 2),
|
||||
# ("resource_tag_key__in", "key,key2", 2),
|
||||
# ("resource_tag_key__icontains", "key", 2),
|
||||
# ("resource_tag_value", "value", 2),
|
||||
# ("resource_tag_value__in", "value,value2", 2),
|
||||
# ("resource_tag_value__icontains", "value", 2),
|
||||
# ("resource_tags", "key:value", 2),
|
||||
# ("resource_tags", "not:exists", 0),
|
||||
# ("resource_tags", "not:exists,key:value", 2),
|
||||
]
|
||||
),
|
||||
)
|
||||
@@ -2455,9 +2672,13 @@ class TestFindingViewSet:
|
||||
filter_value,
|
||||
expected_count,
|
||||
):
|
||||
filters = {f"filter[{filter_name}]": filter_value}
|
||||
if "inserted_at" not in filter_name:
|
||||
filters["filter[inserted_at]"] = TODAY
|
||||
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"),
|
||||
{f"filter[{filter_name}]": filter_value},
|
||||
filters,
|
||||
)
|
||||
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
@@ -2466,9 +2687,7 @@ class TestFindingViewSet:
|
||||
def test_finding_filter_by_scan_id(self, authenticated_client, findings_fixture):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"),
|
||||
{
|
||||
"filter[scan]": findings_fixture[0].scan.id,
|
||||
},
|
||||
{"filter[scan]": findings_fixture[0].scan.id},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
assert len(response.json()["data"]) == 2
|
||||
@@ -2491,6 +2710,7 @@ class TestFindingViewSet:
|
||||
reverse("finding-list"),
|
||||
{
|
||||
"filter[provider]": findings_fixture[0].scan.provider.id,
|
||||
"filter[inserted_at]": TODAY,
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
@@ -2505,7 +2725,8 @@ class TestFindingViewSet:
|
||||
"filter[provider.in]": [
|
||||
findings_fixture[0].scan.provider.id,
|
||||
findings_fixture[1].scan.provider.id,
|
||||
]
|
||||
],
|
||||
"filter[inserted_at]": TODAY,
|
||||
},
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
@@ -2539,13 +2760,13 @@ class TestFindingViewSet:
|
||||
)
|
||||
def test_findings_sort(self, authenticated_client, sort_field):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"), {"sort": sort_field}
|
||||
reverse("finding-list"), {"sort": sort_field, "filter[inserted_at]": TODAY}
|
||||
)
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
|
||||
def test_findings_sort_invalid(self, authenticated_client):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-list"), {"sort": "invalid"}
|
||||
reverse("finding-list"), {"sort": "invalid", "filter[inserted_at]": TODAY}
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert response.json()["errors"][0]["code"] == "invalid"
|
||||
@@ -2582,59 +2803,74 @@ class TestFindingViewSet:
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
def test_findings_services_regions_retrieve(
|
||||
self, authenticated_client, findings_fixture
|
||||
):
|
||||
def test_findings_metadata_retrieve(self, authenticated_client, findings_fixture):
|
||||
finding_1, *_ = findings_fixture
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d")},
|
||||
)
|
||||
data = response.json()
|
||||
|
||||
expected_services = {"ec2", "s3"}
|
||||
expected_regions = {"eu-west-1", "us-east-1"}
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# expected_tags = {"key": ["value"], "key2": ["value2"]}
|
||||
expected_resource_types = {"prowler-test"}
|
||||
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert set(data["data"]["attributes"]["services"]) == expected_services
|
||||
assert set(data["data"]["attributes"]["regions"]) == expected_regions
|
||||
assert (
|
||||
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
|
||||
)
|
||||
# assert data["data"]["attributes"]["tags"] == expected_tags
|
||||
|
||||
def test_findings_services_regions_severity_retrieve(
|
||||
def test_findings_metadata_severity_retrieve(
|
||||
self, authenticated_client, findings_fixture
|
||||
):
|
||||
finding_1, *_ = findings_fixture
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{
|
||||
"filter[severity__in]": ["low", "medium"],
|
||||
"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d"),
|
||||
"filter[inserted_at]": finding_1.inserted_at.strftime("%Y-%m-%d"),
|
||||
},
|
||||
)
|
||||
data = response.json()
|
||||
|
||||
expected_services = {"s3"}
|
||||
expected_regions = {"eu-west-1"}
|
||||
# Temporary disabled until we implement tag filtering in the UI
|
||||
# expected_tags = {"key": ["value"], "key2": ["value2"]}
|
||||
expected_resource_types = {"prowler-test"}
|
||||
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert set(data["data"]["attributes"]["services"]) == expected_services
|
||||
assert set(data["data"]["attributes"]["regions"]) == expected_regions
|
||||
assert (
|
||||
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
|
||||
)
|
||||
# assert data["data"]["attributes"]["tags"] == expected_tags
|
||||
|
||||
def test_findings_services_regions_future_date(self, authenticated_client):
|
||||
def test_findings_metadata_future_date(self, authenticated_client):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": "2048-01-01"},
|
||||
)
|
||||
data = response.json()
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert data["data"]["attributes"]["services"] == []
|
||||
assert data["data"]["attributes"]["regions"] == []
|
||||
# Temporary disabled until we implement tag filtering in the UI
|
||||
# assert data["data"]["attributes"]["tags"] == {}
|
||||
assert data["data"]["attributes"]["resource_types"] == []
|
||||
|
||||
def test_findings_services_regions_invalid_date(self, authenticated_client):
|
||||
def test_findings_metadata_invalid_date(self, authenticated_client):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": "2048-01-011"},
|
||||
)
|
||||
assert response.json() == {
|
||||
@@ -4249,18 +4485,15 @@ class TestOverviewViewSet:
|
||||
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
|
||||
|
||||
def test_overview_providers_list(
|
||||
self, authenticated_client, findings_fixture, resources_fixture
|
||||
self, authenticated_client, scan_summaries_fixture, resources_fixture
|
||||
):
|
||||
response = authenticated_client.get(reverse("overview-providers"))
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
# Only findings from one provider
|
||||
assert len(response.json()["data"]) == 1
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["total"] == len(
|
||||
findings_fixture
|
||||
)
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 0
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 2
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["manual"] == 0
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["total"] == 4
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 2
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 1
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["muted"] == 1
|
||||
assert response.json()["data"][0]["attributes"]["resources"]["total"] == len(
|
||||
resources_fixture
|
||||
)
|
||||
|
||||
@@ -1,15 +1,25 @@
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from allauth.socialaccount.providers.oauth2.client import OAuth2Client
|
||||
from rest_framework.exceptions import NotFound, ValidationError
|
||||
|
||||
from api.db_router import MainRouter
|
||||
from api.exceptions import InvitationTokenExpiredException
|
||||
from api.models import Invitation, Provider
|
||||
from prowler.providers.aws.aws_provider import AwsProvider
|
||||
from prowler.providers.azure.azure_provider import AzureProvider
|
||||
from prowler.providers.common.models import Connection
|
||||
from prowler.providers.gcp.gcp_provider import GcpProvider
|
||||
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
|
||||
from rest_framework.exceptions import ValidationError, NotFound
|
||||
|
||||
from api.db_router import MainRouter
|
||||
from api.exceptions import InvitationTokenExpiredException
|
||||
from api.models import Provider, Invitation
|
||||
|
||||
class CustomOAuth2Client(OAuth2Client):
|
||||
def __init__(self, client_id, secret, *args, **kwargs):
|
||||
# Remove any duplicate "scope_delimiter" from kwargs
|
||||
# Bug present in dj-rest-auth after version v7.0.1
|
||||
# https://github.com/iMerica/dj-rest-auth/issues/673
|
||||
kwargs.pop("scope_delimiter", None)
|
||||
super().__init__(client_id, secret, *args, **kwargs)
|
||||
|
||||
|
||||
def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:
|
||||
|
||||
@@ -106,7 +106,7 @@ def uuid7_end(uuid_obj: UUID, offset_months: int = 1) -> UUID:
|
||||
|
||||
Args:
|
||||
uuid_obj: A UUIDv7 object.
|
||||
offset_days: Number of months to offset from the given UUID's date. Defaults to 1 to handle if
|
||||
offset_months: Number of months to offset from the given UUID's date. Defaults to 1 to handle if
|
||||
partitions are not being used, if so the value will be the one set at FINDINGS_TABLE_PARTITION_MONTHS.
|
||||
|
||||
Returns:
|
||||
|
||||
@@ -38,7 +38,65 @@ from api.rls import Tenant
|
||||
# Tokens
|
||||
|
||||
|
||||
class TokenSerializer(TokenObtainPairSerializer):
|
||||
def generate_tokens(user: User, tenant_id: str) -> dict:
|
||||
try:
|
||||
refresh = RefreshToken.for_user(user)
|
||||
except InvalidKeyError:
|
||||
# Handle invalid key error
|
||||
raise ValidationError(
|
||||
{
|
||||
"detail": "Token generation failed due to invalid key configuration. Provide valid "
|
||||
"DJANGO_TOKEN_SIGNING_KEY and DJANGO_TOKEN_VERIFYING_KEY in the environment."
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
raise ValidationError({"detail": str(e)})
|
||||
|
||||
# Post-process the tokens
|
||||
# Set the tenant_id
|
||||
refresh["tenant_id"] = tenant_id
|
||||
|
||||
# Set the nbf (not before) claim to the iat (issued at) claim. At this moment, simplejwt does not provide a
|
||||
# way to set the nbf claim
|
||||
refresh.payload["nbf"] = refresh["iat"]
|
||||
|
||||
# Get the access token
|
||||
access = refresh.access_token
|
||||
|
||||
if settings.SIMPLE_JWT["UPDATE_LAST_LOGIN"]:
|
||||
update_last_login(None, user)
|
||||
|
||||
return {"access": str(access), "refresh": str(refresh)}
|
||||
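A minimal sketch of how generate_tokens is consumed by the serializer classes that follow; user is assumed to be an existing authenticated User and the tenant UUID is illustrative:

tokens = generate_tokens(user, tenant_id="12646005-9067-4d2a-a098-8bb378604362")
assert set(tokens) == {"access", "refresh"}
# The refresh token now carries the tenant_id claim and has nbf set to iat,
# as done in the function above.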
|
||||
|
||||
class BaseTokenSerializer(TokenObtainPairSerializer):
|
||||
def custom_validate(self, attrs, social: bool = False):
|
||||
email = attrs.get("email")
|
||||
password = attrs.get("password")
|
||||
tenant_id = str(attrs.get("tenant_id", ""))
|
||||
|
||||
# Authenticate user
|
||||
user = (
|
||||
User.objects.get(email=email)
|
||||
if social
|
||||
else authenticate(username=email, password=password)
|
||||
)
|
||||
if user is None:
|
||||
raise ValidationError("Invalid credentials")
|
||||
|
||||
if tenant_id:
|
||||
if not user.is_member_of_tenant(tenant_id):
|
||||
raise ValidationError("Tenant does not exist or user is not a member.")
|
||||
else:
|
||||
first_membership = user.memberships.order_by("date_joined").first()
|
||||
if first_membership is None:
|
||||
raise ValidationError("User has no memberships.")
|
||||
tenant_id = str(first_membership.tenant_id)
|
||||
|
||||
return generate_tokens(user, tenant_id)
|
||||
|
||||
|
||||
class TokenSerializer(BaseTokenSerializer):
|
||||
email = serializers.EmailField(write_only=True)
|
||||
password = serializers.CharField(write_only=True)
|
||||
tenant_id = serializers.UUIDField(
|
||||
@@ -56,53 +114,25 @@ class TokenSerializer(TokenObtainPairSerializer):
|
||||
resource_name = "tokens"
|
||||
|
||||
def validate(self, attrs):
|
||||
email = attrs.get("email")
|
||||
password = attrs.get("password")
|
||||
tenant_id = str(attrs.get("tenant_id", ""))
|
||||
return super().custom_validate(attrs)
|
||||
|
||||
# Authenticate user
|
||||
user = authenticate(username=email, password=password)
|
||||
if user is None:
|
||||
raise ValidationError("Invalid credentials")
|
||||
|
||||
if tenant_id:
|
||||
if not user.is_member_of_tenant(tenant_id):
|
||||
raise ValidationError("Tenant does not exist or user is not a member.")
|
||||
else:
|
||||
first_membership = user.memberships.order_by("date_joined").first()
|
||||
if first_membership is None:
|
||||
raise ValidationError("User has no memberships.")
|
||||
tenant_id = str(first_membership.tenant_id)
|
||||
class TokenSocialLoginSerializer(BaseTokenSerializer):
|
||||
email = serializers.EmailField(write_only=True)
|
||||
|
||||
# Generate tokens
|
||||
try:
|
||||
refresh = RefreshToken.for_user(user)
|
||||
except InvalidKeyError:
|
||||
# Handle invalid key error
|
||||
raise ValidationError(
|
||||
{
|
||||
"detail": "Token generation failed due to invalid key configuration. Provide valid "
|
||||
"DJANGO_TOKEN_SIGNING_KEY and DJANGO_TOKEN_VERIFYING_KEY in the environment."
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
raise ValidationError({"detail": str(e)})
|
||||
# Output tokens
|
||||
refresh = serializers.CharField(read_only=True)
|
||||
access = serializers.CharField(read_only=True)
|
||||
|
||||
# Post-process the tokens
|
||||
# Set the tenant_id
|
||||
refresh["tenant_id"] = tenant_id
|
||||
class JSONAPIMeta:
|
||||
resource_name = "tokens"
|
||||
|
||||
# Set the nbf (not before) claim to the iat (issued at) claim. At this moment, simplejwt does not provide a
|
||||
# way to set the nbf claim
|
||||
refresh.payload["nbf"] = refresh["iat"]
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.fields.pop("password", None)
|
||||
|
||||
# Get the access token
|
||||
access = refresh.access_token
|
||||
|
||||
if settings.SIMPLE_JWT["UPDATE_LAST_LOGIN"]:
|
||||
update_last_login(None, user)
|
||||
|
||||
return {"access": str(access), "refresh": str(refresh)}
|
||||
def validate(self, attrs):
|
||||
return super().custom_validate(attrs, social=True)
|
||||
|
||||
|
||||
# TODO: Check if we can change the parent class to TokenRefreshSerializer from rest_framework_simplejwt.serializers
|
||||
@@ -140,6 +170,30 @@ class TokenRefreshSerializer(serializers.Serializer):
|
||||
raise ValidationError({"refresh": "Invalid or expired token"})
|
||||
|
||||
|
||||
class TokenSwitchTenantSerializer(serializers.Serializer):
    tenant_id = serializers.UUIDField(
        write_only=True, help_text="The tenant ID for which to request a new token."
    )
    access = serializers.CharField(read_only=True)
    refresh = serializers.CharField(read_only=True)

    class JSONAPIMeta:
        resource_name = "tokens-switch-tenant"

    def validate(self, attrs):
        request = self.context["request"]
        user = request.user

        if not user.is_authenticated:
            raise ValidationError("Invalid or expired token.")

        tenant_id = str(attrs.get("tenant_id"))
        if not user.is_member_of_tenant(tenant_id):
            raise ValidationError("Tenant does not exist or user is not a member.")

        return generate_tokens(user, tenant_id)
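# Illustrative sketch: requesting tokens for another tenant over HTTP, assuming an
# already-authenticated user that belongs to `other_tenant_id`. `tokens` comes from a
# prior login; the "/api/v1" prefix and the JSON:API payload shape are assumptions.
import json

from rest_framework.test import APIClient

client = APIClient()
client.credentials(HTTP_AUTHORIZATION=f"Bearer {tokens['access']}")
response = client.post(
    "/api/v1/tokens/switch",
    data=json.dumps(
        {
            "data": {
                "type": "tokens-switch-tenant",
                "attributes": {"tenant_id": str(other_tenant_id)},
            }
        }
    ),
    content_type="application/vnd.api+json",
)
# response.json()["data"]["attributes"] carries the new "access" and "refresh" tokens.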
|
||||
|
||||
|
||||
# Base
|
||||
|
||||
|
||||
@@ -691,6 +745,43 @@ class ProviderSerializer(RLSSerializer):
|
||||
}
|
||||
|
||||
|
||||
class ProviderIncludeSerializer(RLSSerializer):
|
||||
"""
|
||||
Serializer for the Provider model.
|
||||
"""
|
||||
|
||||
provider = ProviderEnumSerializerField()
|
||||
connection = serializers.SerializerMethodField(read_only=True)
|
||||
|
||||
class Meta:
|
||||
model = Provider
|
||||
fields = [
|
||||
"id",
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
"provider",
|
||||
"uid",
|
||||
"alias",
|
||||
"connection",
|
||||
# "scanner_args",
|
||||
]
|
||||
|
||||
@extend_schema_field(
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"connected": {"type": "boolean"},
|
||||
"last_checked_at": {"type": "string", "format": "date-time"},
|
||||
},
|
||||
}
|
||||
)
|
||||
def get_connection(self, obj):
|
||||
return {
|
||||
"connected": obj.connected,
|
||||
"last_checked_at": obj.connection_last_checked_at,
|
||||
}
|
||||
|
||||
|
||||
class ProviderCreateSerializer(RLSSerializer, BaseWriteSerializer):
|
||||
class Meta:
|
||||
model = Provider
|
||||
@@ -753,6 +844,35 @@ class ScanSerializer(RLSSerializer):
|
||||
]
|
||||
|
||||
|
||||
class ScanIncludeSerializer(RLSSerializer):
|
||||
trigger = serializers.ChoiceField(
|
||||
choices=Scan.TriggerChoices.choices, read_only=True
|
||||
)
|
||||
state = StateEnumSerializerField(read_only=True)
|
||||
|
||||
class Meta:
|
||||
model = Scan
|
||||
fields = [
|
||||
"id",
|
||||
"name",
|
||||
"trigger",
|
||||
"state",
|
||||
"unique_resource_count",
|
||||
"progress",
|
||||
# "scanner_args",
|
||||
"duration",
|
||||
"inserted_at",
|
||||
"started_at",
|
||||
"completed_at",
|
||||
"scheduled_at",
|
||||
"provider",
|
||||
]
|
||||
|
||||
included_serializers = {
|
||||
"provider": "api.v1.serializers.ProviderIncludeSerializer",
|
||||
}
|
||||
|
||||
|
||||
class ScanCreateSerializer(RLSSerializer, BaseWriteSerializer):
|
||||
class Meta:
|
||||
model = Scan
|
||||
@@ -819,6 +939,14 @@ class ScanTaskSerializer(RLSSerializer):
|
||||
]
|
||||
|
||||
|
||||
class ScanReportSerializer(serializers.Serializer):
|
||||
id = serializers.CharField(source="scan")
|
||||
|
||||
class Meta:
|
||||
resource_name = "scan-reports"
|
||||
fields = ["id"]
|
||||
|
||||
|
||||
class ResourceTagSerializer(RLSSerializer):
|
||||
"""
|
||||
Serializer for the ResourceTag model
|
||||
@@ -884,6 +1012,51 @@ class ResourceSerializer(RLSSerializer):
|
||||
return fields
|
||||
|
||||
|
||||
class ResourceIncludeSerializer(RLSSerializer):
|
||||
"""
|
||||
Serializer for the Resource model.
|
||||
"""
|
||||
|
||||
tags = serializers.SerializerMethodField()
|
||||
type_ = serializers.CharField(read_only=True)
|
||||
|
||||
class Meta:
|
||||
model = Resource
|
||||
fields = [
|
||||
"id",
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
"uid",
|
||||
"name",
|
||||
"region",
|
||||
"service",
|
||||
"type_",
|
||||
"tags",
|
||||
]
|
||||
extra_kwargs = {
|
||||
"id": {"read_only": True},
|
||||
"inserted_at": {"read_only": True},
|
||||
"updated_at": {"read_only": True},
|
||||
}
|
||||
|
||||
@extend_schema_field(
|
||||
{
|
||||
"type": "object",
|
||||
"description": "Tags associated with the resource",
|
||||
"example": {"env": "prod", "owner": "johndoe"},
|
||||
}
|
||||
)
|
||||
def get_tags(self, obj):
|
||||
return obj.get_tags(self.context.get("tenant_id"))
|
||||
|
||||
def get_fields(self):
|
||||
"""`type` is a Python reserved keyword."""
|
||||
fields = super().get_fields()
|
||||
type_ = fields.pop("type_")
|
||||
fields["type"] = type_
|
||||
return fields
|
||||
|
||||
|
||||
class FindingSerializer(RLSSerializer):
|
||||
"""
|
||||
Serializer for the Finding model.
|
||||
@@ -905,6 +1078,7 @@ class FindingSerializer(RLSSerializer):
|
||||
"raw_result",
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
"first_seen_at",
|
||||
"url",
|
||||
# Relationships
|
||||
"scan",
|
||||
@@ -912,11 +1086,12 @@ class FindingSerializer(RLSSerializer):
|
||||
]
|
||||
|
||||
included_serializers = {
|
||||
"scan": ScanSerializer,
|
||||
"resources": ResourceSerializer,
|
||||
"scan": ScanIncludeSerializer,
|
||||
"resources": ResourceIncludeSerializer,
|
||||
}
|
||||
|
||||
|
||||
# To be removed when the related endpoint is removed as well
|
||||
class FindingDynamicFilterSerializer(serializers.Serializer):
|
||||
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
@@ -925,6 +1100,19 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
|
||||
resource_name = "finding-dynamic-filters"
|
||||
|
||||
|
||||
class FindingMetadataSerializer(serializers.Serializer):
|
||||
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
resource_types = serializers.ListField(
|
||||
child=serializers.CharField(), allow_empty=True
|
||||
)
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")
|
||||
|
||||
class Meta:
|
||||
resource_name = "findings-metadata"
|
||||
|
||||
|
||||
# Provider secrets
|
||||
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
|
||||
@staticmethod
|
||||
@@ -997,7 +1185,7 @@ class KubernetesProviderSecret(serializers.Serializer):
|
||||
|
||||
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
role_arn = serializers.CharField()
|
||||
external_id = serializers.CharField(required=False)
|
||||
external_id = serializers.CharField()
|
||||
role_session_name = serializers.CharField(required=False)
|
||||
session_duration = serializers.IntegerField(
|
||||
required=False, min_value=900, max_value=43200
|
||||
@@ -1044,6 +1232,10 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
"description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
|
||||
"assumption.",
|
||||
},
|
||||
"external_id": {
|
||||
"type": "string",
|
||||
"description": "An identifier to enhance security for role assumption.",
|
||||
},
|
||||
"aws_access_key_id": {
|
||||
"type": "string",
|
||||
"description": "The AWS access key ID. Only required if the environment lacks pre-configured "
|
||||
@@ -1065,11 +1257,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
"default": 3600,
|
||||
"description": "The duration (in seconds) for the role session.",
|
||||
},
|
||||
"external_id": {
|
||||
"type": "string",
|
||||
"description": "An optional identifier to enhance security for role assumption; may be "
|
||||
"required by the role administrator.",
|
||||
},
|
||||
"role_session_name": {
|
||||
"type": "string",
|
||||
"description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
|
||||
@@ -1083,7 +1270,7 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
"pattern": "^[a-zA-Z0-9=,.@_-]+$",
|
||||
},
|
||||
},
|
||||
"required": ["role_arn"],
|
||||
"required": ["role_arn", "external_id"],
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
@@ -1722,7 +1909,7 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
"properties": {
|
||||
"pass": {"type": "integer"},
|
||||
"fail": {"type": "integer"},
|
||||
"manual": {"type": "integer"},
|
||||
"muted": {"type": "integer"},
|
||||
"total": {"type": "integer"},
|
||||
},
|
||||
}
|
||||
@@ -1731,7 +1918,7 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
return {
|
||||
"pass": obj["findings_passed"],
|
||||
"fail": obj["findings_failed"],
|
||||
"manual": obj["findings_manual"],
|
||||
"muted": obj["findings_muted"],
|
||||
"total": obj["total_findings"],
|
||||
}
|
||||
|
||||
|
||||
@@ -3,28 +3,31 @@ from drf_spectacular.views import SpectacularRedocView
|
||||
from rest_framework_nested import routers
|
||||
|
||||
from api.v1.views import (
|
||||
ComplianceOverviewViewSet,
|
||||
CustomTokenObtainView,
|
||||
CustomTokenRefreshView,
|
||||
CustomTokenSwitchTenantView,
|
||||
FindingViewSet,
|
||||
MembershipViewSet,
|
||||
ProviderGroupViewSet,
|
||||
ProviderGroupProvidersRelationshipView,
|
||||
ProviderSecretViewSet,
|
||||
InvitationViewSet,
|
||||
GithubSocialLoginView,
|
||||
GoogleSocialLoginView,
|
||||
InvitationAcceptViewSet,
|
||||
RoleViewSet,
|
||||
RoleProviderGroupRelationshipView,
|
||||
UserRoleRelationshipView,
|
||||
InvitationViewSet,
|
||||
MembershipViewSet,
|
||||
OverviewViewSet,
|
||||
ComplianceOverviewViewSet,
|
||||
ProviderGroupProvidersRelationshipView,
|
||||
ProviderGroupViewSet,
|
||||
ProviderSecretViewSet,
|
||||
ProviderViewSet,
|
||||
ResourceViewSet,
|
||||
RoleProviderGroupRelationshipView,
|
||||
RoleViewSet,
|
||||
ScanViewSet,
|
||||
ScheduleViewSet,
|
||||
SchemaView,
|
||||
TaskViewSet,
|
||||
TenantMembersViewSet,
|
||||
TenantViewSet,
|
||||
UserRoleRelationshipView,
|
||||
UserViewSet,
|
||||
)
|
||||
|
||||
@@ -56,6 +59,7 @@ users_router.register(r"memberships", MembershipViewSet, basename="user-membersh
|
||||
urlpatterns = [
|
||||
path("tokens", CustomTokenObtainView.as_view(), name="token-obtain"),
|
||||
path("tokens/refresh", CustomTokenRefreshView.as_view(), name="token-refresh"),
|
||||
path("tokens/switch", CustomTokenSwitchTenantView.as_view(), name="token-switch"),
|
||||
path(
|
||||
"providers/secrets",
|
||||
ProviderSecretViewSet.as_view({"get": "list", "post": "create"}),
|
||||
@@ -106,6 +110,8 @@ urlpatterns = [
|
||||
),
|
||||
name="provider_group-providers-relationship",
|
||||
),
|
||||
path("tokens/google", GoogleSocialLoginView.as_view(), name="token-google"),
|
||||
path("tokens/github", GithubSocialLoginView.as_view(), name="token-github"),
|
||||
path("", include(router.urls)),
|
||||
path("", include(tenants_router.urls)),
|
||||
path("", include(users_router.urls)),
|
||||
|
||||
@@ -1,9 +1,23 @@
|
||||
import glob
|
||||
import os
|
||||
|
||||
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
|
||||
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
|
||||
from botocore.exceptions import ClientError, NoCredentialsError, ParamValidationError
|
||||
from celery.result import AsyncResult
|
||||
from config.env import env
|
||||
from config.settings.social_login import (
|
||||
GITHUB_OAUTH_CALLBACK_URL,
|
||||
GOOGLE_OAUTH_CALLBACK_URL,
|
||||
)
|
||||
from dj_rest_auth.registration.views import SocialLoginView
|
||||
from django.conf import settings as django_settings
|
||||
from django.contrib.postgres.aggregates import ArrayAgg
|
||||
from django.contrib.postgres.search import SearchQuery
|
||||
from django.db import transaction
|
||||
from django.db.models import Count, F, OuterRef, Prefetch, Q, Subquery, Sum
|
||||
from django.db.models import Count, Exists, F, OuterRef, Prefetch, Q, Subquery, Sum
|
||||
from django.db.models.functions import Coalesce
|
||||
from django.http import HttpResponse
|
||||
from django.urls import reverse
|
||||
from django.utils.decorators import method_decorator
|
||||
from django.views.decorators.cache import cache_control
|
||||
@@ -30,11 +44,11 @@ from rest_framework.permissions import SAFE_METHODS
|
||||
from rest_framework_json_api.views import RelationshipView, Response
|
||||
from rest_framework_simplejwt.exceptions import InvalidToken, TokenError
|
||||
from tasks.beat import schedule_provider_scan
|
||||
from tasks.jobs.export import get_s3_client
|
||||
from tasks.tasks import (
|
||||
check_provider_connection_task,
|
||||
delete_provider_task,
|
||||
delete_tenant_task,
|
||||
perform_scan_summary_task,
|
||||
perform_scan_task,
|
||||
)
|
||||
|
||||
@@ -67,13 +81,13 @@ from api.models import (
|
||||
ProviderGroupMembership,
|
||||
ProviderSecret,
|
||||
Resource,
|
||||
ResourceFindingMapping,
|
||||
Role,
|
||||
RoleProviderGroupRelationship,
|
||||
Scan,
|
||||
ScanSummary,
|
||||
SeverityChoices,
|
||||
StateChoices,
|
||||
StatusChoices,
|
||||
Task,
|
||||
User,
|
||||
UserRoleRelationship,
|
||||
@@ -81,12 +95,12 @@ from api.models import (
|
||||
from api.pagination import ComplianceOverviewPagination
|
||||
from api.rbac.permissions import Permissions, get_providers, get_role
|
||||
from api.rls import Tenant
|
||||
from api.utils import validate_invitation
|
||||
from api.uuid_utils import datetime_to_uuid7
|
||||
from api.utils import CustomOAuth2Client, validate_invitation
|
||||
from api.v1.serializers import (
|
||||
ComplianceOverviewFullSerializer,
|
||||
ComplianceOverviewSerializer,
|
||||
FindingDynamicFilterSerializer,
|
||||
FindingMetadataSerializer,
|
||||
FindingSerializer,
|
||||
InvitationAcceptSerializer,
|
||||
InvitationCreateSerializer,
|
||||
@@ -113,6 +127,7 @@ from api.v1.serializers import (
|
||||
RoleSerializer,
|
||||
RoleUpdateSerializer,
|
||||
ScanCreateSerializer,
|
||||
ScanReportSerializer,
|
||||
ScanSerializer,
|
||||
ScanUpdateSerializer,
|
||||
ScheduleDailyCreateSerializer,
|
||||
@@ -120,6 +135,8 @@ from api.v1.serializers import (
|
||||
TenantSerializer,
|
||||
TokenRefreshSerializer,
|
||||
TokenSerializer,
|
||||
TokenSocialLoginSerializer,
|
||||
TokenSwitchTenantSerializer,
|
||||
UserCreateSerializer,
|
||||
UserRoleRelationshipSerializer,
|
||||
UserSerializer,
|
||||
@@ -186,13 +203,43 @@ class CustomTokenRefreshView(GenericAPIView):
|
||||
)
|
||||
|
||||
|
||||
@extend_schema(
    tags=["Token"],
    summary="Switch tenant using a valid tenant ID",
    description="Switch tenant by providing a valid tenant ID. The authenticated user must belong to the tenant.",
)
class CustomTokenSwitchTenantView(GenericAPIView):
    permission_classes = [permissions.IsAuthenticated]
    resource_name = "tokens-switch-tenant"
    serializer_class = TokenSwitchTenantSerializer
    http_method_names = ["post"]

    def post(self, request):
        serializer = TokenSwitchTenantSerializer(
            data=request.data, context={"request": request}
        )

        try:
            serializer.is_valid(raise_exception=True)
        except TokenError as e:
            raise InvalidToken(e.args[0])

        return Response(
            data={
                "type": "tokens-switch-tenant",
                "attributes": serializer.validated_data,
            },
            status=status.HTTP_200_OK,
        )

|
||||
|
||||
@extend_schema(exclude=True)
|
||||
class SchemaView(SpectacularAPIView):
|
||||
serializer_class = None
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
spectacular_settings.TITLE = "Prowler API"
|
||||
spectacular_settings.VERSION = "1.1.1"
|
||||
spectacular_settings.VERSION = "1.5.0"
|
||||
spectacular_settings.DESCRIPTION = (
|
||||
"Prowler API specification.\n\nThis file is auto-generated."
|
||||
)
|
||||
@@ -252,6 +299,58 @@ class SchemaView(SpectacularAPIView):
|
||||
return super().get(request, *args, **kwargs)
|
||||
|
||||
|
||||
@extend_schema(exclude=True)
|
||||
class GoogleSocialLoginView(SocialLoginView):
|
||||
adapter_class = GoogleOAuth2Adapter
|
||||
client_class = CustomOAuth2Client
|
||||
callback_url = GOOGLE_OAUTH_CALLBACK_URL
|
||||
|
||||
def get_response(self):
|
||||
original_response = super().get_response()
|
||||
|
||||
if self.user and self.user.is_authenticated:
|
||||
serializer = TokenSocialLoginSerializer(data={"email": self.user.email})
|
||||
try:
|
||||
serializer.is_valid(raise_exception=True)
|
||||
except TokenError as e:
|
||||
raise InvalidToken(e.args[0])
|
||||
return Response(
|
||||
data={
|
||||
"type": "google-social-tokens",
|
||||
"attributes": serializer.validated_data,
|
||||
},
|
||||
status=status.HTTP_200_OK,
|
||||
)
|
||||
return original_response
|
||||
|
||||
|
||||
@extend_schema(exclude=True)
|
||||
class GithubSocialLoginView(SocialLoginView):
|
||||
adapter_class = GitHubOAuth2Adapter
|
||||
client_class = CustomOAuth2Client
|
||||
callback_url = GITHUB_OAUTH_CALLBACK_URL
|
||||
|
||||
def get_response(self):
|
||||
original_response = super().get_response()
|
||||
|
||||
if self.user and self.user.is_authenticated:
|
||||
serializer = TokenSocialLoginSerializer(data={"email": self.user.email})
|
||||
|
||||
try:
|
||||
serializer.is_valid(raise_exception=True)
|
||||
except TokenError as e:
|
||||
raise InvalidToken(e.args[0])
|
||||
|
||||
return Response(
|
||||
data={
|
||||
"type": "github-social-tokens",
|
||||
"attributes": serializer.validated_data,
|
||||
},
|
||||
status=status.HTTP_200_OK,
|
||||
)
|
||||
return original_response
|
||||
|
||||
|
||||
@extend_schema_view(
|
||||
list=extend_schema(
|
||||
tags=["User"],
|
||||
@@ -275,8 +374,8 @@ class SchemaView(SpectacularAPIView):
|
||||
),
|
||||
destroy=extend_schema(
|
||||
tags=["User"],
|
||||
summary="Delete a user account",
|
||||
description="Remove a user account from the system.",
|
||||
summary="Delete the user account",
|
||||
description="Remove the current user account from the system.",
|
||||
),
|
||||
me=extend_schema(
|
||||
tags=["User"],
|
||||
@@ -340,6 +439,12 @@ class UserViewSet(BaseUserViewset):
|
||||
status=status.HTTP_200_OK,
|
||||
)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
if kwargs["pk"] != str(self.request.user.id):
|
||||
raise ValidationError("Only the current user can be deleted.")
|
||||
|
||||
return super().destroy(request, *args, **kwargs)
|
||||
|
||||
@extend_schema(
|
||||
parameters=[
|
||||
OpenApiParameter(
|
||||
@@ -1018,6 +1123,18 @@ class ProviderViewSet(BaseRLSViewSet):
|
||||
request=ScanCreateSerializer,
|
||||
responses={202: OpenApiResponse(response=TaskSerializer)},
|
||||
),
|
||||
report=extend_schema(
|
||||
tags=["Scan"],
|
||||
summary="Download ZIP report",
|
||||
description="Returns a ZIP file containing the requested report",
|
||||
request=ScanReportSerializer,
|
||||
responses={
|
||||
200: OpenApiResponse(description="Report obtained successfully"),
|
||||
202: OpenApiResponse(description="The task is in progress"),
|
||||
403: OpenApiResponse(description="There is a problem with credentials"),
|
||||
404: OpenApiResponse(description="The scan has no reports"),
|
||||
},
|
||||
),
|
||||
)
|
||||
@method_decorator(CACHE_DECORATOR, name="list")
|
||||
@method_decorator(CACHE_DECORATOR, name="retrieve")
|
||||
@@ -1044,7 +1161,7 @@ class ScanViewSet(BaseRLSViewSet):
|
||||
"""
|
||||
if self.request.method in SAFE_METHODS:
|
||||
# No permissions required for GET requests
|
||||
self.required_permissions = [Permissions.MANAGE_PROVIDERS]
|
||||
self.required_permissions = []
|
||||
else:
|
||||
# Require permission for non-GET requests
|
||||
self.required_permissions = [Permissions.MANAGE_SCANS]
|
||||
@@ -1066,6 +1183,10 @@ class ScanViewSet(BaseRLSViewSet):
|
||||
return ScanCreateSerializer
|
||||
elif self.action == "partial_update":
|
||||
return ScanUpdateSerializer
|
||||
elif self.action == "report":
|
||||
if hasattr(self, "response_serializer_class"):
|
||||
return self.response_serializer_class
|
||||
return ScanReportSerializer
|
||||
return super().get_serializer_class()
|
||||
|
||||
def partial_update(self, request, *args, **kwargs):
|
||||
@@ -1083,6 +1204,93 @@ class ScanViewSet(BaseRLSViewSet):
|
||||
)
|
||||
return Response(data=read_serializer.data, status=status.HTTP_200_OK)
|
||||
|
||||
@action(detail=True, methods=["get"], url_name="report")
|
||||
def report(self, request, pk=None):
|
||||
scan_instance = self.get_object()
|
||||
|
||||
if scan_instance.state == StateChoices.EXECUTING:
|
||||
# If the scan is still running, return the task
|
||||
prowler_task = Task.objects.get(id=scan_instance.task.id)
|
||||
self.response_serializer_class = TaskSerializer
|
||||
output_serializer = self.get_serializer(prowler_task)
|
||||
return Response(
|
||||
data=output_serializer.data,
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
headers={
|
||||
"Content-Location": reverse(
|
||||
"task-detail", kwargs={"pk": output_serializer.data["id"]}
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
try:
|
||||
output_celery_task = Task.objects.get(
|
||||
task_runner_task__task_name="scan-report",
|
||||
task_runner_task__task_args__contains=pk,
|
||||
)
|
||||
self.response_serializer_class = TaskSerializer
|
||||
output_serializer = self.get_serializer(output_celery_task)
|
||||
if output_serializer.data["state"] == StateChoices.EXECUTING:
|
||||
# If the task is still running, return the task
|
||||
return Response(
|
||||
data=output_serializer.data,
|
||||
status=status.HTTP_202_ACCEPTED,
|
||||
headers={
|
||||
"Content-Location": reverse(
|
||||
"task-detail", kwargs={"pk": output_serializer.data["id"]}
|
||||
)
|
||||
},
|
||||
)
|
||||
except Task.DoesNotExist:
|
||||
# If the task does not exist, it has been removed from the database
|
||||
pass
|
||||
|
||||
output_location = scan_instance.output_location
|
||||
if not output_location:
|
||||
return Response(
|
||||
{"detail": "The scan has no reports."},
|
||||
status=status.HTTP_404_NOT_FOUND,
|
||||
)
|
||||
|
||||
if scan_instance.output_location.startswith("s3://"):
|
||||
try:
|
||||
s3_client = get_s3_client()
|
||||
except (ClientError, NoCredentialsError, ParamValidationError):
|
||||
return Response(
|
||||
{"detail": "There is a problem with credentials."},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
|
||||
bucket_name = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET")
|
||||
key = output_location[len(f"s3://{bucket_name}/") :]
|
||||
try:
|
||||
s3_object = s3_client.get_object(Bucket=bucket_name, Key=key)
|
||||
except ClientError as e:
|
||||
error_code = e.response.get("Error", {}).get("Code")
|
||||
if error_code == "NoSuchKey":
|
||||
return Response(
|
||||
{"detail": "The scan has no reports."},
|
||||
status=status.HTTP_404_NOT_FOUND,
|
||||
)
|
||||
return Response(
|
||||
{"detail": "There is a problem with credentials."},
|
||||
status=status.HTTP_403_FORBIDDEN,
|
||||
)
|
||||
file_content = s3_object["Body"].read()
|
||||
filename = os.path.basename(output_location.split("/")[-1])
|
||||
else:
|
||||
zip_files = glob.glob(output_location)
|
||||
file_path = zip_files[0]
|
||||
with open(file_path, "rb") as f:
|
||||
file_content = f.read()
|
||||
filename = os.path.basename(file_path)
|
||||
|
||||
response = HttpResponse(
|
||||
file_content, content_type="application/x-zip-compressed"
|
||||
)
|
||||
response["Content-Disposition"] = f'attachment; filename="{filename}"'
|
||||
return response
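# Illustrative sketch: downloading the ZIP report for a finished scan, assuming an
# authenticated APIClient (`client`), a known `scan_id`, and the "/api/v1" prefix.
response = client.get(f"/api/v1/scans/{scan_id}/report")
if response.status_code == 200:
    with open("scan-report.zip", "wb") as f:
        f.write(response.content)
elif response.status_code == 202:
    # Report generation is still running; poll the task referenced by Content-Location.
    task_url = response["Content-Location"]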
|
||||
|
||||
def create(self, request, *args, **kwargs):
|
||||
input_serializer = self.get_serializer(data=request.data)
|
||||
input_serializer.is_valid(raise_exception=True)
|
||||
@@ -1097,10 +1305,6 @@ class ScanViewSet(BaseRLSViewSet):
|
||||
# Disabled for now
|
||||
# checks_to_execute=scan.scanner_args.get("checks_to_execute"),
|
||||
},
|
||||
link=perform_scan_summary_task.si(
|
||||
tenant_id=self.request.tenant_id,
|
||||
scan_id=str(scan.id),
|
||||
),
|
||||
)
|
||||
|
||||
scan.task_id = task.id
|
||||
@@ -1264,6 +1468,14 @@ class ResourceViewSet(BaseRLSViewSet):
|
||||
tags=["Finding"],
|
||||
summary="List all findings",
|
||||
description="Retrieve a list of all findings with options for filtering by various criteria.",
|
||||
parameters=[
|
||||
OpenApiParameter(
|
||||
name="filter[inserted_at]",
|
||||
description="At least one of the variations of the `filter[inserted_at]` filter must be provided.",
|
||||
required=True,
|
||||
type=OpenApiTypes.DATE,
|
||||
)
|
||||
],
|
||||
),
|
||||
retrieve=extend_schema(
|
||||
tags=["Finding"],
|
||||
@@ -1274,33 +1486,51 @@ class ResourceViewSet(BaseRLSViewSet):
|
||||
tags=["Finding"],
|
||||
summary="Retrieve the services and regions that are impacted by findings",
|
||||
description="Fetch services and regions affected in findings.",
|
||||
responses={201: OpenApiResponse(response=MembershipSerializer)},
|
||||
filters=True,
|
||||
deprecated=True,
|
||||
),
|
||||
metadata=extend_schema(
|
||||
tags=["Finding"],
|
||||
summary="Retrieve metadata values from findings",
|
||||
description="Fetch unique metadata values from a set of findings. This is useful for dynamic filtering.",
|
||||
parameters=[
|
||||
OpenApiParameter(
|
||||
name="filter[inserted_at]",
|
||||
description="At least one of the variations of the `filter[inserted_at]` filter must be provided.",
|
||||
required=True,
|
||||
type=OpenApiTypes.DATE,
|
||||
)
|
||||
],
|
||||
filters=True,
|
||||
),
|
||||
)
|
||||
@method_decorator(CACHE_DECORATOR, name="list")
|
||||
@method_decorator(CACHE_DECORATOR, name="retrieve")
|
||||
class FindingViewSet(BaseRLSViewSet):
|
||||
queryset = Finding.objects.all()
|
||||
queryset = Finding.all_objects.all()
|
||||
serializer_class = FindingSerializer
|
||||
prefetch_for_includes = {
|
||||
"__all__": [],
|
||||
"resources": [
|
||||
Prefetch("resources", queryset=Resource.objects.select_related("findings"))
|
||||
],
|
||||
"scan": [Prefetch("scan", queryset=Scan.objects.select_related("findings"))],
|
||||
}
|
||||
http_method_names = ["get"]
|
||||
filterset_class = FindingFilter
|
||||
ordering = ["-id"]
|
||||
http_method_names = ["get"]
|
||||
ordering = ["-inserted_at"]
|
||||
ordering_fields = [
|
||||
"id",
|
||||
"status",
|
||||
"severity",
|
||||
"check_id",
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
]
|
||||
prefetch_for_includes = {
|
||||
"__all__": [],
|
||||
"resources": [
|
||||
Prefetch(
|
||||
"resources",
|
||||
queryset=Resource.all_objects.prefetch_related("tags", "findings"),
|
||||
)
|
||||
],
|
||||
"scan": [
|
||||
Prefetch("scan", queryset=Scan.all_objects.select_related("findings"))
|
||||
],
|
||||
}
|
||||
# RBAC required permissions (implicit -> MANAGE_PROVIDERS enables unlimited visibility; otherwise visibility is
# checked per provider through the provider groups assigned to the role)
|
||||
required_permissions = []
|
||||
@@ -1308,52 +1538,65 @@ class FindingViewSet(BaseRLSViewSet):
|
||||
def get_serializer_class(self):
|
||||
if self.action == "findings_services_regions":
|
||||
return FindingDynamicFilterSerializer
|
||||
elif self.action == "metadata":
|
||||
return FindingMetadataSerializer
|
||||
|
||||
return super().get_serializer_class()
|
||||
|
||||
def get_queryset(self):
|
||||
tenant_id = self.request.tenant_id
|
||||
user_roles = get_role(self.request.user)
|
||||
if user_roles.unlimited_visibility:
|
||||
# User has unlimited visibility, return all scans
|
||||
queryset = Finding.objects.filter(tenant_id=self.request.tenant_id)
|
||||
# User has unlimited visibility, return all findings
|
||||
queryset = Finding.all_objects.filter(tenant_id=tenant_id)
|
||||
else:
|
||||
# User lacks permission, filter providers based on provider groups associated with the role
|
||||
queryset = Finding.objects.filter(
|
||||
# User lacks permission, filter findings based on provider groups associated with the role
|
||||
queryset = Finding.all_objects.filter(
|
||||
scan__provider__in=get_providers(user_roles)
|
||||
)
|
||||
|
||||
search_value = self.request.query_params.get("filter[search]", None)
|
||||
if search_value:
|
||||
# Django's ORM will build a LEFT JOIN and OUTER JOIN on any "through" tables, resulting in duplicates
|
||||
# The duplicates then require a `distinct` query
|
||||
search_query = SearchQuery(
|
||||
search_value, config="simple", search_type="plain"
|
||||
)
|
||||
|
||||
resource_match = Resource.all_objects.filter(
|
||||
text_search=search_query,
|
||||
id__in=ResourceFindingMapping.objects.filter(
|
||||
resource_id=OuterRef("pk"),
|
||||
tenant_id=tenant_id,
|
||||
).values("resource_id"),
|
||||
)
|
||||
|
||||
queryset = queryset.filter(
|
||||
Q(impact_extended__contains=search_value)
|
||||
| Q(status_extended__contains=search_value)
|
||||
| Q(check_id=search_value)
|
||||
| Q(check_id__icontains=search_value)
|
||||
| Q(text_search=search_query)
|
||||
| Q(resources__uid=search_value)
|
||||
| Q(resources__name=search_value)
|
||||
| Q(resources__region=search_value)
|
||||
| Q(resources__service=search_value)
|
||||
| Q(resources__type=search_value)
|
||||
| Q(resources__uid__contains=search_value)
|
||||
| Q(resources__name__contains=search_value)
|
||||
| Q(resources__region__contains=search_value)
|
||||
| Q(resources__service__contains=search_value)
|
||||
| Q(resources__tags__text_search=search_query)
|
||||
| Q(resources__text_search=search_query)
|
||||
).distinct()
|
||||
Q(text_search=search_query) | Q(Exists(resource_match))
|
||||
)
|
||||
|
||||
return queryset
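# Illustrative sketch: the search filter handled above, driven from the API, assuming an
# authenticated APIClient (`client`); the "/api/v1" prefix and the date value are examples.
response = client.get(
    "/api/v1/findings",
    {"filter[search]": "prowler", "filter[inserted_at]": "2025-01-01"},
)
# Matches findings whose own text_search vector, or any related resource, matches "prowler".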
|
||||
|
||||
def inserted_at_to_uuidv7(self, inserted_at):
|
||||
if inserted_at is None:
|
||||
return None
|
||||
return datetime_to_uuid7(inserted_at)
|
||||
def filter_queryset(self, queryset):
|
||||
# Do not apply filters when retrieving specific finding
|
||||
if self.action == "retrieve":
|
||||
return queryset
|
||||
return super().filter_queryset(queryset)
|
||||
|
||||
def list(self, request, *args, **kwargs):
|
||||
base_qs = self.filter_queryset(self.get_queryset())
|
||||
paginated_ids = self.paginate_queryset(base_qs.values_list("id", flat=True))
|
||||
if paginated_ids is not None:
|
||||
ids = list(paginated_ids)
|
||||
findings = (
|
||||
Finding.all_objects.filter(tenant_id=self.request.tenant_id, id__in=ids)
|
||||
.select_related("scan")
|
||||
.prefetch_related("resources")
|
||||
)
|
||||
# Re-sort in Python to preserve ordering:
|
||||
findings = sorted(findings, key=lambda x: ids.index(x.id))
|
||||
serializer = self.get_serializer(findings, many=True)
|
||||
return self.get_paginated_response(serializer.data)
|
||||
serializer = self.get_serializer(base_qs, many=True)
|
||||
return Response(serializer.data)
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="findings_services_regions")
|
||||
def findings_services_regions(self, request):
|
||||
@@ -1376,6 +1619,38 @@ class FindingViewSet(BaseRLSViewSet):
|
||||
|
||||
return Response(data=serializer.data, status=status.HTTP_200_OK)
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="metadata")
|
||||
def metadata(self, request):
|
||||
tenant_id = self.request.tenant_id
|
||||
queryset = self.get_queryset()
|
||||
filtered_queryset = self.filter_queryset(queryset)
|
||||
|
||||
filtered_ids = filtered_queryset.order_by().values("id")
|
||||
|
||||
relevant_resources = Resource.all_objects.filter(
|
||||
tenant_id=tenant_id, findings__id__in=Subquery(filtered_ids)
|
||||
).only("service", "region", "type")
|
||||
|
||||
aggregation = relevant_resources.aggregate(
|
||||
services=ArrayAgg("service", flat=True),
|
||||
regions=ArrayAgg("region", flat=True),
|
||||
resource_types=ArrayAgg("type", flat=True),
|
||||
)
|
||||
|
||||
services = sorted(set(aggregation["services"] or []))
|
||||
regions = sorted({region for region in aggregation["regions"] or [] if region})
|
||||
resource_types = sorted(set(aggregation["resource_types"] or []))
|
||||
|
||||
result = {
|
||||
"services": services,
|
||||
"regions": regions,
|
||||
"resource_types": resource_types,
|
||||
}
|
||||
|
||||
serializer = self.get_serializer(data=result)
|
||||
serializer.is_valid(raise_exception=True)
|
||||
return Response(serializer.data, status=status.HTTP_200_OK)
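# Illustrative sketch: fetching the dynamic filter values, assuming an authenticated
# APIClient (`client`); the "/api/v1" prefix and the date value are examples.
response = client.get(
    "/api/v1/findings/metadata",
    {"filter[inserted_at]": "2025-01-01"},
)
# response.json()["data"]["attributes"] -> {"services": [...], "regions": [...], "resource_types": [...]}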
|
||||
|
||||
|
||||
@extend_schema_view(
|
||||
list=extend_schema(
|
||||
@@ -1938,68 +2213,53 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
@action(detail=False, methods=["get"], url_name="providers")
|
||||
def providers(self, request):
|
||||
tenant_id = self.request.tenant_id
|
||||
# Subquery to get the most recent finding for each uid
|
||||
latest_finding_ids = (
|
||||
Finding.objects.filter(
|
||||
|
||||
latest_scan_ids = (
|
||||
Scan.objects.filter(
|
||||
tenant_id=tenant_id,
|
||||
uid=OuterRef("uid"),
|
||||
scan__provider=OuterRef("scan__provider"),
|
||||
state=StateChoices.COMPLETED,
|
||||
)
|
||||
.order_by("-id") # Most recent by id
|
||||
.values("id")[:1]
|
||||
.order_by("provider_id", "-inserted_at")
|
||||
.distinct("provider_id")
|
||||
.values_list("id", flat=True)
|
||||
)
|
||||
|
||||
# Filter findings to only include the most recent for each uid
|
||||
recent_findings = Finding.objects.filter(
|
||||
tenant_id=tenant_id, id__in=Subquery(latest_finding_ids)
|
||||
)
|
||||
|
||||
# Aggregate findings by provider
|
||||
findings_aggregated = (
|
||||
recent_findings.values("scan__provider__provider")
|
||||
ScanSummary.objects.filter(tenant_id=tenant_id, scan_id__in=latest_scan_ids)
|
||||
.values("scan__provider__provider")
|
||||
.annotate(
|
||||
findings_passed=Count("id", filter=Q(status=StatusChoices.PASS.value)),
|
||||
findings_failed=Count("id", filter=Q(status=StatusChoices.FAIL.value)),
|
||||
findings_manual=Count(
|
||||
"id", filter=Q(status=StatusChoices.MANUAL.value)
|
||||
),
|
||||
total_findings=Count("id"),
|
||||
findings_passed=Coalesce(Sum("_pass"), 0),
|
||||
findings_failed=Coalesce(Sum("fail"), 0),
|
||||
findings_muted=Coalesce(Sum("muted"), 0),
|
||||
total_findings=Coalesce(Sum("total"), 0),
|
||||
)
|
||||
.order_by("-findings_failed")
|
||||
)
|
||||
|
||||
# Aggregate total resources by provider
|
||||
resources_aggregated = (
|
||||
Resource.objects.filter(tenant_id=tenant_id)
|
||||
.values("provider__provider")
|
||||
.annotate(total_resources=Count("id"))
|
||||
)
|
||||
resources_dict = {
|
||||
row["provider__provider"]: row["total_resources"]
|
||||
for row in resources_aggregated
|
||||
}
|
||||
|
||||
# Combine findings and resources data
|
||||
overview = []
|
||||
for findings in findings_aggregated:
|
||||
provider = findings["scan__provider__provider"]
|
||||
total_resources = next(
|
||||
(
|
||||
res["total_resources"]
|
||||
for res in resources_aggregated
|
||||
if res["provider__provider"] == provider
|
||||
),
|
||||
0,
|
||||
)
|
||||
for row in findings_aggregated:
|
||||
provider_type = row["scan__provider__provider"]
|
||||
overview.append(
|
||||
{
|
||||
"provider": provider,
|
||||
"total_resources": total_resources,
|
||||
"total_findings": findings["total_findings"],
|
||||
"findings_passed": findings["findings_passed"],
|
||||
"findings_failed": findings["findings_failed"],
|
||||
"findings_manual": findings["findings_manual"],
|
||||
"provider": provider_type,
|
||||
"total_resources": resources_dict.get(provider_type, 0),
|
||||
"total_findings": row["total_findings"],
|
||||
"findings_passed": row["findings_passed"],
|
||||
"findings_failed": row["findings_failed"],
|
||||
"findings_muted": row["findings_muted"],
|
||||
}
|
||||
)
|
||||
|
||||
serializer = OverviewProviderSerializer(overview, many=True)
|
||||
|
||||
return Response(serializer.data, status=status.HTTP_200_OK)
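# Illustrative sketch: the provider overview aggregates the latest completed scan per
# provider. Assuming an authenticated APIClient (`client`) and the "/api/v1" prefix,
# each returned item follows the shape in the trailing comment; values are examples.
response = client.get("/api/v1/overviews/providers")
# {"provider": "aws", "total_resources": 120, "total_findings": 450,
#  "findings_passed": 300, "findings_failed": 140, "findings_muted": 10}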
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="findings")
|
||||
@@ -2014,7 +2274,7 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
state=StateChoices.COMPLETED,
|
||||
provider_id=OuterRef("scan__provider_id"),
|
||||
)
|
||||
.order_by("-id")
|
||||
.order_by("-inserted_at")
|
||||
.values("id")[:1]
|
||||
)
|
||||
|
||||
@@ -2059,7 +2319,7 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
state=StateChoices.COMPLETED,
|
||||
provider_id=OuterRef("scan__provider_id"),
|
||||
)
|
||||
.order_by("-id")
|
||||
.order_by("-inserted_at")
|
||||
.values("id")[:1]
|
||||
)
|
||||
|
||||
@@ -2095,7 +2355,7 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
state=StateChoices.COMPLETED,
|
||||
provider_id=OuterRef("scan__provider_id"),
|
||||
)
|
||||
.order_by("-id")
|
||||
.order_by("-inserted_at")
|
||||
.values("id")[:1]
|
||||
)
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@ from config.custom_logging import LOGGING # noqa
|
||||
from config.env import BASE_DIR, env # noqa
|
||||
from config.settings.celery import * # noqa
|
||||
from config.settings.partitions import * # noqa
|
||||
from config.settings.sentry import * # noqa
|
||||
from config.settings.social_login import * # noqa
|
||||
|
||||
SECRET_KEY = env("SECRET_KEY", default="secret")
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=False)
|
||||
@@ -29,6 +31,13 @@ INSTALLED_APPS = [
|
||||
"django_celery_results",
|
||||
"django_celery_beat",
|
||||
"rest_framework_simplejwt.token_blacklist",
|
||||
"allauth",
|
||||
"allauth.account",
|
||||
"allauth.socialaccount",
|
||||
"allauth.socialaccount.providers.google",
|
||||
"allauth.socialaccount.providers.github",
|
||||
"dj_rest_auth.registration",
|
||||
"rest_framework.authtoken",
|
||||
]
|
||||
|
||||
MIDDLEWARE = [
|
||||
@@ -42,8 +51,11 @@ MIDDLEWARE = [
|
||||
"django.contrib.messages.middleware.MessageMiddleware",
|
||||
"django.middleware.clickjacking.XFrameOptionsMiddleware",
|
||||
"api.middleware.APILoggingMiddleware",
|
||||
"allauth.account.middleware.AccountMiddleware",
|
||||
]
|
||||
|
||||
SITE_ID = 1
|
||||
|
||||
CORS_ALLOWED_ORIGINS = ["http://localhost", "http://127.0.0.1"]
|
||||
|
||||
ROOT_URLCONF = "config.urls"
|
||||
@@ -207,3 +219,20 @@ CACHE_STALE_WHILE_REVALIDATE = env.int("DJANGO_STALE_WHILE_REVALIDATE", 60)
|
||||
|
||||
|
||||
TESTING = False
|
||||
|
||||
FINDINGS_MAX_DAYS_IN_RANGE = env.int("DJANGO_FINDINGS_MAX_DAYS_IN_RANGE", 7)
|
||||
|
||||
|
||||
# API export settings
|
||||
DJANGO_TMP_OUTPUT_DIRECTORY = env.str(
|
||||
"DJANGO_TMP_OUTPUT_DIRECTORY", "/tmp/prowler_api_output"
|
||||
)
|
||||
DJANGO_FINDINGS_BATCH_SIZE = env.str("DJANGO_FINDINGS_BATCH_SIZE", 1000)
|
||||
|
||||
DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
|
||||
DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID = env.str("DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID", "")
|
||||
DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY = env.str(
|
||||
"DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY", ""
|
||||
)
|
||||
DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN = env.str("DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN", "")
|
||||
DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION = env.str("DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION", "")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from config.django.base import * # noqa
|
||||
from config.env import env
|
||||
|
||||
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=True)
|
||||
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from config.django.base import * # noqa
|
||||
from config.env import env
|
||||
|
||||
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=False)
|
||||
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from config.django.base import * # noqa
|
||||
from config.env import env
|
||||
|
||||
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=False)
|
||||
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
|
||||
|
||||
|
||||
54 api/src/backend/config/settings/sentry.py Normal file
@@ -0,0 +1,54 @@
import sentry_sdk
from config.env import env

IGNORED_EXCEPTIONS = [
    # Authentication Errors from AWS
    "InvalidToken",
    "AccessDeniedException",
    "AuthorizationErrorException",
    "UnrecognizedClientException",
    "UnauthorizedOperation",
    "AuthFailure",
    "InvalidClientTokenId",
    "AccessDenied",
    # Shodan Check
    "No Shodan API Key",
    # For now we don't want to log the RequestLimitExceeded errors
    "RequestLimitExceeded",
    "ThrottlingException",
    "Rate exceeded",
    # The following comes from urllib3
    # eu-west-1 -- HTTPClientError[126]: An HTTP Client raised an unhandled exception: AWSHTTPSConnectionPool(host='hostname.s3.eu-west-1.amazonaws.com', port=443): Pool is closed.
    "Pool is closed",
]


def before_send(event, hint):
    """
    before_send decides whether a Sentry event should be sent or discarded.
    """
    # Ignore logs that match the ignored exceptions
    # https://docs.python.org/3/library/logging.html#logrecord-objects
    if "log_record" in hint:
        log_msg = hint["log_record"].msg
        log_lvl = hint["log_record"].levelno

        # Drop ERROR-level (40) events that match an ignored exception
        if log_lvl == 40 and any(ignored in log_msg for ignored in IGNORED_EXCEPTIONS):
            return
    return event
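# Illustrative sketch of the filtering behaviour: an ERROR-level log record whose
# message contains one of the ignored exceptions is dropped (before_send returns None).
import logging

record = logging.LogRecord(
    name="prowler",
    level=logging.ERROR,
    pathname=__file__,
    lineno=0,
    msg="AccessDenied when calling ListBuckets",
    args=(),
    exc_info=None,
)
assert before_send({"event_id": "example"}, {"log_record": record}) is None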

sentry_sdk.init(
    dsn=env.str("DJANGO_SENTRY_DSN", ""),
    # Add data like request headers and IP for users,
    # see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info
    before_send=before_send,
    send_default_pii=True,
    _experiments={
        # Set continuous_profiling_auto_start to True
        # to automatically start the profiler when possible.
        "continuous_profiling_auto_start": True,
    },
)
53 api/src/backend/config/settings/social_login.py Normal file
@@ -0,0 +1,53 @@
|
||||
from config.env import env
|
||||
|
||||
# Google Oauth settings
|
||||
GOOGLE_OAUTH_CLIENT_ID = env("DJANGO_GOOGLE_OAUTH_CLIENT_ID", default="")
|
||||
GOOGLE_OAUTH_CLIENT_SECRET = env("DJANGO_GOOGLE_OAUTH_CLIENT_SECRET", default="")
|
||||
GOOGLE_OAUTH_CALLBACK_URL = env("DJANGO_GOOGLE_OAUTH_CALLBACK_URL", default="")
|
||||
|
||||
GITHUB_OAUTH_CLIENT_ID = env("DJANGO_GITHUB_OAUTH_CLIENT_ID", default="")
|
||||
GITHUB_OAUTH_CLIENT_SECRET = env("DJANGO_GITHUB_OAUTH_CLIENT_SECRET", default="")
|
||||
GITHUB_OAUTH_CALLBACK_URL = env("DJANGO_GITHUB_OAUTH_CALLBACK_URL", default="")
|
||||
|
||||
# Allauth settings
|
||||
ACCOUNT_LOGIN_METHODS = {"email"} # Use Email / Password authentication
|
||||
ACCOUNT_USERNAME_REQUIRED = False
|
||||
ACCOUNT_EMAIL_REQUIRED = True
|
||||
ACCOUNT_EMAIL_VERIFICATION = "none" # Do not require email confirmation
|
||||
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
|
||||
REST_AUTH = {
|
||||
"TOKEN_MODEL": None,
|
||||
"REST_USE_JWT": True,
|
||||
}
|
||||
# django-allauth (social)
|
||||
# Authenticate if local account with this email address already exists
|
||||
SOCIALACCOUNT_EMAIL_AUTHENTICATION = True
|
||||
# Connect local account and social account if local account with that email address already exists
|
||||
SOCIALACCOUNT_EMAIL_AUTHENTICATION_AUTO_CONNECT = True
|
||||
SOCIALACCOUNT_ADAPTER = "api.adapters.ProwlerSocialAccountAdapter"
|
||||
SOCIALACCOUNT_PROVIDERS = {
|
||||
"google": {
|
||||
"APP": {
|
||||
"client_id": GOOGLE_OAUTH_CLIENT_ID,
|
||||
"secret": GOOGLE_OAUTH_CLIENT_SECRET,
|
||||
"key": "",
|
||||
},
|
||||
"SCOPE": [
|
||||
"email",
|
||||
"profile",
|
||||
],
|
||||
"AUTH_PARAMS": {
|
||||
"access_type": "online",
|
||||
},
|
||||
},
|
||||
"github": {
|
||||
"APP": {
|
||||
"client_id": GITHUB_OAUTH_CLIENT_ID,
|
||||
"secret": GITHUB_OAUTH_CLIENT_SECRET,
|
||||
},
|
||||
"SCOPE": [
|
||||
"user",
|
||||
"read:org",
|
||||
],
|
||||
},
|
||||
}
|
||||
@@ -486,7 +486,7 @@ def scans_fixture(tenants_fixture, providers_fixture):
|
||||
name="Scan 1",
|
||||
provider=provider,
|
||||
trigger=Scan.TriggerChoices.MANUAL,
|
||||
state=StateChoices.AVAILABLE,
|
||||
state=StateChoices.COMPLETED,
|
||||
tenant_id=tenant.id,
|
||||
started_at="2024-01-02T00:00:00Z",
|
||||
)
|
||||
@@ -626,6 +626,7 @@ def findings_fixture(scans_fixture, resources_fixture):
|
||||
"CheckId": "test_check_id",
|
||||
"Description": "test description apple sauce",
|
||||
},
|
||||
first_seen_at="2024-01-02T00:00:00Z",
|
||||
)
|
||||
|
||||
finding1.add_resources([resource1])
|
||||
@@ -651,6 +652,7 @@ def findings_fixture(scans_fixture, resources_fixture):
|
||||
"CheckId": "test_check_id",
|
||||
"Description": "test description orange juice",
|
||||
},
|
||||
first_seen_at="2024-01-02T00:00:00Z",
|
||||
)
|
||||
|
||||
finding2.add_resources([resource2])
|
||||
|
||||
@@ -5,10 +5,14 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
|
||||
from rest_framework_json_api.serializers import ValidationError
|
||||
from tasks.tasks import perform_scheduled_scan_task
|
||||
|
||||
from api.models import Provider
|
||||
from api.db_utils import rls_transaction
|
||||
from api.models import Provider, Scan, StateChoices
|
||||
|
||||
|
||||
def schedule_provider_scan(provider_instance: Provider):
|
||||
tenant_id = str(provider_instance.tenant_id)
|
||||
provider_id = str(provider_instance.id)
|
||||
|
||||
schedule, _ = IntervalSchedule.objects.get_or_create(
|
||||
every=24,
|
||||
period=IntervalSchedule.HOURS,
|
||||
@@ -17,23 +21,9 @@ def schedule_provider_scan(provider_instance: Provider):
|
||||
# Create a unique name for the periodic task
|
||||
task_name = f"scan-perform-scheduled-{provider_instance.id}"
|
||||
|
||||
# Schedule the task
|
||||
_, created = PeriodicTask.objects.get_or_create(
|
||||
interval=schedule,
|
||||
name=task_name,
|
||||
task="scan-perform-scheduled",
|
||||
kwargs=json.dumps(
|
||||
{
|
||||
"tenant_id": str(provider_instance.tenant_id),
|
||||
"provider_id": str(provider_instance.id),
|
||||
}
|
||||
),
|
||||
one_off=False,
|
||||
defaults={
|
||||
"start_time": datetime.now(timezone.utc) + timedelta(hours=24),
|
||||
},
|
||||
)
|
||||
if not created:
|
||||
if PeriodicTask.objects.filter(
|
||||
interval=schedule, name=task_name, task="scan-perform-scheduled"
|
||||
).exists():
|
||||
raise ValidationError(
|
||||
[
|
||||
{
|
||||
@@ -45,9 +35,36 @@ def schedule_provider_scan(provider_instance: Provider):
|
||||
]
|
||||
)
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
scheduled_scan = Scan.objects.create(
|
||||
tenant_id=tenant_id,
|
||||
name="Daily scheduled scan",
|
||||
provider_id=provider_id,
|
||||
trigger=Scan.TriggerChoices.SCHEDULED,
|
||||
state=StateChoices.AVAILABLE,
|
||||
scheduled_at=datetime.now(timezone.utc),
|
||||
)
|
||||
|
||||
# Schedule the task
|
||||
periodic_task_instance = PeriodicTask.objects.create(
|
||||
interval=schedule,
|
||||
name=task_name,
|
||||
task="scan-perform-scheduled",
|
||||
kwargs=json.dumps(
|
||||
{
|
||||
"tenant_id": tenant_id,
|
||||
"provider_id": provider_id,
|
||||
}
|
||||
),
|
||||
one_off=False,
|
||||
start_time=datetime.now(timezone.utc) + timedelta(hours=24),
|
||||
)
|
||||
scheduled_scan.scheduler_task_id = periodic_task_instance.id
|
||||
scheduled_scan.save()
|
||||
|
||||
return perform_scheduled_scan_task.apply_async(
|
||||
kwargs={
|
||||
"tenant_id": str(provider_instance.tenant_id),
|
||||
"provider_id": str(provider_instance.id),
|
||||
"provider_id": provider_id,
|
||||
},
|
||||
)
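# Illustrative sketch: wiring the daily schedule when a provider is created, assuming
# this runs where the tenant RLS context is active; `provider_id` is a placeholder.
provider = Provider.objects.get(pk=provider_id)
task_result = schedule_provider_scan(provider)  # creates the PeriodicTask and fires the first run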
|
||||
|
||||
156 api/src/backend/tasks/jobs/export.py Normal file
@@ -0,0 +1,156 @@
|
||||
import os
|
||||
import zipfile
|
||||
|
||||
import boto3
|
||||
import config.django.base as base
|
||||
from botocore.exceptions import ClientError, NoCredentialsError, ParamValidationError
|
||||
from celery.utils.log import get_task_logger
|
||||
from django.conf import settings
|
||||
|
||||
from prowler.config.config import (
|
||||
csv_file_suffix,
|
||||
html_file_suffix,
|
||||
json_ocsf_file_suffix,
|
||||
output_file_timestamp,
|
||||
)
|
||||
from prowler.lib.outputs.csv.csv import CSV
|
||||
from prowler.lib.outputs.html.html import HTML
|
||||
from prowler.lib.outputs.ocsf.ocsf import OCSF
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
# Predefined mapping for output formats and their configurations
|
||||
OUTPUT_FORMATS_MAPPING = {
|
||||
"csv": {
|
||||
"class": CSV,
|
||||
"suffix": csv_file_suffix,
|
||||
"kwargs": {},
|
||||
},
|
||||
"json-ocsf": {"class": OCSF, "suffix": json_ocsf_file_suffix, "kwargs": {}},
|
||||
"html": {"class": HTML, "suffix": html_file_suffix, "kwargs": {"stats": {}}},
|
||||
}
|
||||
|
||||
|
||||
def _compress_output_files(output_directory: str) -> str:
|
||||
"""
|
||||
Compress output files from all configured output formats into a ZIP archive.
|
||||
Args:
|
||||
output_directory (str): The directory where the output files are located.
|
||||
The function looks up all known suffixes in OUTPUT_FORMATS_MAPPING
|
||||
and compresses those files into a single ZIP.
|
||||
Returns:
|
||||
str: The full path to the newly created ZIP archive.
|
||||
"""
|
||||
zip_path = f"{output_directory}.zip"
|
||||
|
||||
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
|
||||
for suffix in [config["suffix"] for config in OUTPUT_FORMATS_MAPPING.values()]:
|
||||
zipf.write(
|
||||
f"{output_directory}{suffix}",
|
||||
f"output/{output_directory.split('/')[-1]}{suffix}",
|
||||
)
|
||||
|
||||
return zip_path
|
||||
|
||||
|
||||
def get_s3_client():
|
||||
"""
|
||||
Create and return a boto3 S3 client using AWS credentials from environment variables.
|
||||
|
||||
This function attempts to initialize an S3 client by reading the AWS access key, secret key,
|
||||
session token, and region from environment variables. It then validates the client by listing
|
||||
available S3 buckets. If an error occurs during this process (for example, due to missing or
|
||||
invalid credentials), it falls back to creating an S3 client without explicitly provided credentials,
|
||||
which may rely on other configuration sources (e.g., IAM roles).
|
||||
|
||||
Returns:
|
||||
boto3.client: A configured S3 client instance.
|
||||
|
||||
Raises:
|
||||
ClientError, NoCredentialsError, or ParamValidationError if both attempts to create a client fail.
|
||||
"""
|
||||
s3_client = None
|
||||
try:
|
||||
s3_client = boto3.client(
|
||||
"s3",
|
||||
aws_access_key_id=settings.DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY,
|
||||
aws_session_token=settings.DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN,
|
||||
region_name=settings.DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION,
|
||||
)
|
||||
s3_client.list_buckets()
|
||||
except (ClientError, NoCredentialsError, ParamValidationError, ValueError):
|
||||
s3_client = boto3.client("s3")
|
||||
s3_client.list_buckets()
|
||||
|
||||
return s3_client
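# Illustrative sketch: resolving the client and checking that the configured output
# bucket is reachable, assuming the DJANGO_OUTPUT_S3_* settings (or an IAM role) grant access.
s3 = get_s3_client()
if base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET:  # empty string means S3 export is disabled
    s3.head_bucket(Bucket=base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET)  # raises ClientError if unreachable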
|
||||
|
||||
|
||||
def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
|
||||
"""
|
||||
Upload the specified ZIP file to an S3 bucket.
|
||||
If the S3 bucket environment variables are not configured,
|
||||
the function returns None without performing an upload.
|
||||
Args:
|
||||
tenant_id (str): The tenant identifier, used as part of the S3 key prefix.
|
||||
zip_path (str): The local file system path to the ZIP file to be uploaded.
|
||||
scan_id (str): The scan identifier, used as part of the S3 key prefix.
|
||||
Returns:
|
||||
str: The S3 URI of the uploaded file (e.g., "s3://<bucket>/<key>") if successful.
|
||||
None: If the required environment variables for the S3 bucket are not set.
|
||||
Raises:
|
||||
botocore.exceptions.ClientError: If the upload attempt to S3 fails for any reason.
|
||||
"""
|
||||
if not base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET:
|
||||
return
|
||||
|
||||
try:
|
||||
s3 = get_s3_client()
|
||||
s3_key = f"{tenant_id}/{scan_id}/{os.path.basename(zip_path)}"
|
||||
s3.upload_file(
|
||||
Filename=zip_path,
|
||||
Bucket=base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET,
|
||||
Key=s3_key,
|
||||
)
|
||||
return f"s3://{base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET}/{s3_key}"
|
||||
except (ClientError, NoCredentialsError, ParamValidationError, ValueError) as e:
|
||||
logger.error(f"S3 upload failed: {str(e)}")
|
||||
|
||||
|
||||
def _generate_output_directory(
|
||||
output_directory, prowler_provider: object, tenant_id: str, scan_id: str
|
||||
) -> str:
|
||||
"""
|
||||
Generate a file system path for the output directory of a prowler scan.
|
||||
|
||||
This function constructs the output directory path by combining a base
|
||||
temporary output directory, the tenant ID, the scan ID, and details about
|
||||
the prowler provider along with a timestamp. The resulting path is used to
|
||||
store the output files of a prowler scan.
|
||||
|
||||
Note:
|
||||
This function depends on one external variable:
|
||||
- `output_file_timestamp`: A timestamp (as a string) used to uniquely identify the output.
|
||||
|
||||
Args:
|
||||
output_directory (str): The base output directory.
|
||||
prowler_provider (object): An identifier or descriptor for the prowler provider.
|
||||
Typically, this is a string indicating the provider (e.g., "aws").
|
||||
tenant_id (str): The unique identifier for the tenant.
|
||||
scan_id (str): The unique identifier for the scan.
|
||||
|
||||
Returns:
|
||||
str: The constructed file system path for the prowler scan output directory.
|
||||
|
||||
Example:
|
||||
>>> _generate_output_directory("/tmp", "aws", "tenant-1234", "scan-5678")
|
||||
'/tmp/tenant-1234/aws/scan-5678/prowler-output-2023-02-15T12:34:56'
|
||||
"""
|
||||
path = (
|
||||
f"{output_directory}/{tenant_id}/{scan_id}/prowler-output-"
|
||||
f"{prowler_provider}-{output_file_timestamp}"
|
||||
)
|
||||
os.makedirs("/".join(path.split("/")[:-1]), exist_ok=True)
|
||||
|
||||
return path
|
||||
@@ -152,6 +152,9 @@ def perform_prowler_scan(
|
||||
|
||||
for progress, findings in prowler_scan.scan():
|
||||
for finding in findings:
|
||||
if finding is None:
|
||||
logger.error(f"None finding detected on scan {scan_id}.")
|
||||
continue
|
||||
for attempt in range(CELERY_DEADLOCK_ATTEMPTS):
|
||||
try:
|
||||
with rls_transaction(tenant_id):
|
||||
@@ -176,7 +179,10 @@ def perform_prowler_scan(
|
||||
|
||||
# Update resource fields if necessary
|
||||
updated_fields = []
|
||||
if resource_instance.region != finding.region:
|
||||
if (
|
||||
finding.region
|
||||
and resource_instance.region != finding.region
|
||||
):
|
||||
resource_instance.region = finding.region
|
||||
updated_fields.append("region")
|
||||
if resource_instance.service != finding.service_name:
|
||||
@@ -219,24 +225,33 @@ def perform_prowler_scan(
|
||||
# Process finding
|
||||
with rls_transaction(tenant_id):
|
||||
finding_uid = finding.uid
|
||||
last_first_seen_at = None
|
||||
if finding_uid not in last_status_cache:
|
||||
most_recent_finding = (
|
||||
Finding.objects.filter(uid=finding_uid)
|
||||
.order_by("-id")
|
||||
.values("status")
|
||||
Finding.all_objects.filter(
|
||||
tenant_id=tenant_id, uid=finding_uid
|
||||
)
|
||||
.order_by("-inserted_at")
|
||||
.values("status", "first_seen_at")
|
||||
.first()
|
||||
)
|
||||
last_status = (
|
||||
most_recent_finding["status"]
|
||||
if most_recent_finding
|
||||
else None
|
||||
)
|
||||
last_status_cache[finding_uid] = last_status
|
||||
last_status = None
|
||||
if most_recent_finding:
|
||||
last_status = most_recent_finding["status"]
|
||||
last_first_seen_at = most_recent_finding["first_seen_at"]
|
||||
last_status_cache[finding_uid] = last_status, last_first_seen_at
|
||||
else:
|
||||
last_status = last_status_cache[finding_uid]
|
||||
last_status, last_first_seen_at = last_status_cache[finding_uid]
|
||||
|
||||
status = FindingStatus[finding.status]
|
||||
delta = _create_finding_delta(last_status, status)
|
||||
# For findings that existed before this change (first seen with delta != "new"), the first
# occurrence is assigned the current date as first_seen_at, and every subsequent finding
# with the same UID reuses the date carried over from the previous finding.
# For new findings (delta == "new"), first_seen_at is set to the current date on the first
# occurrence and all following findings with that UID inherit that date.
|
||||
if not last_first_seen_at:
|
||||
last_first_seen_at = datetime.now(tz=timezone.utc)
|
||||
|
||||
# Create the finding
|
||||
finding_instance = Finding.objects.create(
|
||||
@@ -251,6 +266,7 @@ def perform_prowler_scan(
|
||||
raw_result=finding.raw,
|
||||
check_id=finding.check_id,
|
||||
scan=scan_instance,
|
||||
first_seen_at=last_first_seen_at,
|
||||
)
|
||||
finding_instance.add_resources([resource_instance])
|
||||
|
||||
@@ -328,9 +344,18 @@ def perform_prowler_scan(
|
||||
total_requirements=compliance["total_requirements"],
|
||||
)
|
||||
)
|
||||
with rls_transaction(tenant_id):
|
||||
ComplianceOverview.objects.bulk_create(compliance_overview_objects)
|
||||
try:
|
||||
with rls_transaction(tenant_id):
|
||||
ComplianceOverview.objects.bulk_create(
|
||||
compliance_overview_objects, batch_size=100
|
||||
)
|
||||
except Exception as overview_exception:
|
||||
import sentry_sdk
|
||||
|
||||
sentry_sdk.capture_exception(overview_exception)
|
||||
logger.error(
|
||||
f"Error storing compliance overview for scan {scan_id}: {overview_exception}"
|
||||
)
|
||||
if exception is not None:
|
||||
raise exception
|
||||
|
||||
@@ -367,7 +392,7 @@ def aggregate_findings(tenant_id: str, scan_id: str):
|
||||
- muted_changed: Muted findings with a delta of 'changed'.
|
||||
"""
|
||||
with rls_transaction(tenant_id):
|
||||
findings = Finding.objects.filter(scan_id=scan_id)
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, scan_id=scan_id)
|
||||
|
||||
aggregation = findings.values(
|
||||
"check_id",
|
||||
|
||||
@@ -1,15 +1,28 @@
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from shutil import rmtree
|
||||
|
||||
from celery import shared_task
|
||||
from celery import chain, shared_task
|
||||
from celery.utils.log import get_task_logger
|
||||
from config.celery import RLSTask
|
||||
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIRECTORY
|
||||
from django_celery_beat.models import PeriodicTask
|
||||
from tasks.jobs.connection import check_provider_connection
|
||||
from tasks.jobs.deletion import delete_provider, delete_tenant
|
||||
from tasks.jobs.export import (
|
||||
OUTPUT_FORMATS_MAPPING,
|
||||
_compress_output_files,
|
||||
_generate_output_directory,
|
||||
_upload_to_s3,
|
||||
)
|
||||
from tasks.jobs.scan import aggregate_findings, perform_prowler_scan
|
||||
from tasks.utils import batched, get_next_execution_datetime
|
||||
|
||||
from api.db_utils import rls_transaction
|
||||
from api.decorators import set_tenant
|
||||
from api.models import Provider, Scan
|
||||
from api.models import Finding, Provider, Scan, ScanSummary, StateChoices
|
||||
from api.utils import initialize_prowler_provider
|
||||
from prowler.lib.outputs.finding import Finding as FindingOutput
|
||||
|
||||
logger = get_task_logger(__name__)
|
||||
|
||||
|
||||
@shared_task(base=RLSTask, name="provider-connection-check")
|
||||
@@ -29,7 +42,7 @@ def check_provider_connection_task(provider_id: str):
|
||||
return check_provider_connection(provider_id=provider_id)
|
||||
|
||||
|
||||
@shared_task(base=RLSTask, name="provider-deletion")
|
||||
@shared_task(base=RLSTask, name="provider-deletion", queue="deletion")
|
||||
@set_tenant
|
||||
def delete_provider_task(provider_id: str):
|
||||
"""
|
||||
@@ -69,13 +82,22 @@ def perform_scan_task(
|
||||
Returns:
|
||||
dict: The result of the scan execution, typically including the status and results of the performed checks.
|
||||
"""
|
||||
return perform_prowler_scan(
|
||||
result = perform_prowler_scan(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=scan_id,
|
||||
provider_id=provider_id,
|
||||
checks_to_execute=checks_to_execute,
|
||||
)
|
||||
|
||||
chain(
|
||||
perform_scan_summary_task.si(tenant_id, scan_id),
|
||||
generate_outputs.si(
|
||||
scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
|
||||
),
|
||||
).apply_async()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@shared_task(base=RLSTask, bind=True, name="scan-perform-scheduled", queue="scans")
|
||||
def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
|
||||
@@ -100,34 +122,49 @@ def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
|
||||
task_id = self.request.id
|
||||
|
||||
with rls_transaction(tenant_id):
|
||||
provider_instance = Provider.objects.get(pk=provider_id)
|
||||
periodic_task_instance = PeriodicTask.objects.get(
|
||||
name=f"scan-perform-scheduled-{provider_id}"
|
||||
)
|
||||
next_scan_date = datetime.combine(
|
||||
datetime.now(timezone.utc), periodic_task_instance.start_time.time()
|
||||
) + timedelta(hours=24)
|
||||
|
||||
scan_instance = Scan.objects.create(
|
||||
next_scan_datetime = get_next_execution_datetime(task_id, provider_id)
|
||||
scan_instance, _ = Scan.objects.get_or_create(
|
||||
tenant_id=tenant_id,
|
||||
name="Daily scheduled scan",
|
||||
provider=provider_instance,
|
||||
provider_id=provider_id,
|
||||
trigger=Scan.TriggerChoices.SCHEDULED,
|
||||
next_scan_at=next_scan_date,
|
||||
task_id=task_id,
|
||||
state__in=(StateChoices.SCHEDULED, StateChoices.AVAILABLE),
|
||||
scheduler_task_id=periodic_task_instance.id,
|
||||
defaults={"state": StateChoices.SCHEDULED},
|
||||
)
|
||||
|
||||
result = perform_prowler_scan(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=str(scan_instance.id),
|
||||
provider_id=provider_id,
|
||||
)
|
||||
perform_scan_summary_task.apply_async(
|
||||
kwargs={
|
||||
"tenant_id": tenant_id,
|
||||
"scan_id": str(scan_instance.id),
|
||||
}
|
||||
)
|
||||
scan_instance.task_id = task_id
|
||||
scan_instance.save()
|
||||
|
||||
try:
|
||||
result = perform_prowler_scan(
|
||||
tenant_id=tenant_id,
|
||||
scan_id=str(scan_instance.id),
|
||||
provider_id=provider_id,
|
||||
)
|
||||
except Exception as e:
|
||||
raise e
|
||||
finally:
|
||||
with rls_transaction(tenant_id):
|
||||
Scan.objects.get_or_create(
|
||||
tenant_id=tenant_id,
|
||||
name="Daily scheduled scan",
|
||||
provider_id=provider_id,
|
||||
trigger=Scan.TriggerChoices.SCHEDULED,
|
||||
state=StateChoices.SCHEDULED,
|
||||
scheduled_at=next_scan_datetime,
|
||||
scheduler_task_id=periodic_task_instance.id,
|
||||
)
|
||||
|
||||
chain(
|
||||
perform_scan_summary_task.si(tenant_id, scan_instance.id),
|
||||
generate_outputs.si(
|
||||
scan_id=str(scan_instance.id), provider_id=provider_id, tenant_id=tenant_id
|
||||
),
|
||||
).apply_async()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@@ -136,6 +173,107 @@ def perform_scan_summary_task(tenant_id: str, scan_id: str):
|
||||
return aggregate_findings(tenant_id=tenant_id, scan_id=scan_id)
|
||||
|
||||
|
||||
@shared_task(name="tenant-deletion")
|
||||
@shared_task(name="tenant-deletion", queue="deletion")
|
||||
def delete_tenant_task(tenant_id: str):
|
||||
return delete_tenant(pk=tenant_id)
|
||||
|
||||
|
||||
@shared_task(
|
||||
base=RLSTask,
|
||||
name="scan-report",
|
||||
queue="scan-reports",
|
||||
)
|
||||
@set_tenant(keep_tenant=True)
|
||||
def generate_outputs(scan_id: str, provider_id: str, tenant_id: str):
|
||||
"""
|
||||
Process findings in batches and generate output files in multiple formats.
|
||||
|
||||
This function retrieves findings associated with a scan, processes them
|
||||
in batches of DJANGO_FINDINGS_BATCH_SIZE findings, and writes each batch to the corresponding output files.
|
||||
It reuses output writer instances across batches, updates them with each
|
||||
batch of transformed findings, and uses a flag to indicate when the final
|
||||
batch is being processed. Finally, the output files are compressed and
|
||||
uploaded to S3.
|
||||
|
||||
Args:
|
||||
tenant_id (str): The tenant identifier.
|
||||
scan_id (str): The scan identifier.
|
||||
provider_id (str): The provider identifier used to generate the outputs.
|
||||
"""
|
||||
# Initialize the prowler provider
|
||||
prowler_provider = initialize_prowler_provider(Provider.objects.get(id=provider_id))
|
||||
|
||||
# Get the provider UID
|
||||
provider_uid = Provider.objects.get(id=provider_id).uid
|
||||
|
||||
# Generate and ensure the output directory exists
|
||||
output_directory = _generate_output_directory(
|
||||
DJANGO_TMP_OUTPUT_DIRECTORY, provider_uid, tenant_id, scan_id
|
||||
)
|
||||
|
||||
# Define auxiliary variables
|
||||
output_writers = {}
|
||||
scan_summary = FindingOutput._transform_findings_stats(
|
||||
ScanSummary.objects.filter(scan_id=scan_id)
|
||||
)
|
||||
|
||||
# Retrieve findings queryset
|
||||
findings_qs = Finding.all_objects.filter(scan_id=scan_id).order_by("uid")
|
||||
|
||||
# Process findings in batches
|
||||
for batch, is_last_batch in batched(
|
||||
findings_qs.iterator(), DJANGO_FINDINGS_BATCH_SIZE
|
||||
):
|
||||
finding_outputs = [
|
||||
FindingOutput.transform_api_finding(finding, prowler_provider)
|
||||
for finding in batch
|
||||
]
|
||||
|
||||
# Generate output files
|
||||
for mode, config in OUTPUT_FORMATS_MAPPING.items():
|
||||
kwargs = dict(config.get("kwargs", {}))
|
||||
if mode == "html":
|
||||
kwargs["provider"] = prowler_provider
|
||||
kwargs["stats"] = scan_summary
|
||||
|
||||
writer_class = config["class"]
|
||||
if writer_class in output_writers:
|
||||
writer = output_writers[writer_class]
|
||||
writer.transform(finding_outputs)
|
||||
writer.close_file = is_last_batch
|
||||
else:
|
||||
writer = writer_class(
|
||||
findings=finding_outputs,
|
||||
file_path=output_directory,
|
||||
file_extension=config["suffix"],
|
||||
from_cli=False,
|
||||
)
|
||||
writer.close_file = is_last_batch
|
||||
output_writers[writer_class] = writer
|
||||
|
||||
# Write the current batch using the writer
|
||||
writer.batch_write_data_to_file(**kwargs)
|
||||
|
||||
# TODO: Refactor the output classes to avoid this manual reset
|
||||
writer._data = []
|
||||
|
||||
# Compress output files
|
||||
output_directory = _compress_output_files(output_directory)
|
||||
|
||||
# Save to configured storage
|
||||
uploaded = _upload_to_s3(tenant_id, output_directory, scan_id)
|
||||
|
||||
if uploaded:
|
||||
output_directory = uploaded
|
||||
uploaded = True
|
||||
# Remove the local files after upload
|
||||
rmtree(DJANGO_TMP_OUTPUT_DIRECTORY, ignore_errors=True)
|
||||
else:
|
||||
uploaded = False
|
||||
|
||||
# Update the scan instance with the output path
|
||||
Scan.all_objects.filter(id=scan_id).update(output_location=output_directory)
|
||||
|
||||
logger.info(f"Scan output files generated, output location: {output_directory}")
|
||||
|
||||
return {"upload": uploaded}
|
||||
|
||||
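To make the writer-reuse contract above easier to follow, here is a stripped-down sketch of the same pattern (DummyWriter and the file path are illustrative stand-ins, not the real Prowler output classes):

    # Hedged sketch: one writer instance per output format, fed batch by batch,
    # and told to close its file only on the last batch.
    from tasks.utils import batched

    class DummyWriter:
        def __init__(self, findings, file_path, file_extension, from_cli=False):
            self._data = list(findings)
            self.close_file = False

        def transform(self, findings):
            self._data.extend(findings)

        def batch_write_data_to_file(self, **kwargs):
            print(f"writing {len(self._data)} findings, close_file={self.close_file}")

    writers = {}
    for batch, is_last_batch in batched(range(5), 2):
        if DummyWriter in writers:
            writer = writers[DummyWriter]
            writer.transform(batch)
        else:
            writer = DummyWriter(batch, file_path="/tmp/example", file_extension=".csv")
            writers[DummyWriter] = writer
        writer.close_file = is_last_batch
        writer.batch_write_data_to_file()
        writer._data = []  # manual reset, mirroring the TODO above
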
@@ -6,6 +6,8 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
|
||||
from rest_framework_json_api.serializers import ValidationError
|
||||
from tasks.beat import schedule_provider_scan
|
||||
|
||||
from api.models import Scan
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestScheduleProviderScan:
|
||||
@@ -15,9 +17,11 @@ class TestScheduleProviderScan:
|
||||
with patch(
|
||||
"tasks.tasks.perform_scheduled_scan_task.apply_async"
|
||||
) as mock_apply_async:
|
||||
assert Scan.all_objects.count() == 0
|
||||
result = schedule_provider_scan(provider_instance)
|
||||
|
||||
assert result is not None
|
||||
assert Scan.all_objects.count() == 1
|
||||
|
||||
mock_apply_async.assert_called_once_with(
|
||||
kwargs={
|
||||
|
||||
api/src/backend/tasks/tests/test_utils.py (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from django_celery_beat.models import IntervalSchedule, PeriodicTask
|
||||
from django_celery_results.models import TaskResult
|
||||
from tasks.utils import batched, get_next_execution_datetime
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
class TestGetNextExecutionDatetime:
|
||||
@pytest.fixture
|
||||
def setup_periodic_task(self, db):
|
||||
# Create a periodic task with an hourly interval
|
||||
interval = IntervalSchedule.objects.create(
|
||||
every=1, period=IntervalSchedule.HOURS
|
||||
)
|
||||
periodic_task = PeriodicTask.objects.create(
|
||||
name="scan-perform-scheduled-123",
|
||||
task="scan-perform-scheduled",
|
||||
interval=interval,
|
||||
)
|
||||
return periodic_task
|
||||
|
||||
@pytest.fixture
|
||||
def setup_task_result(self, db):
|
||||
# Create a task result record
|
||||
task_result = TaskResult.objects.create(
|
||||
task_id="abc123",
|
||||
task_name="scan-perform-scheduled",
|
||||
status="SUCCESS",
|
||||
date_created=datetime.now(timezone.utc) - timedelta(hours=1),
|
||||
result="Success",
|
||||
)
|
||||
return task_result
|
||||
|
||||
def test_get_next_execution_datetime_success(
|
||||
self, setup_task_result, setup_periodic_task
|
||||
):
|
||||
task_result = setup_task_result
|
||||
periodic_task = setup_periodic_task
|
||||
|
||||
# Mock periodic_task_name on TaskResult
|
||||
with patch.object(
|
||||
TaskResult, "periodic_task_name", return_value=periodic_task.name
|
||||
):
|
||||
next_execution = get_next_execution_datetime(
|
||||
task_id=task_result.task_id, provider_id="123"
|
||||
)
|
||||
|
||||
expected_time = task_result.date_created + timedelta(hours=1)
|
||||
assert next_execution == expected_time
|
||||
|
||||
def test_get_next_execution_datetime_fallback_to_provider_id(
|
||||
self, setup_task_result, setup_periodic_task
|
||||
):
|
||||
task_result = setup_task_result
|
||||
|
||||
# Simulate the case where `periodic_task_name` is missing
|
||||
with patch.object(TaskResult, "periodic_task_name", return_value=None):
|
||||
next_execution = get_next_execution_datetime(
|
||||
task_id=task_result.task_id, provider_id="123"
|
||||
)
|
||||
|
||||
expected_time = task_result.date_created + timedelta(hours=1)
|
||||
assert next_execution == expected_time
|
||||
|
||||
def test_get_next_execution_datetime_periodic_task_does_not_exist(
|
||||
self, setup_task_result
|
||||
):
|
||||
task_result = setup_task_result
|
||||
|
||||
with pytest.raises(PeriodicTask.DoesNotExist):
|
||||
get_next_execution_datetime(
|
||||
task_id=task_result.task_id, provider_id="nonexistent"
|
||||
)
|
||||
|
||||
|
||||
class TestBatchedFunction:
|
||||
def test_empty_iterable(self):
|
||||
result = list(batched([], 3))
|
||||
assert result == [([], True)]
|
||||
|
||||
def test_exact_batches(self):
|
||||
result = list(batched([1, 2, 3, 4], 2))
|
||||
expected = [([1, 2], False), ([3, 4], False), ([], True)]
|
||||
assert result == expected
|
||||
|
||||
def test_inexact_batches(self):
|
||||
result = list(batched([1, 2, 3, 4, 5], 2))
|
||||
expected = [([1, 2], False), ([3, 4], False), ([5], True)]
|
||||
assert result == expected
|
||||
|
||||
def test_batch_size_one(self):
|
||||
result = list(batched([1, 2, 3], 1))
|
||||
expected = [([1], False), ([2], False), ([3], False), ([], True)]
|
||||
assert result == expected
|
||||
|
||||
def test_batch_size_greater_than_length(self):
|
||||
result = list(batched([1, 2, 3], 5))
|
||||
expected = [([1, 2, 3], True)]
|
||||
assert result == expected
|
||||
api/src/backend/tasks/utils.py (new file, 50 lines)
@@ -0,0 +1,50 @@
from datetime import datetime, timedelta, timezone

from django_celery_beat.models import PeriodicTask
from django_celery_results.models import TaskResult


def get_next_execution_datetime(task_id: int, provider_id: str) -> datetime:
    task_instance = TaskResult.objects.get(task_id=task_id)
    try:
        periodic_task_instance = PeriodicTask.objects.get(
            name=task_instance.periodic_task_name
        )
    except PeriodicTask.DoesNotExist:
        periodic_task_instance = PeriodicTask.objects.get(
            name=f"scan-perform-scheduled-{provider_id}"
        )

    interval = periodic_task_instance.interval

    current_scheduled_time = datetime.combine(
        datetime.now(timezone.utc).date(),
        task_instance.date_created.time(),
        tzinfo=timezone.utc,
    )

    return current_scheduled_time + timedelta(**{interval.period: interval.every})


def batched(iterable, batch_size):
    """
    Yield successive batches from an iterable.

    Args:
        iterable: An iterable source of items.
        batch_size (int): The number of items per batch.

    Yields:
        tuple: A pair (batch, is_last_batch) where:
            - batch (list): A list of items (with length equal to batch_size,
              except possibly for the last batch).
            - is_last_batch (bool): True if this is the final batch, False otherwise.
    """
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch, False
            batch = []

    yield batch, True
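For reference, the is_last_batch flag produced by batched is what lets generate_outputs close its output writers only on the final pass; a small standalone example of the contract (the values mirror the tests above):

    from tasks.utils import batched

    for batch, is_last_batch in batched([1, 2, 3, 4, 5], 2):
        print(batch, is_last_batch)
    # [1, 2] False
    # [3, 4] False
    # [5] True
    #
    # When the length divides evenly, a trailing empty batch is yielded with
    # is_last_batch=True, e.g. batched([1, 2, 3, 4], 2) produces ([1, 2], False),
    # ([3, 4], False) and then ([], True), exactly as the tests above expect.
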
contrib/k8s/helm/prowler-api/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
apiVersion: v2
|
||||
name: prowler-api
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "5.1.1"
|
||||
contrib/k8s/helm/prowler-api/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-api.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-api.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-api.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
||||
contrib/k8s/helm/prowler-api/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "prowler-api.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "prowler-api.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "prowler-api.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "prowler-api.labels" -}}
|
||||
helm.sh/chart: {{ include "prowler-api.chart" . }}
|
||||
{{ include "prowler-api.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "prowler-api.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "prowler-api.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "prowler-api.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "prowler-api.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
contrib/k8s/helm/prowler-api/templates/configmap.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}-config
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
data:
|
||||
config.yaml: |-
|
||||
{{- toYaml .Values.mainConfig | nindent 4 }}
|
||||
contrib/k8s/helm/prowler-api/templates/deployment.yaml (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "prowler-api.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "prowler-api.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
{{- range $name,$config := .Values.containers }}
|
||||
{{- if $config.enabled }}
|
||||
- name: {{ $name }}
|
||||
securityContext:
|
||||
{{- toYaml $config.securityContext | nindent 12 }}
|
||||
image: "{{ $config.image.repository }}:{{ $config.image.tag | default $.Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ $config.image.pullPolicy }}
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "prowler-api.fullname" $ }}
|
||||
command:
|
||||
{{- toYaml $config.command | nindent 12 }}
|
||||
{{- if $config.ports }}
|
||||
ports:
|
||||
{{- toYaml $config.ports | nindent 12 }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
{{- toYaml $config.livenessProbe | nindent 12 }}
|
||||
readinessProbe:
|
||||
{{- toYaml $config.readinessProbe | nindent 12 }}
|
||||
resources:
|
||||
{{- toYaml $config.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: {{ include "prowler-api.fullname" $ }}-config
|
||||
mountPath: {{ $.Values.releaseConfigRoot }}{{ $.Values.releaseConfigPath }}
|
||||
subPath: config.yaml
|
||||
{{- with .volumeMounts }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: {{ include "prowler-api.fullname" . }}-config
|
||||
configMap:
|
||||
name: {{ include "prowler-api.fullname" . }}-config
|
||||
{{- with .Values.volumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
contrib/k8s/helm/prowler-api/templates/ingress.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.ingress.className }}
|
||||
ingressClassName: {{ . }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- with .pathType }}
|
||||
pathType: {{ . }}
|
||||
{{- end }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ include "prowler-api.fullname" $ }}
|
||||
port:
|
||||
number: {{ $.Values.service.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
contrib/k8s/helm/prowler-api/templates/secrets.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- range $k, $v := .Values.secrets }}
|
||||
{{ $k }}: {{ $v | toString | b64enc | quote }}
|
||||
{{- end }}
|
||||
contrib/k8s/helm/prowler-api/templates/service.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
{{- range $name,$config := .Values.containers }}
|
||||
{{- if $config.ports }}
|
||||
{{- range $p := $config.ports }}
|
||||
- port: {{ $p.containerPort }}
|
||||
targetPort: {{ $p.containerPort }}
|
||||
protocol: TCP
|
||||
name: {{ $config.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "prowler-api.selectorLabels" . | nindent 4 }}
|
||||
contrib/k8s/helm/prowler-api/templates/serviceaccount.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "prowler-api.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
|
||||
{{- end }}
|
||||
contrib/k8s/helm/prowler-api/values.yaml (new file, 625 lines)
@@ -0,0 +1,625 @@
|
||||
# Default values for prowler-api.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# This will set the replicaset count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
|
||||
replicaCount: 1
|
||||
|
||||
# This sets the container image; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
|
||||
containers:
|
||||
prowler-api:
|
||||
enabled: true
|
||||
image:
|
||||
repository: prowlercloud/prowler-api
|
||||
pullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
command: ["/home/prowler/docker-entrypoint.sh", "prod"]
|
||||
worker:
|
||||
enabled: true
|
||||
image:
|
||||
repository: prowlercloud/prowler-api
|
||||
pullPolicy: IfNotPresent
|
||||
command: ["/home/prowler/docker-entrypoint.sh", "worker"]
|
||||
worker-beat:
|
||||
enabled: true
|
||||
image:
|
||||
repository: prowlercloud/prowler-api
|
||||
pullPolicy: IfNotPresent
|
||||
command: ["../docker-entrypoint.sh", "beat"]
|
||||
|
||||
secrets:
|
||||
POSTGRES_HOST:
|
||||
POSTGRES_PORT: 5432
|
||||
POSTGRES_ADMIN_USER:
|
||||
POSTGRES_ADMIN_PASSWORD:
|
||||
POSTGRES_USER:
|
||||
POSTGRES_PASSWORD:
|
||||
POSTGRES_DB:
|
||||
# Valkey settings
|
||||
VALKEY_HOST: valkey-headless
|
||||
VALKEY_PORT: "6379"
|
||||
VALKEY_DB: "0"
|
||||
# Django settings
|
||||
DJANGO_ALLOWED_HOSTS: localhost,127.0.0.1,prowler-api
|
||||
DJANGO_BIND_ADDRESS: 0.0.0.0
|
||||
DJANGO_PORT: "8080"
|
||||
DJANGO_DEBUG: False
|
||||
DJANGO_SETTINGS_MODULE: config.django.production
|
||||
# Select one of [ndjson|human_readable]
|
||||
DJANGO_LOGGING_FORMATTER: human_readable
|
||||
# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
|
||||
# Applies to both Django and Celery Workers
|
||||
DJANGO_LOGGING_LEVEL: INFO
|
||||
# Defaults to the maximum available based on CPU cores if not set.
|
||||
DJANGO_WORKERS: 2
|
||||
# Token lifetime is in minutes
|
||||
DJANGO_ACCESS_TOKEN_LIFETIME: "30"
|
||||
# Token lifetime is in minutes
|
||||
DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
|
||||
DJANGO_CACHE_MAX_AGE: "3600"
|
||||
DJANGO_STALE_WHILE_REVALIDATE: "60"
|
||||
DJANGO_MANAGE_DB_PARTITIONS: "False"
|
||||
# openssl genrsa -out private.pem 2048
|
||||
DJANGO_TOKEN_SIGNING_KEY:
|
||||
# openssl rsa -in private.pem -pubout -out public.pem
|
||||
DJANGO_TOKEN_VERIFYING_KEY:
|
||||
# openssl rand -base64 32
|
||||
DJANGO_SECRETS_ENCRYPTION_KEY:
|
||||
DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400
|
||||
|
||||
releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
|
||||
releaseConfigPath: prowler/config/config.yaml
|
||||
|
||||
mainConfig:
|
||||
# AWS Configuration
|
||||
aws:
|
||||
# AWS Global Configuration
|
||||
# aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
|
||||
mute_non_default_regions: False
|
||||
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
|
||||
# Mutelist:
|
||||
# Accounts:
|
||||
# "*":
|
||||
# Checks:
|
||||
# "*":
|
||||
# Regions:
|
||||
# - "ap-southeast-1"
|
||||
# - "ap-southeast-2"
|
||||
# Resources:
|
||||
# - "*"
|
||||
|
||||
# AWS IAM Configuration
|
||||
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
|
||||
max_unused_access_keys_days: 45
|
||||
# aws.iam_user_console_access_unused --> CIS recommends 45 days
|
||||
max_console_access_days: 45
|
||||
|
||||
# AWS EC2 Configuration
|
||||
# aws.ec2_elastic_ip_shodan
|
||||
# TODO: create common config
|
||||
shodan_api_key: null
|
||||
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
|
||||
max_security_group_rules: 50
|
||||
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
|
||||
max_ec2_instance_age_in_days: 180
|
||||
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
|
||||
# allowed network interface types for security groups open to the Internet
|
||||
ec2_allowed_interface_types:
|
||||
[
|
||||
"api_gateway_managed",
|
||||
"vpc_endpoint",
|
||||
]
|
||||
# allowed network interface owners for security groups open to the Internet
|
||||
ec2_allowed_instance_owners:
|
||||
[
|
||||
"amazon-elb"
|
||||
]
|
||||
# aws.ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports
|
||||
ec2_high_risk_ports:
|
||||
[
|
||||
25,
|
||||
110,
|
||||
135,
|
||||
143,
|
||||
445,
|
||||
3000,
|
||||
4333,
|
||||
5000,
|
||||
5500,
|
||||
8080,
|
||||
8088,
|
||||
]
|
||||
|
||||
# AWS ECS Configuration
|
||||
# aws.ecs_service_fargate_latest_platform_version
|
||||
fargate_linux_latest_version: "1.4.0"
|
||||
fargate_windows_latest_version: "1.0.0"
|
||||
|
||||
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
|
||||
# AWS SSM Configuration (aws.ssm_documents_set_as_public)
|
||||
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
|
||||
# Multi account environment: Any additional trusted account number should be added as a space separated list, e.g.
|
||||
# trusted_account_ids : ["123456789012", "098765432109", "678901234567"]
|
||||
trusted_account_ids: []
|
||||
|
||||
# AWS Cloudwatch Configuration
|
||||
# aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
|
||||
log_group_retention_days: 365
|
||||
|
||||
# AWS CloudFormation Configuration
|
||||
# cloudformation_stack_cdktoolkit_bootstrap_version --> by default is 21
|
||||
recommended_cdk_bootstrap_version: 21
|
||||
|
||||
# AWS AppStream Session Configuration
|
||||
# aws.appstream_fleet_session_idle_disconnect_timeout
|
||||
max_idle_disconnect_timeout_in_seconds: 600 # 10 Minutes
|
||||
# aws.appstream_fleet_session_disconnect_timeout
|
||||
max_disconnect_timeout_in_seconds: 300 # 5 Minutes
|
||||
# aws.appstream_fleet_maximum_session_duration
|
||||
max_session_duration_seconds: 36000 # 10 Hours
|
||||
|
||||
# AWS Lambda Configuration
|
||||
# aws.awslambda_function_using_supported_runtimes
|
||||
obsolete_lambda_runtimes:
|
||||
[
|
||||
"java8",
|
||||
"go1.x",
|
||||
"provided",
|
||||
"python3.6",
|
||||
"python2.7",
|
||||
"python3.7",
|
||||
"nodejs4.3",
|
||||
"nodejs4.3-edge",
|
||||
"nodejs6.10",
|
||||
"nodejs",
|
||||
"nodejs8.10",
|
||||
"nodejs10.x",
|
||||
"nodejs12.x",
|
||||
"nodejs14.x",
|
||||
"nodejs16.x",
|
||||
"dotnet5.0",
|
||||
"dotnet7",
|
||||
"dotnetcore1.0",
|
||||
"dotnetcore2.0",
|
||||
"dotnetcore2.1",
|
||||
"dotnetcore3.1",
|
||||
"ruby2.5",
|
||||
"ruby2.7",
|
||||
]
|
||||
# aws.awslambda_function_vpc_is_in_multi_azs
|
||||
lambda_min_azs: 2
|
||||
|
||||
# AWS Organizations
|
||||
# aws.organizations_scp_check_deny_regions
|
||||
# aws.organizations_enabled_regions: [
|
||||
# "eu-central-1",
|
||||
# "eu-west-1",
|
||||
# "us-east-1"
|
||||
# ]
|
||||
organizations_enabled_regions: []
|
||||
organizations_trusted_delegated_administrators: []
|
||||
|
||||
# AWS ECR
|
||||
# aws.ecr_repositories_scan_vulnerabilities_in_latest_image
|
||||
# CRITICAL
|
||||
# HIGH
|
||||
# MEDIUM
|
||||
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
|
||||
|
||||
# AWS Trusted Advisor
|
||||
# aws.trustedadvisor_premium_support_plan_subscribed
|
||||
verify_premium_support_plans: True
|
||||
|
||||
# AWS CloudTrail Configuration
|
||||
# aws.cloudtrail_threat_detection_privilege_escalation
|
||||
threat_detection_privilege_escalation_threshold: 0.2 # Percentage of actions found to decide if it is a privilege escalation attack event; by default it is 0.2 (20%)
|
||||
threat_detection_privilege_escalation_minutes: 1440 # Past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
|
||||
threat_detection_privilege_escalation_actions:
|
||||
[
|
||||
"AddPermission",
|
||||
"AddRoleToInstanceProfile",
|
||||
"AddUserToGroup",
|
||||
"AssociateAccessPolicy",
|
||||
"AssumeRole",
|
||||
"AttachGroupPolicy",
|
||||
"AttachRolePolicy",
|
||||
"AttachUserPolicy",
|
||||
"ChangePassword",
|
||||
"CreateAccessEntry",
|
||||
"CreateAccessKey",
|
||||
"CreateDevEndpoint",
|
||||
"CreateEventSourceMapping",
|
||||
"CreateFunction",
|
||||
"CreateGroup",
|
||||
"CreateJob",
|
||||
"CreateKeyPair",
|
||||
"CreateLoginProfile",
|
||||
"CreatePipeline",
|
||||
"CreatePolicyVersion",
|
||||
"CreateRole",
|
||||
"CreateStack",
|
||||
"DeleteRolePermissionsBoundary",
|
||||
"DeleteRolePolicy",
|
||||
"DeleteUserPermissionsBoundary",
|
||||
"DeleteUserPolicy",
|
||||
"DetachRolePolicy",
|
||||
"DetachUserPolicy",
|
||||
"GetCredentialsForIdentity",
|
||||
"GetId",
|
||||
"GetPolicyVersion",
|
||||
"GetUserPolicy",
|
||||
"Invoke",
|
||||
"ModifyInstanceAttribute",
|
||||
"PassRole",
|
||||
"PutGroupPolicy",
|
||||
"PutPipelineDefinition",
|
||||
"PutRolePermissionsBoundary",
|
||||
"PutRolePolicy",
|
||||
"PutUserPermissionsBoundary",
|
||||
"PutUserPolicy",
|
||||
"ReplaceIamInstanceProfileAssociation",
|
||||
"RunInstances",
|
||||
"SetDefaultPolicyVersion",
|
||||
"UpdateAccessKey",
|
||||
"UpdateAssumeRolePolicy",
|
||||
"UpdateDevEndpoint",
|
||||
"UpdateEventSourceMapping",
|
||||
"UpdateFunctionCode",
|
||||
"UpdateJob",
|
||||
"UpdateLoginProfile",
|
||||
]
|
||||
# aws.cloudtrail_threat_detection_enumeration
|
||||
threat_detection_enumeration_threshold: 0.3 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
|
||||
threat_detection_enumeration_minutes: 1440 # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
|
||||
threat_detection_enumeration_actions:
|
||||
[
|
||||
"DescribeAccessEntry",
|
||||
"DescribeAccountAttributes",
|
||||
"DescribeAvailabilityZones",
|
||||
"DescribeBundleTasks",
|
||||
"DescribeCarrierGateways",
|
||||
"DescribeClientVpnRoutes",
|
||||
"DescribeCluster",
|
||||
"DescribeDhcpOptions",
|
||||
"DescribeFlowLogs",
|
||||
"DescribeImages",
|
||||
"DescribeInstanceAttribute",
|
||||
"DescribeInstanceInformation",
|
||||
"DescribeInstanceTypes",
|
||||
"DescribeInstances",
|
||||
"DescribeInstances",
|
||||
"DescribeKeyPairs",
|
||||
"DescribeLogGroups",
|
||||
"DescribeLogStreams",
|
||||
"DescribeOrganization",
|
||||
"DescribeRegions",
|
||||
"DescribeSecurityGroups",
|
||||
"DescribeSnapshotAttribute",
|
||||
"DescribeSnapshotTierStatus",
|
||||
"DescribeSubscriptionFilters",
|
||||
"DescribeTransitGatewayMulticastDomains",
|
||||
"DescribeVolumes",
|
||||
"DescribeVolumesModifications",
|
||||
"DescribeVpcEndpointConnectionNotifications",
|
||||
"DescribeVpcs",
|
||||
"GetAccount",
|
||||
"GetAccountAuthorizationDetails",
|
||||
"GetAccountSendingEnabled",
|
||||
"GetBucketAcl",
|
||||
"GetBucketLogging",
|
||||
"GetBucketPolicy",
|
||||
"GetBucketReplication",
|
||||
"GetBucketVersioning",
|
||||
"GetCallerIdentity",
|
||||
"GetCertificate",
|
||||
"GetConsoleScreenshot",
|
||||
"GetCostAndUsage",
|
||||
"GetDetector",
|
||||
"GetEbsDefaultKmsKeyId",
|
||||
"GetEbsEncryptionByDefault",
|
||||
"GetFindings",
|
||||
"GetFlowLogsIntegrationTemplate",
|
||||
"GetIdentityVerificationAttributes",
|
||||
"GetInstances",
|
||||
"GetIntrospectionSchema",
|
||||
"GetLaunchTemplateData",
|
||||
"GetLaunchTemplateData",
|
||||
"GetLogRecord",
|
||||
"GetParameters",
|
||||
"GetPolicyVersion",
|
||||
"GetPublicAccessBlock",
|
||||
"GetQueryResults",
|
||||
"GetRegions",
|
||||
"GetSMSAttributes",
|
||||
"GetSMSSandboxAccountStatus",
|
||||
"GetSendQuota",
|
||||
"GetTransitGatewayRouteTableAssociations",
|
||||
"GetUserPolicy",
|
||||
"HeadObject",
|
||||
"ListAccessKeys",
|
||||
"ListAccounts",
|
||||
"ListAllMyBuckets",
|
||||
"ListAssociatedAccessPolicies",
|
||||
"ListAttachedUserPolicies",
|
||||
"ListClusters",
|
||||
"ListDetectors",
|
||||
"ListDomains",
|
||||
"ListFindings",
|
||||
"ListHostedZones",
|
||||
"ListIPSets",
|
||||
"ListIdentities",
|
||||
"ListInstanceProfiles",
|
||||
"ListObjects",
|
||||
"ListOrganizationalUnitsForParent",
|
||||
"ListOriginationNumbers",
|
||||
"ListPolicyVersions",
|
||||
"ListRoles",
|
||||
"ListRoles",
|
||||
"ListRules",
|
||||
"ListServiceQuotas",
|
||||
"ListSubscriptions",
|
||||
"ListTargetsByRule",
|
||||
"ListTopics",
|
||||
"ListUsers",
|
||||
"LookupEvents",
|
||||
"Search",
|
||||
]
|
||||
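As a rough illustration of how the enumeration threshold plays out, assuming (as the comments suggest) that the share of the listed actions observed in the lookback window is compared against the threshold:

    # Hedged illustration only; the exact matching logic lives in the
    # cloudtrail_threat_detection_enumeration check, not in this config file.
    threshold = 0.3                # threat_detection_enumeration_threshold
    listed_actions = 90            # illustrative size of the action list above
    matched_actions = 30           # listed actions seen in the last 1440 minutes
    ratio = matched_actions / listed_actions      # roughly 0.33
    is_enumeration_attack = ratio > threshold     # True, so a finding is raised
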
# aws.cloudtrail_threat_detection_llm_jacking
|
||||
threat_detection_llm_jacking_threshold: 0.4 # Percentage of actions found to decide if it is an LLM Jacking attack event, by default is 0.4 (40%)
|
||||
threat_detection_llm_jacking_minutes: 1440 # Past minutes to search from now for LLM Jacking attacks, by default is 1440 minutes (24 hours)
|
||||
threat_detection_llm_jacking_actions:
|
||||
[
|
||||
"PutUseCaseForModelAccess", # Submits a use case for model access, providing justification (Write).
|
||||
"PutFoundationModelEntitlement", # Grants entitlement for accessing a foundation model (Write).
|
||||
"PutModelInvocationLoggingConfiguration", # Configures logging for model invocations (Write).
|
||||
"CreateFoundationModelAgreement", # Creates a new agreement to use a foundation model (Write).
|
||||
"InvokeModel", # Invokes a specified Bedrock model for inference using provided prompt and parameters (Read).
|
||||
"InvokeModelWithResponseStream", # Invokes a Bedrock model for inference with real-time token streaming (Read).
|
||||
"GetUseCaseForModelAccess", # Retrieves an existing use case for model access (Read).
|
||||
"GetModelInvocationLoggingConfiguration", # Fetches the logging configuration for model invocations (Read).
|
||||
"GetFoundationModelAvailability", # Checks the availability of a foundation model for use (Read).
|
||||
"ListFoundationModelAgreementOffers", # Lists available agreement offers for accessing foundation models (List).
|
||||
"ListFoundationModels", # Lists the available foundation models in Bedrock (List).
|
||||
"ListProvisionedModelThroughputs", # Lists the provisioned throughput for previously created models (List).
|
||||
]
|
||||
|
||||
# AWS RDS Configuration
|
||||
# aws.rds_instance_backup_enabled
|
||||
# Whether to check RDS instance replicas or not
|
||||
check_rds_instance_replicas: False
|
||||
|
||||
# AWS ACM Configuration
|
||||
# aws.acm_certificates_expiration_check
|
||||
days_to_expire_threshold: 7
|
||||
# aws.acm_certificates_with_secure_key_algorithms
|
||||
insecure_key_algorithms:
|
||||
[
|
||||
"RSA-1024",
|
||||
"P-192",
|
||||
"SHA-1",
|
||||
]
|
||||
|
||||
# AWS EKS Configuration
|
||||
# aws.eks_control_plane_logging_all_types_enabled
|
||||
# EKS control plane logging types that must be enabled
|
||||
eks_required_log_types:
|
||||
[
|
||||
"api",
|
||||
"audit",
|
||||
"authenticator",
|
||||
"controllerManager",
|
||||
"scheduler",
|
||||
]
|
||||
|
||||
# aws.eks_cluster_uses_a_supported_version
|
||||
# EKS clusters must be version 1.28 or higher
|
||||
eks_cluster_oldest_version_supported: "1.28"
|
||||
|
||||
# AWS CodeBuild Configuration
|
||||
# aws.codebuild_project_no_secrets_in_variables
|
||||
# CodeBuild sensitive variables that are excluded from the check
|
||||
excluded_sensitive_environment_variables:
|
||||
[
|
||||
|
||||
]
|
||||
|
||||
# AWS ELB Configuration
|
||||
# aws.elb_is_in_multiple_az
|
||||
# Minimum number of Availability Zones that a CLB must be in
|
||||
elb_min_azs: 2
|
||||
|
||||
# AWS ELBv2 Configuration
|
||||
# aws.elbv2_is_in_multiple_az
|
||||
# Minimum number of Availability Zones that an ELBv2 must be in
|
||||
elbv2_min_azs: 2
|
||||
|
||||
|
||||
# AWS Secrets Configuration
|
||||
# Patterns to ignore in the secrets checks
|
||||
secrets_ignore_patterns: []
|
||||
|
||||
# AWS Secrets Manager Configuration
|
||||
# aws.secretsmanager_secret_unused
|
||||
# Maximum number of days a secret can be unused
|
||||
max_days_secret_unused: 90
|
||||
|
||||
# aws.secretsmanager_secret_rotated_periodically
|
||||
# Maximum number of days a secret should be rotated
|
||||
max_days_secret_unrotated: 90
|
||||
|
||||
# AWS Kinesis Configuration
|
||||
# Minimum retention period in hours for Kinesis streams
|
||||
min_kinesis_stream_retention_hours: 168 # 7 days
|
||||
|
||||
|
||||
# Azure Configuration
|
||||
azure:
|
||||
# Azure Network Configuration
|
||||
# azure.network_public_ip_shodan
|
||||
# TODO: create common config
|
||||
shodan_api_key: null
|
||||
|
||||
# Azure App Service
|
||||
# azure.app_ensure_php_version_is_latest
|
||||
php_latest_version: "8.2"
|
||||
# azure.app_ensure_python_version_is_latest
|
||||
python_latest_version: "3.12"
|
||||
# azure.app_ensure_java_version_is_latest
|
||||
java_latest_version: "17"
|
||||
|
||||
# Azure SQL Server
|
||||
# azure.sqlserver_minimal_tls_version
|
||||
recommended_minimal_tls_versions:
|
||||
[
|
||||
"1.2",
|
||||
"1.3",
|
||||
]
|
||||
|
||||
# GCP Configuration
|
||||
gcp:
|
||||
# GCP Compute Configuration
|
||||
# gcp.compute_public_address_shodan
|
||||
shodan_api_key: null
|
||||
|
||||
# Kubernetes Configuration
|
||||
kubernetes:
|
||||
# Kubernetes API Server
|
||||
# kubernetes.apiserver_audit_log_maxbackup_set
|
||||
audit_log_maxbackup: 10
|
||||
# kubernetes.apiserver_audit_log_maxsize_set
|
||||
audit_log_maxsize: 100
|
||||
# kubernetes.apiserver_audit_log_maxage_set
|
||||
audit_log_maxage: 30
|
||||
# kubernetes.apiserver_strong_ciphers_only
|
||||
apiserver_strong_ciphers:
|
||||
[
|
||||
"TLS_AES_128_GCM_SHA256",
|
||||
"TLS_AES_256_GCM_SHA384",
|
||||
"TLS_CHACHA20_POLY1305_SHA256",
|
||||
]
|
||||
# Kubelet
|
||||
# kubernetes.kubelet_strong_ciphers_only
|
||||
kubelet_strong_ciphers:
|
||||
[
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
]
|
||||
|
||||
|
||||
# This is for the secrets used to pull an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# This is to override the chart name.
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# This is for setting Kubernetes Annotations to a Pod.
|
||||
# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
podAnnotations: {}
|
||||
# This is for setting Kubernetes Labels to a Pod.
|
||||
# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
||||
podLabels: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
|
||||
service:
|
||||
# This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
|
||||
type: ClusterIP
|
||||
# This sets the ports; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
|
||||
port: 80
|
||||
|
||||
# This block is for setting up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: chart-example.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
# This is to set up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
|
||||
# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 100
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volumes: []
|
||||
# - name: foo
|
||||
# secret:
|
||||
# secretName: mysecret
|
||||
# optional: false
|
||||
|
||||
# Additional volumeMounts on the output Deployment definition.
|
||||
volumeMounts: []
|
||||
# - name: foo
|
||||
# mountPath: "/etc/foo"
|
||||
# readOnly: true
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
contrib/k8s/helm/prowler-cli/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
@@ -39,4 +39,3 @@ spec:
|
||||
path: {{ $value }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
contrib/k8s/helm/prowler-ui/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
contrib/k8s/helm/prowler-ui/Chart.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
apiVersion: v2
|
||||
name: prowler-ui
|
||||
description: A Helm chart for Kubernetes
|
||||
type: application
|
||||
version: 0.1.0
|
||||
appVersion: "5.1.1"
|
||||
contrib/k8s/helm/prowler-ui/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-ui.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-ui.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
||||
62  contrib/k8s/helm/prowler-ui/templates/_helpers.tpl  Normal file
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "prowler-ui.labels" -}}
helm.sh/chart: {{ include "prowler-ui.chart" . }}
{{ include "prowler-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "prowler-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
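These helpers drive every resource name in the chart: `prowler-ui.fullname` prefers an explicit `fullnameOverride`, otherwise it combines the release name with the chart name and truncates the result to 63 characters. A hedged sketch of how the two overrides in values.yaml interact with it (the release name and resulting names below are illustrative):

```yaml
# Illustrative values; resulting names assume a release called "prod"
nameOverride: ""         # default: the chart name "prowler-ui" is used
fullnameOverride: ""     # default: resources are named "prod-prowler-ui"
# fullnameOverride: "ui" # would name every resource exactly "ui" (still truncated at 63 chars)
```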
72  contrib/k8s/helm/prowler-ui/templates/deployment.yaml  Normal file
@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "prowler-ui.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "prowler-ui.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "prowler-ui.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          envFrom:
            - secretRef:
                name: {{ include "prowler-ui.fullname" $ }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          {{- with .Values.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      {{- with .Values.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
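One detail worth noting in this template: the pod template carries a `checksum/config` annotation computed as the SHA-256 of the rendered secrets.yaml, so any change to `.Values.secrets` changes the pod template and makes the Deployment roll its pods. An illustrative fragment of what the rendered output might look like (the hash value here is made up):

```yaml
# Illustrative rendered pod-template fragment; the hash value is not real
template:
  metadata:
    annotations:
      checksum/config: "3f4c9d0e..."   # recomputed whenever the rendered Secret manifest changes
```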
43  contrib/k8s/helm/prowler-ui/templates/ingress.yaml  Normal file
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.className }}
  ingressClassName: {{ . }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- with .pathType }}
            pathType: {{ . }}
            {{- end }}
            backend:
              service:
                name: {{ include "prowler-ui.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
{{- end }}
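The Ingress is only rendered when `ingress.enabled` is true; hosts, paths, and TLS entries come straight from values. A minimal sketch of a values override that would turn it on (the host name, ingress class, and Secret name are hypothetical, not chart defaults):

```yaml
ingress:
  enabled: true
  className: nginx                  # hypothetical ingress class
  hosts:
    - host: prowler-ui.example.com  # hypothetical host
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: prowler-ui-tls    # hypothetical, pre-created TLS Secret
      hosts:
        - prowler-ui.example.com
```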
11  contrib/k8s/helm/prowler-ui/templates/secrets.yaml  Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
type: Opaque
data:
  {{- range $k, $v := .Values.secrets }}
  {{ $k }}: {{ $v | toString | b64enc | quote }}
  {{- end }}
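This template simply base64-encodes every key under `.Values.secrets`, and the Deployment above injects the whole Secret via `envFrom`, so each values entry ends up as an environment variable in the container. For example, the default `SITE_URL: http://localhost:3000` from values.yaml would render roughly as follows (release name assumed, not taken from the source):

```yaml
# Illustrative rendered Secret for a single key, assuming a release named "prowler"
apiVersion: v1
kind: Secret
metadata:
  name: prowler-prowler-ui
type: Opaque
data:
  SITE_URL: "aHR0cDovL2xvY2FsaG9zdDozMDAw"   # base64 of "http://localhost:3000"
```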
15  contrib/k8s/helm/prowler-ui/templates/service.yaml  Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "prowler-ui.selectorLabels" . | nindent 4 }}
13  contrib/k8s/helm/prowler-ui/templates/serviceaccount.yaml  Normal file
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prowler-ui.serviceAccountName" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
132  contrib/k8s/helm/prowler-ui/values.yaml  Normal file
@@ -0,0 +1,132 @@
# Default values for prowler-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# This will set the replicaset count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1

# This sets the container image; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
  repository: prowlercloud/prowler-ui
  # This sets the pull policy for images.
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

# This is for the secrets for pulling an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

secrets:
  SITE_URL: http://localhost:3000
  API_BASE_URL: http://prowler-api:8080/api/v1
  NEXT_PUBLIC_API_DOCS_URL: http://prowler-api:8080/api/v1/docs
  AUTH_TRUST_HOST: True
  UI_PORT: 3000
  # openssl rand -base64 32
  AUTH_SECRET:

# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  # This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
  type: ClusterIP
  # This sets the ports; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
  port: 3000

# This block is for setting up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

# This is to set up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
#   secret:
#     secretName: mysecret
#     optional: false

# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true

nodeSelector: {}

tolerations: []

affinity: {}
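Since `AUTH_SECRET` ships empty, it has to be supplied at deploy time; the `# openssl rand -base64 32` comment next to it suggests how to generate a value. A hedged sketch of a site-specific override file (file name, URL, and resource figures are illustrative; only the chart path `contrib/k8s/helm/prowler-ui` comes from the diff):

```yaml
# my-values.yaml -- hypothetical site-specific overrides
secrets:
  SITE_URL: https://prowler-ui.example.com                    # illustrative external URL
  AUTH_SECRET: "<paste output of: openssl rand -base64 32>"   # placeholder; never commit real secrets
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m       # illustrative limits
    memory: 512Mi
```

It could then be applied with something like `helm install prowler-ui contrib/k8s/helm/prowler-ui -f my-values.yaml`; Helm merges the `secrets` map with the defaults, so keys not listed in the override keep the values shown above.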
24  dashboard/compliance/cis_1_10_kubernetes.py  Normal file
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
25  dashboard/compliance/cis_3_0_azure.py  Normal file
@@ -0,0 +1,25 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):

    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
24  dashboard/compliance/cis_4_0_aws.py  Normal file
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
23  dashboard/compliance/iso27001_2022_aws.py  Normal file
@@ -0,0 +1,23 @@
import warnings

from dashboard.common_methods import get_section_container_iso

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ATTRIBUTES_CATEGORY",
            "REQUIREMENTS_ATTRIBUTES_OBJETIVE_ID",
            "REQUIREMENTS_ATTRIBUTES_OBJETIVE_NAME",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]
    return get_section_container_iso(
        aux, "REQUIREMENTS_ATTRIBUTES_CATEGORY", "REQUIREMENTS_ATTRIBUTES_OBJETIVE_ID"
    )
24  dashboard/compliance/pci_4_0_aws.py  Normal file
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]

    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
23  dashboard/compliance/pci_4_0_azure.py  Normal file
@@ -0,0 +1,23 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]
    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
23  dashboard/compliance/pci_4_0_gcp.py  Normal file
@@ -0,0 +1,23 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]
    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
24  dashboard/compliance/pci_4_0_kubernetes.py  Normal file
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_format3

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "REQUIREMENTS_DESCRIPTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ]

    return get_section_containers_format3(
        aux, "REQUIREMENTS_ATTRIBUTES_SECTION", "REQUIREMENTS_ID"
    )
@@ -76,7 +76,6 @@ def load_csv_files(csv_files):
result = result.replace("_AZURE", " - AZURE")
if "KUBERNETES" in result:
    result = result.replace("_KUBERNETES", " - KUBERNETES")
result = result[result.find("CIS_") :]
results.append(result)

unique_results = set(results)
@@ -532,8 +531,8 @@ def get_bar_graph(df, column_name):

# Cut the text if it is too long
for i in range(len(colums)):
    if len(colums[i]) > 15:
        colums[i] = colums[i][:15] + "..."
    if len(colums[i]) > 43:
        colums[i] = colums[i][:43] + "..."

fig = px.bar(
    df,
@@ -165,9 +165,21 @@ else:
)

# For the timestamp, remove the two columns and keep only the date

data["TIMESTAMP"] = pd.to_datetime(data["TIMESTAMP"])
data["ASSESSMENT_TIME"] = data["TIMESTAMP"].dt.strftime("%Y-%m-%d %H:%M:%S")
# Handle findings from v3 outputs
if "FINDING_UNIQUE_ID" in data.columns:
    data.rename(columns={"FINDING_UNIQUE_ID": "FINDING_UID"}, inplace=True)
if "ACCOUNT_ID" in data.columns:
    data.rename(columns={"ACCOUNT_ID": "ACCOUNT_UID"}, inplace=True)
if "ASSESSMENT_START_TIME" in data.columns:
    data.rename(columns={"ASSESSMENT_START_TIME": "TIMESTAMP"}, inplace=True)
if "RESOURCE_ID" in data.columns:
    data.rename(columns={"RESOURCE_ID": "RESOURCE_UID"}, inplace=True)

# Remove duplicates on the finding_uid column but keep the last one, taking the timestamp into account
data = data.sort_values("TIMESTAMP").drop_duplicates("FINDING_UID", keep="last")

data["ASSESSMENT_TIME"] = data["TIMESTAMP"].dt.strftime("%Y-%m-%d")
data_valid = pd.DataFrame()
for account in data["ACCOUNT_UID"].unique():
    all_times = data[data["ACCOUNT_UID"] == account]["ASSESSMENT_TIME"].unique()
@@ -1720,7 +1732,7 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
[
    html.P(
        html.Strong(
            "Recomendation: ",
            "Recommendation: ",
            style={
                "margin-right": "5px"
            },
@@ -1744,7 +1756,7 @@ def generate_table(data, index, color_mapping_severity, color_mapping_status):
[
    html.P(
        html.Strong(
            "RecomendationUrl: ",
            "RecommendationUrl: ",
            style={
                "margin-right": "5px"
            },
Some files were not shown because too many files have changed in this diff.