Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-04-01 13:47:21 +00:00)

Compare commits: PRWLR-4669...PRWLR-6455 (488 commits)
(Commit table: the compare view listed the commits by abbreviated SHA1, from cf80c41ce8 down to 366940298d; the author, date, and message columns did not survive extraction and are omitted here.)

.env (47 changed lines)

@@ -3,16 +3,17 @@
 # For production, it is recommended to use a secure method to store these variables and change the default secret keys.

 #### Prowler UI Configuration ####
-PROWLER_UI_VERSION="latest"
+PROWLER_UI_VERSION="stable"
 SITE_URL=http://localhost:3000
 API_BASE_URL=http://prowler-api:8080/api/v1
 NEXT_PUBLIC_API_DOCS_URL=http://prowler-api:8080/api/v1/docs
 AUTH_TRUST_HOST=true
 UI_PORT=3000
 # openssl rand -base64 32
 AUTH_SECRET="N/c6mnaS5+SWq81+819OrzQZlmx1Vxtp/orjttJSmw8="

 #### Prowler API Configuration ####
-PROWLER_API_VERSION="latest"
+PROWLER_API_VERSION="stable"
 # PostgreSQL settings
 # If running Django and celery on host, use 'localhost', else use 'postgres-db'
 POSTGRES_HOST=postgres-db
@@ -29,6 +30,30 @@ VALKEY_HOST=valkey
 VALKEY_PORT=6379
 VALKEY_DB=0

+# API scan settings
+
+# The path to the directory where scan output should be stored
+DJANGO_TMP_OUTPUT_DIRECTORY = "/tmp/prowler_api_output"
+
+# The maximum number of findings to process in a single batch
+DJANGO_FINDINGS_BATCH_SIZE = 1000
+
+# The AWS access key to be used when uploading scan output to an S3 bucket
+# If left empty, default AWS credentials resolution behavior will be used
+DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID=""
+
+# The AWS secret key to be used when uploading scan output to an S3 bucket
+DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY=""
+
+# An optional AWS session token
+DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN=""
+
+# The AWS region where your S3 bucket is located (e.g., "us-east-1")
+DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION=""
+
+# The name of the S3 bucket where scan output should be stored
+DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET=""
+
 # Django settings
 DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1,prowler-api
 DJANGO_BIND_ADDRESS=0.0.0.0
@@ -40,9 +65,12 @@ DJANGO_LOGGING_FORMATTER=human_readable
 # Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
 # Applies to both Django and Celery Workers
 DJANGO_LOGGING_LEVEL=INFO
-DJANGO_WORKERS=4 # Defaults to the maximum available based on CPU cores if not set.
-DJANGO_ACCESS_TOKEN_LIFETIME=30 # Token lifetime is in minutes
-DJANGO_REFRESH_TOKEN_LIFETIME=1440 # Token lifetime is in minutes
+# Defaults to the maximum available based on CPU cores if not set.
+DJANGO_WORKERS=4
+# Token lifetime is in minutes
+DJANGO_ACCESS_TOKEN_LIFETIME=30
+# Token lifetime is in minutes
+DJANGO_REFRESH_TOKEN_LIFETIME=1440
 DJANGO_CACHE_MAX_AGE=3600
 DJANGO_STALE_WHILE_REVALIDATE=60
 DJANGO_MANAGE_DB_PARTITIONS=True
@@ -87,3 +115,12 @@ jQIDAQAB
 -----END PUBLIC KEY-----"
 # openssl rand -base64 32
 DJANGO_SECRETS_ENCRYPTION_KEY="oE/ltOhp/n1TdbHjVmzcjDPLcLA41CVI/4Rk+UB5ESc="
+DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
+DJANGO_SENTRY_DSN=
+
+# Sentry settings
+SENTRY_ENVIRONMENT=local
+SENTRY_RELEASE=local
+
+#### Prowler release version ####
+NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.5.0

.github/dependabot.yml (vendored, 14 changed lines)

@@ -16,6 +16,17 @@ updates:
       - "dependencies"
       - "pip"

+  - package-ecosystem: "pip"
+    directory: "/api"
+    schedule:
+      interval: "daily"
+    open-pull-requests-limit: 10
+    target-branch: master
+    labels:
+      - "dependencies"
+      - "pip"
+      - "component/api"
+
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
@@ -27,7 +38,7 @@ updates:
       - "github_actions"

   - package-ecosystem: "npm"
-    directory: "/"
+    directory: "/ui"
     schedule:
       interval: "daily"
     open-pull-requests-limit: 10
@@ -35,6 +46,7 @@ updates:
     labels:
       - "dependencies"
       - "npm"
+      - "component/ui"

   - package-ecosystem: "docker"
     directory: "/"

.github/labeler.yml (vendored, 10 changed lines)

@@ -92,3 +92,13 @@ component/api:
 component/ui:
   - changed-files:
       - any-glob-to-any-file: "ui/**"
+
+compliance:
+  - changed-files:
+      - any-glob-to-any-file: "prowler/compliance/**"
+      - any-glob-to-any-file: "prowler/lib/outputs/compliance/**"
+      - any-glob-to-any-file: "tests/lib/outputs/compliance/**"
+
+review-django-migrations:
+  - changed-files:
+      - any-glob-to-any-file: "api/src/backend/api/migrations/**"

.github/pull_request_template.md (vendored, 8 changed lines)

@@ -15,7 +15,13 @@ Please include a summary of the change and which issue is fixed. List any depend
 - [ ] Review if the code is being covered by tests.
 - [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
 - [ ] Review if backport is needed.
 - [ ] Review if is needed to change the [Readme.md](https://github.com/prowler-cloud/prowler/blob/master/README.md)
+
+#### API
+- [ ] Verify if API specs need to be regenerated.
+- [ ] Check if version updates are required (e.g., specs, Poetry, etc.).
+- [ ] Ensure new entries are added to [CHANGELOG.md](https://github.com/prowler-cloud/prowler/blob/master/api/CHANGELOG.md), if applicable.
+
 ### License

 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.

(unnamed workflow file — API container build/push; header lost in extraction)

@@ -23,6 +23,7 @@ env:
   # Tags
   LATEST_TAG: latest
   RELEASE_TAG: ${{ github.event.release.tag_name }}
+  STABLE_TAG: stable

   WORKING_DIRECTORY: ./api

@@ -31,22 +32,43 @@ env:
   PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-api

 jobs:
+  repository-check:
+    name: Repository check
+    runs-on: ubuntu-latest
+    outputs:
+      is_repo: ${{ steps.repository_check.outputs.is_repo }}
+    steps:
+      - name: Repository check
+        id: repository_check
+        working-directory: /tmp
+        run: |
+          if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
+          then
+            echo "is_repo=true" >> "${GITHUB_OUTPUT}"
+          else
+            echo "This action only runs for prowler-cloud/prowler"
+            echo "is_repo=false" >> "${GITHUB_OUTPUT}"
+          fi
+
   # Build Prowler OSS container
   container-build-push:
+    needs: repository-check
+    if: needs.repository-check.outputs.is_repo == 'true'
     runs-on: ubuntu-latest
     defaults:
       run:
         working-directory: ${{ env.WORKING_DIRECTORY }}

     steps:
       - name: Repository check
         working-directory: /tmp
         run: |
           [[ ${{ github.repository }} != "prowler-cloud/prowler" ]] && echo "This action only runs for prowler-cloud/prowler"; exit 0

       - name: Checkout
         uses: actions/checkout@v4

       - name: Set short git commit SHA
         id: vars
         run: |
           shortSha=$(git rev-parse --short ${{ github.sha }})
           echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

       - name: Login to DockerHub
         uses: docker/login-action@v3
         with:
@@ -66,6 +88,7 @@ jobs:
           push: true
           tags: |
             ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.SHORT_SHA }}
           cache-from: type=gha
           cache-to: type=gha,mode=max

@@ -77,5 +100,15 @@ jobs:
           push: true
           tags: |
             ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
           cache-from: type=gha
           cache-to: type=gha,mode=max

+      - name: Trigger deployment
+        if: github.event_name == 'push'
+        uses: peter-evans/repository-dispatch@v3
+        with:
+          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
+          repository: ${{ secrets.CLOUD_DISPATCH }}
+          event-type: prowler-api-deploy
+          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ env.SHORT_SHA }}"}'

.github/workflows/api-codeql.yml (vendored, 4 changed lines)

@@ -15,16 +15,12 @@ on:
   push:
     branches:
       - "master"
-      - "v3"
-      - "v4.*"
       - "v5.*"
     paths:
       - "api/**"
   pull_request:
     branches:
       - "master"
-      - "v3"
-      - "v4.*"
       - "v5.*"
     paths:
       - "api/**"

.github/workflows/api-pull-request.yml (vendored, 28 changed lines)

@@ -4,15 +4,17 @@ on:
   push:
     branches:
       - "master"
+      - "v5.*"
     paths:
       - ".github/workflows/api-pull-request.yml"
       - "api/**"
   pull_request:
     branches:
       - "master"
+      - "v5.*"
     paths:
       - "api/**"

 env:
   POSTGRES_HOST: localhost
   POSTGRES_PORT: 5432
@@ -24,7 +26,8 @@ env:
   VALKEY_HOST: localhost
   VALKEY_PORT: 6379
   VALKEY_DB: 0

+  API_WORKING_DIR: ./api
+  IMAGE_NAME: prowler-api

 jobs:
   test:
@@ -87,7 +90,7 @@ jobs:
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
       run: |
         python -m pip install --upgrade pip
-        pipx install poetry
+        pipx install poetry==2.1.1

     - name: Set up Python ${{ matrix.python-version }}
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
@@ -100,7 +103,7 @@ jobs:
       working-directory: ./api
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
       run: |
-        poetry install
+        poetry install --no-root
         poetry run pip list
         VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \
           grep '"tag_name":' | \
@@ -112,7 +115,7 @@ jobs:
       working-directory: ./api
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
       run: |
-        poetry lock --check
+        poetry check --lock

     - name: Lint with ruff
       working-directory: ./api
@@ -169,3 +172,18 @@ jobs:
         CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
       with:
         flags: api
+  test-container-build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build Container
+        uses: docker/build-push-action@v6
+        with:
+          context: ${{ env.API_WORKING_DIR }}
+          push: false
+          tags: ${{ env.IMAGE_NAME }}:latest
+          outputs: type=docker
+          cache-from: type=gha
+          cache-to: type=gha,mode=max

.github/workflows/conventional-commit.yml (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
+name: Prowler - Conventional Commit
+
+on:
+  pull_request:
+    types:
+      - "opened"
+      - "edited"
+      - "synchronize"
+    branches:
+      - "master"
+      - "v3"
+      - "v4.*"
+      - "v5.*"
+
+jobs:
+  conventional-commit-check:
+    runs-on: ubuntu-latest
+    steps:
+      - name: conventional-commit-check
+        id: conventional-commit-check
+        uses: agenthunt/conventional-commit-checker-action@v2.0.0
+        with:
+          pr-title-regex: '^([^\s(]+)(?:\(([^)]+)\))?: (.+)'
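
The `pr-title-regex` above carries the whole check: a commit type, an optional parenthesized scope, then `": "` and a description. As a standalone sketch (not part of the repository), the same pattern can be exercised with Python's `re` module, whose semantics are close enough to the action's for titles like these:

```python
import re

# Pattern copied verbatim from pr-title-regex above:
# type ([^\s(]+), optional (scope), then ": description".
PATTERN = re.compile(r"^([^\s(]+)(?:\(([^)]+)\))?: (.+)")

titles = [
    "feat(api): add scan reports",   # type + scope -> accepted
    "fix: correct typo",             # type only -> accepted
    "bad title without colon",       # no ": " separator -> rejected
]

for title in titles:
    m = PATTERN.match(title)
    # Prints the captured (type, scope, description), or flags the title.
    print(f"{title!r} -> {m.groups() if m else 'REJECTED'}")
```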

.github/workflows/find-secrets.yml (vendored, 2 changed lines)

@@ -11,7 +11,7 @@ jobs:
       with:
         fetch-depth: 0
     - name: TruffleHog OSS
-      uses: trufflesecurity/trufflehog@v3.86.1
+      uses: trufflesecurity/trufflehog@v3.88.16
       with:
         path: ./
         base: ${{ github.event.repository.default_branch }}

(unnamed workflow file — release job; header lost in extraction)

@@ -68,7 +68,7 @@ jobs:
     - name: Install Poetry
       run: |
-        pipx install poetry
+        pipx install poetry==1.8.5
         pipx inject poetry poetry-bumpversion

     - name: Get Prowler version

.github/workflows/sdk-codeql.yml (vendored, 2 changed lines)

@@ -17,6 +17,7 @@ on:
       - "master"
       - "v3"
       - "v4.*"
       - "v5.*"
     paths-ignore:
       - 'ui/**'
+      - 'api/**'
@@ -25,6 +26,7 @@ on:
       - "master"
       - "v3"
       - "v4.*"
       - "v5.*"
     paths-ignore:
       - 'ui/**'
+      - 'api/**'

.github/workflows/sdk-pull-request.yml (vendored, 10 changed lines)

@@ -37,12 +37,16 @@ jobs:
           README.md
           mkdocs.yml
           .backportrc.json
+          .env
+          docker-compose*
+          examples/**
+          .gitignore

     - name: Install poetry
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
       run: |
         python -m pip install --upgrade pip
-        pipx install poetry
+        pipx install poetry==2.1.1

     - name: Set up Python ${{ matrix.python-version }}
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
@@ -54,7 +58,7 @@ jobs:
     - name: Install dependencies
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
       run: |
-        poetry install
+        poetry install --no-root
         poetry run pip list
         VERSION=$(curl --silent "https://api.github.com/repos/hadolint/hadolint/releases/latest" | \
           grep '"tag_name":' | \
@@ -65,7 +69,7 @@ jobs:
     - name: Poetry check
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'
       run: |
-        poetry lock --check
+        poetry check --lock

     - name: Lint with flake8
       if: steps.are-non-ignored-files-changed.outputs.any_changed == 'true'

.github/workflows/sdk-pypi-release.yml (vendored, 30 changed lines)

@@ -10,12 +10,40 @@ env:
   CACHE: "poetry"

 jobs:
+  repository-check:
+    name: Repository check
+    runs-on: ubuntu-latest
+    outputs:
+      is_repo: ${{ steps.repository_check.outputs.is_repo }}
+    steps:
+      - name: Repository check
+        id: repository_check
+        working-directory: /tmp
+        run: |
+          if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
+          then
+            echo "is_repo=true" >> "${GITHUB_OUTPUT}"
+          else
+            echo "This action only runs for prowler-cloud/prowler"
+            echo "is_repo=false" >> "${GITHUB_OUTPUT}"
+          fi
+
   release-prowler-job:
     runs-on: ubuntu-latest
+    needs: repository-check
+    if: needs.repository-check.outputs.is_repo == 'true'
     env:
       POETRY_VIRTUALENVS_CREATE: "false"
     name: Release Prowler to PyPI
     steps:
+      - name: Repository check
+        working-directory: /tmp
+        run: |
+          if [[ "${{ github.repository }}" != "prowler-cloud/prowler" ]]; then
+            echo "This action only runs for prowler-cloud/prowler"
+            exit 1
+          fi
+
       - name: Get Prowler version
         run: |
           PROWLER_VERSION="${{ env.RELEASE_TAG }}"
@@ -40,7 +68,7 @@ jobs:

     - name: Install dependencies
       run: |
-        pipx install poetry
+        pipx install poetry==1.8.5

     - name: Setup Python
       uses: actions/setup-python@v5

(unnamed workflow file — UI container build/push; header lost in extraction)

@@ -23,6 +23,7 @@ env:
   # Tags
   LATEST_TAG: latest
   RELEASE_TAG: ${{ github.event.release.tag_name }}
+  STABLE_TAG: stable

   WORKING_DIRECTORY: ./ui

@@ -31,22 +32,43 @@ env:
   PROWLERCLOUD_DOCKERHUB_IMAGE: prowler-ui

 jobs:
+  repository-check:
+    name: Repository check
+    runs-on: ubuntu-latest
+    outputs:
+      is_repo: ${{ steps.repository_check.outputs.is_repo }}
+    steps:
+      - name: Repository check
+        id: repository_check
+        working-directory: /tmp
+        run: |
+          if [[ ${{ github.repository }} == "prowler-cloud/prowler" ]]
+          then
+            echo "is_repo=true" >> "${GITHUB_OUTPUT}"
+          else
+            echo "This action only runs for prowler-cloud/prowler"
+            echo "is_repo=false" >> "${GITHUB_OUTPUT}"
+          fi
+
   # Build Prowler OSS container
   container-build-push:
+    needs: repository-check
+    if: needs.repository-check.outputs.is_repo == 'true'
     runs-on: ubuntu-latest
     defaults:
       run:
         working-directory: ${{ env.WORKING_DIRECTORY }}

     steps:
       - name: Repository check
         working-directory: /tmp
         run: |
           [[ ${{ github.repository }} != "prowler-cloud/prowler" ]] && echo "This action only runs for prowler-cloud/prowler"; exit 0

       - name: Checkout
         uses: actions/checkout@v4

       - name: Set short git commit SHA
         id: vars
         run: |
           shortSha=$(git rev-parse --short ${{ github.sha }})
           echo "SHORT_SHA=${shortSha}" >> $GITHUB_ENV

       - name: Login to DockerHub
         uses: docker/login-action@v3
         with:
@@ -62,10 +84,13 @@ jobs:
         uses: docker/build-push-action@v6
         with:
           context: ${{ env.WORKING_DIRECTORY }}
+          build-args: |
+            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=${{ env.SHORT_SHA }}
+          # Set push: false for testing
           push: true
           tags: |
             ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.LATEST_TAG }}
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.SHORT_SHA }}
           cache-from: type=gha
           cache-to: type=gha,mode=max

@@ -74,8 +99,20 @@ jobs:
         uses: docker/build-push-action@v6
         with:
           context: ${{ env.WORKING_DIRECTORY }}
+          build-args: |
+            NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v${{ env.RELEASE_TAG }}
           push: true
           tags: |
             ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.RELEASE_TAG }}
+            ${{ env.PROWLERCLOUD_DOCKERHUB_REPOSITORY }}/${{ env.PROWLERCLOUD_DOCKERHUB_IMAGE }}:${{ env.STABLE_TAG }}
           cache-from: type=gha
           cache-to: type=gha,mode=max

+      - name: Trigger deployment
+        if: github.event_name == 'push'
+        uses: peter-evans/repository-dispatch@v3
+        with:
+          token: ${{ secrets.PROWLER_BOT_ACCESS_TOKEN }}
+          repository: ${{ secrets.CLOUD_DISPATCH }}
+          event-type: prowler-ui-deploy
+          client-payload: '{"sha": "${{ github.sha }}", "short_sha": "${{ env.SHORT_SHA }}"}'

.github/workflows/ui-codeql.yml (vendored, 2 changed lines)

@@ -15,14 +15,12 @@ on:
   push:
     branches:
       - "master"
-      - "v4.*"
       - "v5.*"
     paths:
       - "ui/**"
   pull_request:
     branches:
       - "master"
-      - "v4.*"
       - "v5.*"
     paths:
       - "ui/**"

.github/workflows/ui-pull-request.yml (vendored, 28 changed lines)

@@ -1,11 +1,22 @@
 name: UI - Pull Request

 on:
+  push:
+    branches:
+      - "master"
+      - "v5.*"
+    paths:
+      - ".github/workflows/ui-pull-request.yml"
+      - "ui/**"
   pull_request:
     branches:
       - master
+      - "v5.*"
     paths:
       - 'ui/**'
 env:
+  UI_WORKING_DIR: ./ui
+  IMAGE_NAME: prowler-ui

 jobs:
   test-and-coverage:
@@ -32,3 +43,20 @@ jobs:
     - name: Build the application
       working-directory: ./ui
       run: npm run build
+  test-container-build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build Container
+        uses: docker/build-push-action@v6
+        with:
+          context: ${{ env.UI_WORKING_DIR }}
+          # Always build using `prod` target
+          target: prod
+          push: false
+          tags: ${{ env.IMAGE_NAME }}:latest
+          outputs: type=docker
+          build-args: |
+            NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_51LwpXXXX

.gitignore (vendored, 3 changed lines)

@@ -31,7 +31,7 @@ tags
 *.DS_Store

 # Prowler output
-output/
+/output

 # Prowler found secrets
 secrets-*/
@@ -45,6 +45,7 @@ junit-reports/
 # Terraform
 .terraform*
 *.tfstate
+*.tfstate.*

 # .env
 ui/.env*

(pre-commit configuration file; header lost in extraction)

@@ -27,6 +27,7 @@ repos:
     hooks:
       - id: shellcheck
+        exclude: contrib

 ## PYTHON
   - repo: https://github.com/myint/autoflake
     rev: v2.3.1
@@ -58,11 +59,28 @@ repos:
         args: ["--ignore=E266,W503,E203,E501,W605"]

   - repo: https://github.com/python-poetry/poetry
-    rev: 1.8.0
+    rev: 2.1.1
     hooks:
       - id: poetry-check
+        name: API - poetry-check
+        args: ["--directory=./api"]
+        pass_filenames: false

       - id: poetry-lock
-        args: ["--no-update"]
+        name: API - poetry-lock
+        args: ["--directory=./api"]
+        pass_filenames: false
+
+      - id: poetry-check
+        name: SDK - poetry-check
+        args: ["--directory=./"]
+        pass_filenames: false
+
+      - id: poetry-lock
+        name: SDK - poetry-lock
+        args: ["--directory=./"]
+        pass_filenames: false

   - repo: https://github.com/hadolint/hadolint
     rev: v2.13.0-beta
@@ -90,7 +108,7 @@ repos:
       - id: bandit
        name: bandit
        description: "Bandit is a tool for finding common security issues in Python code"
-       entry: bash -c 'bandit -q -lll -x '*_test.py,./contrib/' -r .'
+       entry: bash -c 'bandit -q -lll -x '*_test.py,./contrib/,./.venv/' -r .'
        language: system
        files: '.*\.py'
@@ -103,7 +121,6 @@ repos:
      - id: vulture
        name: vulture
        description: "Vulture finds unused code in Python programs."
-       entry: bash -c 'vulture --exclude "contrib" --min-confidence 100 .'
-       exclude: 'api/src/backend/'
+       entry: bash -c 'vulture --exclude "contrib,.venv,api/src/backend/api/tests/,api/src/backend/conftest.py,api/src/backend/tasks/tests/" --min-confidence 100 .'
        language: system
        files: '.*\.py'

Dockerfile (26 changed lines)

@@ -1,10 +1,10 @@
-FROM python:3.12.8-alpine3.20
+FROM python:3.12.9-alpine3.20

 LABEL maintainer="https://github.com/prowler-cloud/prowler"

 # Update system dependencies and install essential tools
 #hadolint ignore=DL3018
-RUN apk --no-cache upgrade && apk --no-cache add curl git
+RUN apk --no-cache upgrade && apk --no-cache add curl git gcc python3-dev musl-dev linux-headers

 # Create non-root user
 RUN mkdir -p /home/prowler && \
@@ -18,21 +18,25 @@ WORKDIR /home/prowler
 COPY prowler/ /home/prowler/prowler/
 COPY dashboard/ /home/prowler/dashboard/
 COPY pyproject.toml /home/prowler
-COPY README.md /home/prowler
+COPY README.md /home/prowler/

 # Install Python dependencies
 ENV HOME='/home/prowler'
-ENV PATH="$HOME/.local/bin:$PATH"
-RUN pip install --no-cache-dir --upgrade pip setuptools wheel && \
-    pip install --no-cache-dir .
+ENV PATH="${HOME}/.local/bin:${PATH}"
+#hadolint ignore=DL3013
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir poetry
+
+# By default poetry does not compile Python source files to bytecode during installation.
+# This speeds up the installation process, but the first execution may take a little more
+# time because Python then compiles source files to bytecode automatically. If you want to
+# compile source files to bytecode during installation, you can use the --compile option
+RUN poetry install --compile && \
+    rm -rf ~/.cache/pip

 # Remove deprecated dash dependencies
 RUN pip uninstall dash-html-components -y && \
     pip uninstall dash-core-components -y

 # Remove Prowler directory and build files
 USER 0
 RUN rm -rf /home/prowler/prowler /home/prowler/pyproject.toml /home/prowler/README.md /home/prowler/build /home/prowler/prowler.egg-info

 USER prowler
-ENTRYPOINT ["prowler"]
+ENTRYPOINT ["poetry", "run", "prowler"]

README.md (37 changed lines)

@@ -71,10 +71,13 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe

 | Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
 |---|---|---|---|---|
-| AWS | 561 | 81 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
-| GCP | 77 | 13 -> `prowler gcp --list-services` | 3 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
-| Azure | 139 | 18 -> `prowler azure --list-services` | 4 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
-| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
+| AWS | 564 | 82 | 33 | 10 |
+| GCP | 77 | 13 | 6 | 3 |
+| Azure | 140 | 18 | 7 | 3 |
+| Kubernetes | 83 | 7 | 4 | 7 |
+| Microsoft365 | 5 | 2 | 1 | 0 |

+> You can list the checks, services, compliance frameworks and categories with `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.

 # 💻 Installation

@@ -98,6 +101,7 @@ curl -LO https://raw.githubusercontent.com/prowler-cloud/prowler/refs/heads/mast
 docker compose up -d
 ```

+> Containers are built for `linux/amd64`. If your workstation's architecture is different, please set `DOCKER_DEFAULT_PLATFORM=linux/amd64` in your environment or use the `--platform linux/amd64` flag in the docker command.
 > Enjoy Prowler App at http://localhost:3000 by signing up with your email and password.

 ### From GitHub
@@ -105,7 +109,7 @@ docker compose up -d
 **Requirements**

 * `git` installed.
-* `poetry` installed: [poetry installation](https://python-poetry.org/docs/#installation).
+* `poetry` v2 installed: [poetry installation](https://python-poetry.org/docs/#installation).
 * `npm` installed: [npm installation](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm).
 * `Docker Compose` installed: https://docs.docker.com/compose/install/.

@@ -115,7 +119,7 @@ docker compose up -d
 git clone https://github.com/prowler-cloud/prowler
 cd prowler/api
 poetry install
-poetry shell
+eval $(poetry env activate)
 set -a
 source .env
 docker compose up postgres valkey -d
@@ -123,6 +127,11 @@ cd src/backend
 python manage.py migrate --database admin
 gunicorn -c config/guniconf.py config.wsgi:application
 ```
+> [!IMPORTANT]
+> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
+>
+> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
+> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment

 > Now, you can access the API documentation at http://localhost:8080/api/v1/docs.

@@ -132,7 +141,7 @@ gunicorn -c config/guniconf.py config.wsgi:application
 git clone https://github.com/prowler-cloud/prowler
 cd prowler/api
 poetry install
-poetry shell
+eval $(poetry env activate)
 set -a
 source .env
 cd src/backend
@@ -145,7 +154,7 @@ python -m celery -A config.celery worker -l info -E
 git clone https://github.com/prowler-cloud/prowler
 cd prowler/api
 poetry install
-poetry shell
+eval $(poetry env activate)
 set -a
 source .env
 cd src/backend
@@ -166,7 +175,7 @@ npm start

 ## Prowler CLI
 ### Pip package
-Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python >= 3.9, < 3.13:
+Prowler CLI is available as a project in [PyPI](https://pypi.org/project/prowler-cloud/), thus can be installed using pip with Python > 3.9.1, < 3.13:

 ```console
 pip install prowler
@@ -196,15 +205,21 @@ The container images are available here:

 ### From GitHub

-Python >= 3.9, < 3.13 is required with pip and poetry:
+Python > 3.9.1, < 3.13 is required with pip and poetry:

 ``` console
 git clone https://github.com/prowler-cloud/prowler
 cd prowler
-poetry shell
+eval $(poetry env activate)
 poetry install
 python prowler.py -v
 ```
+> [!IMPORTANT]
+> Starting from Poetry v2.0.0, `poetry shell` has been deprecated in favor of `poetry env activate`.
+>
+> If your poetry version is below 2.0.0 you must keep using `poetry shell` to activate your environment.
+> In case you have any doubts, consult the Poetry environment activation guide: https://python-poetry.org/docs/managing-environments/#activating-the-environment

 > If you want to clone Prowler from Windows, use `git config core.longpaths true` to allow long file paths.
 # 📐✏️ High level architecture

(API environment file; header lost in extraction)

@@ -22,6 +22,8 @@ DJANGO_SECRETS_ENCRYPTION_KEY=""
 # Decide whether to allow Django manage database table partitions
 DJANGO_MANAGE_DB_PARTITIONS=[True|False]
 DJANGO_CELERY_DEADLOCK_ATTEMPTS=5
+DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
+DJANGO_SENTRY_DSN=

 # PostgreSQL settings
 # If running django and celery on host, use 'localhost', else use 'postgres-db'
@@ -38,3 +40,16 @@ POSTGRES_DB=prowler_db
 VALKEY_HOST=[localhost|valkey]
 VALKEY_PORT=6379
 VALKEY_DB=0
+
+# Sentry settings
+SENTRY_ENVIRONMENT=local
+SENTRY_RELEASE=local
+
+# Social login credentials
+DJANGO_GOOGLE_OAUTH_CLIENT_ID=""
+DJANGO_GOOGLE_OAUTH_CLIENT_SECRET=""
+DJANGO_GOOGLE_OAUTH_CALLBACK_URL=""
+
+DJANGO_GITHUB_OAUTH_CLIENT_ID=""
+DJANGO_GITHUB_OAUTH_CLIENT_SECRET=""
+DJANGO_GITHUB_OAUTH_CALLBACK_URL=""

api/CHANGELOG.md (new file, 43 lines)

@@ -0,0 +1,43 @@
+# Prowler API Changelog
+
+All notable changes to the **Prowler API** are documented in this file.
+
+---
+
+## [v1.6.0] (Prowler UNRELEASED)
+
+### Added
+
+- Support for developing new integrations [(#7167)](https://github.com/prowler-cloud/prowler/pull/7167).
+
+---
+
+## [v1.5.1] (Prowler v5.4.1)
+
+### Fixed
+- Added a handled response in case local files are missing [(#7183)](https://github.com/prowler-cloud/prowler/pull/7183).
+- Fixed a race condition when deleting export files after the S3 upload [(#7172)](https://github.com/prowler-cloud/prowler/pull/7172).
+
+---
+
+## [v1.5.0] (Prowler v5.4.0)
+
+### Added
+- Social login integration with Google and GitHub [(#6906)](https://github.com/prowler-cloud/prowler/pull/6906)
+- Add API scan report system, now all scans launched from the API will generate a compressed file with the report in OCSF, CSV and HTML formats [(#6878)](https://github.com/prowler-cloud/prowler/pull/6878).
+- Configurable Sentry integration [(#6874)](https://github.com/prowler-cloud/prowler/pull/6874)
+
+### Changed
+- Optimized `GET /findings` endpoint to improve response time and size [(#7019)](https://github.com/prowler-cloud/prowler/pull/7019).
+
+---
+
+## [v1.4.0] (Prowler v5.3.0)
+
+### Changed
+- Daily scheduled scan instances are now created beforehand with `SCHEDULED` state [(#6700)](https://github.com/prowler-cloud/prowler/pull/6700).
+- Findings endpoints now require at least one date filter [(#6800)](https://github.com/prowler-cloud/prowler/pull/6800).
+- Findings metadata endpoint received a performance improvement [(#6863)](https://github.com/prowler-cloud/prowler/pull/6863).
+- Increased the allowed length of the provider UID for Kubernetes providers [(#6869)](https://github.com/prowler-cloud/prowler/pull/6869).
+
+---

(API Dockerfile; header lost in extraction)

@@ -21,7 +21,8 @@ COPY src/backend/ ./backend/

 ENV PATH="/home/prowler/.local/bin:$PATH"

-RUN poetry install && \
+# Add `--no-root` to avoid installing the current project as a package
+RUN poetry install --no-root && \
     rm -rf ~/.cache/pip

 COPY docker-entrypoint.sh ./docker-entrypoint.sh

(API development documentation; header lost in extraction)

@@ -269,3 +269,66 @@ poetry shell
 cd src/backend
 pytest
 ```

+# Custom commands
+
+Django provides a way to create custom commands that can be run from the command line.
+
+> These commands can be found in: ```prowler/api/src/backend/api/management/commands```
+
+To run a custom command, you need to be in the `prowler/api/src/backend` directory and run:
+
+```console
+poetry shell
+python manage.py <command_name>
+```
+
+## Generate dummy data
+
+```console
+python manage.py findings --tenant <TENANT_ID> --findings <NUM_FINDINGS> --resources <NUM_RESOURCES> --batch <TRANSACTION_BATCH_SIZE> --alias <ALIAS>
+```
+
+This command creates, for a given tenant, a provider, scan and a set of findings and resources related altogether.
+
+> Scan progress and state are updated in real time.
+> - 0-33%: Create resources.
+> - 33-66%: Create findings.
+> - 66%: Create resource-finding mapping.
+>
+> The last step is required to access the findings details, since the UI needs that to print all the information.
+
+### Example
+
+```console
+~/backend $ poetry run python manage.py findings --tenant fffb1893-3fc7-4623-a5d9-fae47da1c528 --findings 25000 --resources 1000 --batch 5000 --alias test-script
+
+Starting data population
+Tenant: fffb1893-3fc7-4623-a5d9-fae47da1c528
+Alias: test-script
+Resources: 1000
+Findings: 25000
+Batch size: 5000
+
+Creating resources...
+100%|███████████████████████| 1/1 [00:00<00:00, 7.72it/s]
+Resources created successfully.
+
+Creating findings...
+100%|███████████████████████| 5/5 [00:05<00:00, 1.09s/it]
+Findings created successfully.
+
+Creating resource-finding mappings...
+100%|███████████████████████| 5/5 [00:02<00:00, 1.81it/s]
+Resource-finding mappings created successfully.
+
+Successfully populated test data.
+```

(API docker-entrypoint.sh; header lost in extraction)

@@ -28,7 +28,7 @@ start_prod_server() {

 start_worker() {
     echo "Starting the worker..."
-    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E
+    poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans,scan-reports,deletion -E --max-tasks-per-child 1
 }

 start_worker_beat() {

api/poetry.lock (generated, 2235 changed lines) — diff suppressed because it is too large.

(API pyproject.toml; header lost in extraction)

@@ -2,41 +2,47 @@
 build-backend = "poetry.core.masonry.api"
 requires = ["poetry-core"]

-[tool.poetry]
-authors = ["Prowler Team"]
+[project]
+authors = [{name = "Prowler Engineering", email = "engineering@prowler.com"}]
+dependencies = [
+    "celery[pytest] (>=5.4.0,<6.0.0)",
+    "dj-rest-auth[with_social,jwt] (==7.0.1)",
+    "django==5.1.7",
+    "django-celery-beat (>=2.7.0,<3.0.0)",
+    "django-celery-results (>=2.5.1,<3.0.0)",
+    "django-cors-headers==4.4.0",
+    "django-environ==0.11.2",
+    "django-filter==24.3",
+    "django-guid==3.5.0",
+    "django-postgres-extra (>=2.0.8,<3.0.0)",
+    "djangorestframework==3.15.2",
+    "djangorestframework-jsonapi==7.0.2",
+    "djangorestframework-simplejwt (>=5.3.1,<6.0.0)",
+    "drf-nested-routers (>=0.94.1,<1.0.0)",
+    "drf-spectacular==0.27.2",
+    "drf-spectacular-jsonapi==0.5.1",
+    "gunicorn==23.0.0",
+    "prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
+    "psycopg2-binary==2.9.9",
+    "pytest-celery[redis] (>=1.0.1,<2.0.0)",
+    "sentry-sdk[django] (>=2.20.0,<3.0.0)",
+    "uuid6==2024.7.10"
+]
 description = "Prowler's API (Django/DRF)"
 license = "Apache-2.0"
 name = "prowler-api"
 package-mode = false
-version = "1.1.0"
+# Needed for the SDK compatibility
+requires-python = ">=3.11,<3.13"
+version = "1.6.0"

-[tool.poetry.dependencies]
-celery = {extras = ["pytest"], version = "^5.4.0"}
-django = "5.1.1"
-django-celery-beat = "^2.7.0"
-django-celery-results = "^2.5.1"
-django-cors-headers = "4.4.0"
-django-environ = "0.11.2"
-django-filter = "24.3"
-django-guid = "3.5.0"
-django-postgres-extra = "^2.0.8"
-djangorestframework = "3.15.2"
-djangorestframework-jsonapi = "7.0.2"
-djangorestframework-simplejwt = "^5.3.1"
-drf-nested-routers = "^0.94.1"
-drf-spectacular = "0.27.2"
-drf-spectacular-jsonapi = "0.5.1"
-gunicorn = "23.0.0"
-prowler = {git = "https://github.com/prowler-cloud/prowler.git", tag = "5.0.0"}
-psycopg2-binary = "2.9.9"
-pytest-celery = {extras = ["redis"], version = "^1.0.1"}
-# Needed for prowler compatibility
-python = ">=3.11,<3.13"
-uuid6 = "2024.7.10"
+[project.scripts]
+celery = "src.backend.config.settings.celery"

 [tool.poetry.group.dev.dependencies]
 bandit = "1.7.9"
 coverage = "7.5.4"
 django-silk = "5.3.2"
 docker = "7.1.0"
 freezegun = "1.5.1"
 mypy = "1.10.1"
@@ -48,8 +54,6 @@ pytest-env = "1.1.3"
 pytest-randomly = "3.15.0"
 pytest-xdist = "3.6.1"
 ruff = "0.5.0"
-safety = "3.2.3"
-vulture = "2.11"
-
-[tool.poetry.scripts]
-celery = "src.backend.config.settings.celery"
+safety = "3.2.9"
+tqdm = "4.67.1"
+vulture = "2.14"

api/src/backend/api/adapters.py (new file, 61 lines)

@@ -0,0 +1,61 @@
+from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
+from django.db import transaction
+
+from api.db_router import MainRouter
+from api.db_utils import rls_transaction
+from api.models import Membership, Role, Tenant, User, UserRoleRelationship
+
+
+class ProwlerSocialAccountAdapter(DefaultSocialAccountAdapter):
+    @staticmethod
+    def get_user_by_email(email: str):
+        try:
+            return User.objects.get(email=email)
+        except User.DoesNotExist:
+            return None
+
+    def pre_social_login(self, request, sociallogin):
+        # Link existing accounts with the same email address
+        email = sociallogin.account.extra_data.get("email")
+        if email:
+            existing_user = self.get_user_by_email(email)
+            if existing_user:
+                sociallogin.connect(request, existing_user)
+
+    def save_user(self, request, sociallogin, form=None):
+        """
+        Called after the user data is fully populated from the provider
+        and is about to be saved to the DB for the first time.
+        """
+        with transaction.atomic(using=MainRouter.admin_db):
+            user = super().save_user(request, sociallogin, form)
+            user.save(using=MainRouter.admin_db)
+            social_account_name = sociallogin.account.extra_data.get("name")
+            if social_account_name:
+                user.name = social_account_name
+                user.save(using=MainRouter.admin_db)
+
+            tenant = Tenant.objects.using(MainRouter.admin_db).create(
+                name=f"{user.email.split('@')[0]} default tenant"
+            )
+            with rls_transaction(str(tenant.id)):
+                Membership.objects.using(MainRouter.admin_db).create(
+                    user=user, tenant=tenant, role=Membership.RoleChoices.OWNER
+                )
+                role = Role.objects.using(MainRouter.admin_db).create(
+                    name="admin",
+                    tenant_id=tenant.id,
+                    manage_users=True,
+                    manage_account=True,
+                    manage_billing=True,
+                    manage_providers=True,
+                    manage_integrations=True,
+                    manage_scans=True,
+                    unlimited_visibility=True,
+                )
+                UserRoleRelationship.objects.using(MainRouter.admin_db).create(
+                    user=user,
+                    role=role,
+                    tenant_id=tenant.id,
+                )
+        return user
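
For the adapter above to take effect, django-allauth has to be pointed at it via its `SOCIALACCOUNT_ADAPTER` setting. A minimal sketch of that wiring, assuming the dotted path implied by the new file's location (the actual settings module is not part of this diff):

```python
# settings.py (sketch): tell django-allauth to use the custom adapter.
# "api.adapters" matches api/src/backend/api/adapters.py shown above.
SOCIALACCOUNT_ADAPTER = "api.adapters.ProwlerSocialAccountAdapter"
```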

(api/db_router.py — inferred from the `api.db_router` import above; header lost in extraction)

@@ -1,18 +1,29 @@
+ALLOWED_APPS = ("django", "socialaccount", "account", "authtoken", "silk")
+
+
 class MainRouter:
     default_db = "default"
     admin_db = "admin"

     def db_for_read(self, model, **hints):  # noqa: F841
         model_table_name = model._meta.db_table
-        if model_table_name.startswith("django_"):
+        if model_table_name.startswith("django_") or any(
+            model_table_name.startswith(f"{app}_") for app in ALLOWED_APPS
+        ):
             return self.admin_db
         return None

     def db_for_write(self, model, **hints):  # noqa: F841
         model_table_name = model._meta.db_table
-        if model_table_name.startswith("django_"):
+        if any(model_table_name.startswith(f"{app}_") for app in ALLOWED_APPS):
             return self.admin_db
         return None

     def allow_migrate(self, db, app_label, model_name=None, **hints):  # noqa: F841
         return db == self.admin_db

+    def allow_relation(self, obj1, obj2, **hints):  # noqa: F841
+        # Allow relations if both objects are in either "default" or "admin" db connectors
+        if {obj1._state.db, obj2._state.db} <= {self.default_db, self.admin_db}:
+            return True
+        return None
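
Django only consults a router that is registered in settings. A one-line sketch of that registration, assuming the module path inferred above (the settings file itself is not in this excerpt):

```python
# settings.py (sketch): register the router so Django consults it for
# every read, write, relation, and migration routing decision.
DATABASE_ROUTERS = ["api.db_router.MainRouter"]
```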

(api/db_utils.py — inferred from the `api.db_utils` import above; header lost in extraction)

@@ -5,7 +5,6 @@ from datetime import datetime, timedelta, timezone

 from django.conf import settings
 from django.contrib.auth.models import BaseUserManager
-from django.core.paginator import Paginator
 from django.db import connection, models, transaction
 from psycopg2 import connect as psycopg2_connect
 from psycopg2.extensions import AsIs, new_type, register_adapter, register_type
@@ -120,15 +119,18 @@ def batch_delete(queryset, batch_size=5000):
     total_deleted = 0
     deletion_summary = {}

-    paginator = Paginator(queryset.order_by("id").only("id"), batch_size)
-
-    for page_num in paginator.page_range:
-        batch_ids = [obj.id for obj in paginator.page(page_num).object_list]
+    while True:
+        # Get a batch of IDs to delete
+        batch_ids = set(
+            queryset.values_list("id", flat=True).order_by("id")[:batch_size]
+        )
+        if not batch_ids:
+            # No more objects to delete
+            break

         deleted_count, deleted_info = queryset.filter(id__in=batch_ids).delete()

         total_deleted += deleted_count

         for model_label, count in deleted_info.items():
             deletion_summary[model_label] = deletion_summary.get(model_label, 0) + count

@@ -316,3 +318,15 @@ class InvitationStateEnum(EnumType):
 class InvitationStateEnumField(PostgresEnumField):
     def __init__(self, *args, **kwargs):
         super().__init__("invitation_state", *args, **kwargs)
+
+
+# Postgres enum definition for Integration type
+
+
+class IntegrationTypeEnum(EnumType):
+    enum_type_name = "integration_type"
+
+
+class IntegrationTypeEnumField(PostgresEnumField):
+    def __init__(self, *args, **kwargs):
+        super().__init__("integration_type", *args, **kwargs)
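
The reworked `batch_delete` re-queries the head of the queryset on every pass, so each iteration sees a fresh batch after the previous delete rather than paginating over a snapshot whose pages shift as rows disappear. A hypothetical call (the model and filter are illustrative, not taken from this excerpt):

```python
# Sketch: delete one tenant's findings in batches of 5000 without
# loading every ID up front; returns total rows and per-model counts.
total_deleted, summary = batch_delete(
    Finding.objects.filter(tenant_id=tenant_id), batch_size=5000
)
print(f"Deleted {total_deleted} rows: {summary}")
```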
@@ -7,7 +7,7 @@ from rest_framework_json_api.serializers import ValidationError
 from api.db_utils import POSTGRES_TENANT_VAR, SET_CONFIG_QUERY


-def set_tenant(func):
+def set_tenant(func=None, *, keep_tenant=False):
     """
     Decorator to set the tenant context for a Celery task based on the provided tenant_id.

@@ -40,20 +40,29 @@
     # The tenant context will be set before the task logic executes.
     """

-    @wraps(func)
-    @transaction.atomic
-    def wrapper(*args, **kwargs):
-        try:
-            tenant_id = kwargs.pop("tenant_id")
-        except KeyError:
-            raise KeyError("This task requires the tenant_id")
-        try:
-            uuid.UUID(tenant_id)
-        except ValueError:
-            raise ValidationError("Tenant ID must be a valid UUID")
-        with connection.cursor() as cursor:
-            cursor.execute(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])
+    def decorator(func):
+        @wraps(func)
+        @transaction.atomic
+        def wrapper(*args, **kwargs):
+            try:
+                if not keep_tenant:
+                    tenant_id = kwargs.pop("tenant_id")
+                else:
+                    tenant_id = kwargs["tenant_id"]
+            except KeyError:
+                raise KeyError("This task requires the tenant_id")
+            try:
+                uuid.UUID(tenant_id)
+            except ValueError:
+                raise ValidationError("Tenant ID must be a valid UUID")
+            with connection.cursor() as cursor:
+                cursor.execute(SET_CONFIG_QUERY, [POSTGRES_TENANT_VAR, tenant_id])

-            return func(*args, **kwargs)
+                return func(*args, **kwargs)

-    return wrapper
+        return wrapper
+
+    if func is None:
+        return decorator
+    else:
+        return decorator(func)

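A short sketch of how the reworked decorator can now be applied both bare and with arguments (the task bodies are hypothetical; the tenant UUID is the one used in the fixtures below):

    # Bare form: tenant_id is popped from kwargs before the task body runs.
    @set_tenant
    def delete_provider_task(provider_id: str):
        ...

    # Parametrized form: tenant_id stays in kwargs because the task body needs it.
    @set_tenant(keep_tenant=True)
    def generate_outputs_task(provider_id: str, tenant_id: str):
        ...

    # Both are invoked the same way; the decorator consumes tenant_id either way.
    delete_provider_task(
        provider_id="...", tenant_id="12646005-9067-4d2a-a098-8bb378604362"
    )

The `func is None` tail is the standard trick for a decorator that works with and without parentheses: `@set_tenant` receives the function directly, while `@set_tenant(keep_tenant=True)` returns `decorator` to be applied afterwards.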
@@ -1,4 +1,4 @@
-from datetime import date, datetime, timezone
+from datetime import date, datetime, timedelta, timezone

 from django.conf import settings
 from django.db.models import Q
@@ -24,6 +24,7 @@ from api.db_utils import (
 from api.models import (
     ComplianceOverview,
     Finding,
+    Integration,
     Invitation,
     Membership,
     PermissionChoices,
@@ -319,13 +320,41 @@ class FindingFilter(FilterSet):
         field_name="resources__type", lookup_expr="icontains"
     )

+    # Temporarily disabled until we implement tag filtering in the UI
+    # resource_tag_key = CharFilter(field_name="resources__tags__key")
+    # resource_tag_key__in = CharInFilter(
+    #     field_name="resources__tags__key", lookup_expr="in"
+    # )
+    # resource_tag_key__icontains = CharFilter(
+    #     field_name="resources__tags__key", lookup_expr="icontains"
+    # )
+    # resource_tag_value = CharFilter(field_name="resources__tags__value")
+    # resource_tag_value__in = CharInFilter(
+    #     field_name="resources__tags__value", lookup_expr="in"
+    # )
+    # resource_tag_value__icontains = CharFilter(
+    #     field_name="resources__tags__value", lookup_expr="icontains"
+    # )
+    # resource_tags = CharInFilter(
+    #     method="filter_resource_tag",
+    #     lookup_expr="in",
+    #     help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
+    #     "separated by commas.",
+    # )
+
     scan = UUIDFilter(method="filter_scan_id")
     scan__in = UUIDInFilter(method="filter_scan_id_in")

     inserted_at = DateFilter(method="filter_inserted_at", lookup_expr="date")
+    inserted_at__date = DateFilter(method="filter_inserted_at", lookup_expr="date")
-    inserted_at__gte = DateFilter(method="filter_inserted_at_gte")
-    inserted_at__lte = DateFilter(method="filter_inserted_at_lte")
+    inserted_at__gte = DateFilter(
+        method="filter_inserted_at_gte",
+        help_text=f"Maximum date range is {settings.FINDINGS_MAX_DAYS_IN_RANGE} days.",
+    )
+    inserted_at__lte = DateFilter(
+        method="filter_inserted_at_lte",
+        help_text=f"Maximum date range is {settings.FINDINGS_MAX_DAYS_IN_RANGE} days.",
+    )

     class Meta:
         model = Finding
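For orientation, a hedged sketch of how these filter fields surface as JSON:API query parameters (the endpoint path is an assumption; the filter[...] names follow the error messages in the validation below):

    # GET /api/v1/findings?filter[inserted_at.gte]=2024-10-01&filter[inserted_at.lte]=2024-10-05
    # GET /api/v1/findings?filter[scan]=01929f57-c0ee-7553-be0b-cbde006fb6f7
    # django-filter maps filter[inserted_at.gte] to the inserted_at__gte field above.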
@@ -353,6 +382,52 @@ class FindingFilter(FilterSet):
         },
     }

+    def filter_queryset(self, queryset):
+        if not (self.data.get("scan") or self.data.get("scan__in")) and not (
+            self.data.get("inserted_at")
+            or self.data.get("inserted_at__date")
+            or self.data.get("inserted_at__gte")
+            or self.data.get("inserted_at__lte")
+        ):
+            raise ValidationError(
+                [
+                    {
+                        "detail": "At least one date filter is required: filter[inserted_at], filter[inserted_at.gte], "
+                        "or filter[inserted_at.lte].",
+                        "status": 400,
+                        "source": {"pointer": "/data/attributes/inserted_at"},
+                        "code": "required",
+                    }
+                ]
+            )
+
+        gte_date = (
+            datetime.strptime(self.data.get("inserted_at__gte"), "%Y-%m-%d").date()
+            if self.data.get("inserted_at__gte")
+            else datetime.now(timezone.utc).date()
+        )
+        lte_date = (
+            datetime.strptime(self.data.get("inserted_at__lte"), "%Y-%m-%d").date()
+            if self.data.get("inserted_at__lte")
+            else datetime.now(timezone.utc).date()
+        )
+
+        if abs(lte_date - gte_date) > timedelta(
+            days=settings.FINDINGS_MAX_DAYS_IN_RANGE
+        ):
+            raise ValidationError(
+                [
+                    {
+                        "detail": f"The date range cannot exceed {settings.FINDINGS_MAX_DAYS_IN_RANGE} days.",
+                        "status": 400,
+                        "source": {"pointer": "/data/attributes/inserted_at"},
+                        "code": "invalid",
+                    }
+                ]
+            )
+
+        return super().filter_queryset(queryset)
+
     # Convert filter values to UUIDv7 values for use with partitioning
     def filter_scan_id(self, queryset, name, value):
         try:
@@ -373,9 +448,7 @@ class FindingFilter(FilterSet):
         )

         return (
-            queryset.filter(id__gte=start)
-            .filter(id__lt=end)
-            .filter(scan__id=value_uuid)
+            queryset.filter(id__gte=start).filter(id__lt=end).filter(scan_id=value_uuid)
         )

     def filter_scan_id_in(self, queryset, name, value):
@@ -400,31 +473,42 @@ class FindingFilter(FilterSet):
             ]
         )
         if start == end:
-            return queryset.filter(id__gte=start).filter(scan__id__in=uuid_list)
+            return queryset.filter(id__gte=start).filter(scan_id__in=uuid_list)
         else:
             return (
                 queryset.filter(id__gte=start)
                 .filter(id__lt=end)
-                .filter(scan__id__in=uuid_list)
+                .filter(scan_id__in=uuid_list)
             )

     def filter_inserted_at(self, queryset, name, value):
-        value = self.maybe_date_to_datetime(value)
-        start = uuid7_start(datetime_to_uuid7(value))
+        datetime_value = self.maybe_date_to_datetime(value)
+        start = uuid7_start(datetime_to_uuid7(datetime_value))
+        end = uuid7_start(datetime_to_uuid7(datetime_value + timedelta(days=1)))

-        return queryset.filter(id__gte=start).filter(inserted_at__date=value)
+        return queryset.filter(id__gte=start, id__lt=end)

     def filter_inserted_at_gte(self, queryset, name, value):
-        value = self.maybe_date_to_datetime(value)
-        start = uuid7_start(datetime_to_uuid7(value))
+        datetime_value = self.maybe_date_to_datetime(value)
+        start = uuid7_start(datetime_to_uuid7(datetime_value))

-        return queryset.filter(id__gte=start).filter(inserted_at__gte=value)
+        return queryset.filter(id__gte=start)

     def filter_inserted_at_lte(self, queryset, name, value):
-        value = self.maybe_date_to_datetime(value)
-        end = uuid7_start(datetime_to_uuid7(value))
+        datetime_value = self.maybe_date_to_datetime(value)
+        end = uuid7_start(datetime_to_uuid7(datetime_value + timedelta(days=1)))

-        return queryset.filter(id__lte=end).filter(inserted_at__lte=value)
+        return queryset.filter(id__lt=end)

+    def filter_resource_tag(self, queryset, name, value):
+        overall_query = Q()
+        for key_value_pair in value:
+            tag_key, tag_value = key_value_pair.split(":", 1)
+            overall_query |= Q(
+                resources__tags__key__icontains=tag_key,
+                resources__tags__value__icontains=tag_value,
+            )
+        return queryset.filter(overall_query).distinct()
+
     @staticmethod
     def maybe_date_to_datetime(value):
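A sketch of why the UUIDv7 bounds replace the inserted_at comparisons (hedged: it assumes the api.db_utils helpers used above behave as the diff implies, with datetime_to_uuid7 embedding the timestamp and uuid7_start zeroing the random bits):

    from datetime import datetime, timedelta, timezone

    day = datetime(2024, 10, 18, tzinfo=timezone.utc)
    start = uuid7_start(datetime_to_uuid7(day))
    end = uuid7_start(datetime_to_uuid7(day + timedelta(days=1)))
    # Every finding inserted on 2024-10-18 satisfies start <= id < end, so the
    # filter prunes time-based partitions by primary key alone, without ever
    # touching the inserted_at column.
    Finding.objects.filter(id__gte=start, id__lt=end)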
@@ -565,3 +649,19 @@ class ServiceOverviewFilter(ScanSummaryFilter):
             }
         )
         return super().is_valid()
+
+
+class IntegrationFilter(FilterSet):
+    inserted_at = DateFilter(field_name="inserted_at", lookup_expr="date")
+    integration_type = ChoiceFilter(choices=Integration.IntegrationChoices.choices)
+    integration_type__in = ChoiceInFilter(
+        choices=Integration.IntegrationChoices.choices,
+        field_name="integration_type",
+        lookup_expr="in",
+    )
+
+    class Meta:
+        model = Integration
+        fields = {
+            "inserted_at": ["date", "gte", "lte"],
+        }
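A minimal sketch of exercising the new filter directly with the standard django-filter API (assumes an RLS/tenant context is already set when the queryset is evaluated):

    from api.filters import IntegrationFilter
    from api.models import Integration

    filtered = IntegrationFilter(
        {"integration_type": "amazon_s3"}, queryset=Integration.objects.all()
    ).qs  # only Amazon S3 integrations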
@@ -122,6 +122,22 @@
             "scanner_args": {}
         }
     },
+    {
+        "model": "api.provider",
+        "pk": "7791914f-d646-4fe2-b2ed-73f2c6499a36",
+        "fields": {
+            "tenant": "12646005-9067-4d2a-a098-8bb378604362",
+            "inserted_at": "2024-10-18T10:45:26.352Z",
+            "updated_at": "2024-10-18T11:16:23.533Z",
+            "provider": "kubernetes",
+            "uid": "gke_lucky-coast-419309_us-central1_autopilot-cluster-2",
+            "alias": "k8s_testing_2",
+            "connected": true,
+            "connection_last_checked_at": "2024-10-18T11:16:23.503Z",
+            "metadata": {},
+            "scanner_args": {}
+        }
+    },
     {
         "model": "api.providersecret",
         "pk": "11491b47-75ae-4f71-ad8d-3e630a72182e",
@@ -11,9 +11,7 @@
             "unique_resource_count": 1,
             "duration": 5,
             "scanner_args": {
-                "checks_to_execute": [
-                    "accessanalyzer_enabled"
-                ]
+                "checks_to_execute": ["accessanalyzer_enabled"]
             },
             "inserted_at": "2024-09-01T17:25:27.050Z",
             "started_at": "2024-09-01T17:25:27.050Z",
@@ -33,9 +31,7 @@
             "unique_resource_count": 1,
             "duration": 20,
             "scanner_args": {
-                "checks_to_execute": [
-                    "accessanalyzer_enabled"
-                ]
+                "checks_to_execute": ["accessanalyzer_enabled"]
             },
             "inserted_at": "2024-09-02T17:24:27.050Z",
             "started_at": "2024-09-02T17:24:27.050Z",
@@ -55,9 +51,7 @@
             "unique_resource_count": 10,
             "duration": 10,
             "scanner_args": {
-                "checks_to_execute": [
-                    "cloudsql_instance_automated_backups"
-                ]
+                "checks_to_execute": ["cloudsql_instance_automated_backups"]
             },
             "inserted_at": "2024-09-02T19:26:27.050Z",
             "started_at": "2024-09-02T19:26:27.050Z",
@@ -77,9 +71,7 @@
             "unique_resource_count": 1,
             "duration": 35,
             "scanner_args": {
-                "checks_to_execute": [
-                    "accessanalyzer_enabled"
-                ]
+                "checks_to_execute": ["accessanalyzer_enabled"]
             },
             "inserted_at": "2024-09-02T19:27:27.050Z",
             "started_at": "2024-09-02T19:27:27.050Z",
@@ -97,9 +89,7 @@
             "name": "test scheduled aws scan",
             "state": "available",
             "scanner_args": {
-                "checks_to_execute": [
-                    "cloudformation_stack_outputs_find_secrets"
-                ]
+                "checks_to_execute": ["cloudformation_stack_outputs_find_secrets"]
             },
             "scheduled_at": "2030-09-02T19:20:27.050Z",
             "inserted_at": "2024-09-02T19:24:27.050Z",
@@ -178,9 +168,7 @@
             "unique_resource_count": 19,
             "progress": 100,
             "scanner_args": {
-                "checks_to_execute": [
-                    "accessanalyzer_enabled"
-                ]
+                "checks_to_execute": ["accessanalyzer_enabled"]
             },
             "duration": 7,
             "scheduled_at": null,
@@ -190,6 +178,56 @@
             "completed_at": "2024-10-18T10:46:05.127Z"
         }
     },
+    {
+        "model": "api.scan",
+        "pk": "6dd8925f-a52d-48de-a546-d2d90db30ab1",
+        "fields": {
+            "tenant": "12646005-9067-4d2a-a098-8bb378604362",
+            "name": "real scan azure",
+            "provider": "1b59e032-3eb6-4694-93a5-df84cd9b3ce2",
+            "trigger": "manual",
+            "state": "completed",
+            "unique_resource_count": 20,
+            "progress": 100,
+            "scanner_args": {
+                "checks_to_execute": [
+                    "accessanalyzer_enabled",
+                    "account_security_contact_information_is_registered"
+                ]
+            },
+            "duration": 4,
+            "scheduled_at": null,
+            "inserted_at": "2024-10-18T11:16:21.358Z",
+            "updated_at": "2024-10-18T11:16:26.060Z",
+            "started_at": "2024-10-18T11:16:21.593Z",
+            "completed_at": "2024-10-18T11:16:26.060Z"
+        }
+    },
+    {
+        "model": "api.scan",
+        "pk": "4ca7ce89-3236-41a8-a369-8937bc152af5",
+        "fields": {
+            "tenant": "12646005-9067-4d2a-a098-8bb378604362",
+            "name": "real scan k8s",
+            "provider": "7791914f-d646-4fe2-b2ed-73f2c6499a36",
+            "trigger": "manual",
+            "state": "completed",
+            "unique_resource_count": 20,
+            "progress": 100,
+            "scanner_args": {
+                "checks_to_execute": [
+                    "accessanalyzer_enabled",
+                    "account_security_contact_information_is_registered"
+                ]
+            },
+            "duration": 4,
+            "scheduled_at": null,
+            "inserted_at": "2024-10-18T11:16:21.358Z",
+            "updated_at": "2024-10-18T11:16:26.060Z",
+            "started_at": "2024-10-18T11:16:21.593Z",
+            "completed_at": "2024-10-18T11:16:26.060Z"
+        }
+    },
     {
         "model": "api.scan",
         "pk": "01929f57-c0ee-7553-be0b-cbde006fb6f7",
@@ -6,6 +6,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.823Z",
             "updated_at": "2024-10-18T10:46:04.841Z",
+            "first_seen_at": "2024-10-18T10:46:04.823Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -61,6 +62,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.855Z",
             "updated_at": "2024-10-18T10:46:04.858Z",
+            "first_seen_at": "2024-10-18T10:46:04.855Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -116,6 +118,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.869Z",
             "updated_at": "2024-10-18T10:46:04.876Z",
+            "first_seen_at": "2024-10-18T10:46:04.869Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -171,6 +174,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.888Z",
             "updated_at": "2024-10-18T10:46:04.892Z",
+            "first_seen_at": "2024-10-18T10:46:04.888Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -226,6 +230,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.901Z",
             "updated_at": "2024-10-18T10:46:04.905Z",
+            "first_seen_at": "2024-10-18T10:46:04.901Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -281,6 +286,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.915Z",
             "updated_at": "2024-10-18T10:46:04.919Z",
+            "first_seen_at": "2024-10-18T10:46:04.915Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -336,6 +342,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.929Z",
             "updated_at": "2024-10-18T10:46:04.934Z",
+            "first_seen_at": "2024-10-18T10:46:04.929Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -391,6 +398,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.944Z",
             "updated_at": "2024-10-18T10:46:04.947Z",
+            "first_seen_at": "2024-10-18T10:46:04.944Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -446,6 +454,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.957Z",
             "updated_at": "2024-10-18T10:46:04.962Z",
+            "first_seen_at": "2024-10-18T10:46:04.957Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
             "delta": "new",
             "status": "PASS",
@@ -501,6 +510,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.971Z",
             "updated_at": "2024-10-18T10:46:04.975Z",
+            "first_seen_at": "2024-10-18T10:46:04.971Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -556,6 +566,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.984Z",
             "updated_at": "2024-10-18T10:46:04.989Z",
+            "first_seen_at": "2024-10-18T10:46:04.984Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -611,6 +622,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:04.999Z",
             "updated_at": "2024-10-18T10:46:05.003Z",
+            "first_seen_at": "2024-10-18T10:46:04.999Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -666,6 +678,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.013Z",
             "updated_at": "2024-10-18T10:46:05.018Z",
+            "first_seen_at": "2024-10-18T10:46:05.013Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -721,6 +734,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.029Z",
             "updated_at": "2024-10-18T10:46:05.033Z",
+            "first_seen_at": "2024-10-18T10:46:05.029Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -776,6 +790,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.045Z",
             "updated_at": "2024-10-18T10:46:05.050Z",
+            "first_seen_at": "2024-10-18T10:46:05.045Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -831,6 +846,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.061Z",
             "updated_at": "2024-10-18T10:46:05.065Z",
+            "first_seen_at": "2024-10-18T10:46:05.061Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -886,6 +902,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.080Z",
             "updated_at": "2024-10-18T10:46:05.085Z",
+            "first_seen_at": "2024-10-18T10:46:05.080Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -941,6 +958,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.099Z",
             "updated_at": "2024-10-18T10:46:05.104Z",
+            "first_seen_at": "2024-10-18T10:46:05.099Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -996,6 +1014,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T10:46:05.115Z",
             "updated_at": "2024-10-18T10:46:05.121Z",
+            "first_seen_at": "2024-10-18T10:46:05.115Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
             "delta": "new",
             "status": "FAIL",
@@ -1051,6 +1070,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.489Z",
             "updated_at": "2024-10-18T11:16:24.506Z",
+            "first_seen_at": "2024-10-18T10:46:04.823Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1106,6 +1126,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.518Z",
             "updated_at": "2024-10-18T11:16:24.521Z",
+            "first_seen_at": "2024-10-18T10:46:04.855Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1161,6 +1182,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.526Z",
             "updated_at": "2024-10-18T11:16:24.529Z",
+            "first_seen_at": "2024-10-18T10:46:04.869Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1216,6 +1238,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.535Z",
             "updated_at": "2024-10-18T11:16:24.538Z",
+            "first_seen_at": "2024-10-18T10:46:04.888Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1271,6 +1294,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.544Z",
             "updated_at": "2024-10-18T11:16:24.546Z",
+            "first_seen_at": "2024-10-18T10:46:04.901Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1326,6 +1350,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.551Z",
             "updated_at": "2024-10-18T11:16:24.554Z",
+            "first_seen_at": "2024-10-18T10:46:04.915Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1381,6 +1406,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.560Z",
             "updated_at": "2024-10-18T11:16:24.562Z",
+            "first_seen_at": "2024-10-18T10:46:04.929Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1436,6 +1462,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.567Z",
             "updated_at": "2024-10-18T11:16:24.569Z",
+            "first_seen_at": "2024-10-18T10:46:04.944Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1491,6 +1518,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.573Z",
             "updated_at": "2024-10-18T11:16:24.575Z",
+            "first_seen_at": "2024-10-18T10:46:04.957Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
             "delta": null,
             "status": "PASS",
@@ -1546,6 +1574,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.580Z",
             "updated_at": "2024-10-18T11:16:24.582Z",
+            "first_seen_at": "2024-10-18T10:46:04.971Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1601,6 +1630,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.587Z",
             "updated_at": "2024-10-18T11:16:24.589Z",
+            "first_seen_at": "2024-10-18T10:46:04.984Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1656,6 +1686,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.595Z",
             "updated_at": "2024-10-18T11:16:24.597Z",
+            "first_seen_at": "2024-10-18T10:46:04.999Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1711,6 +1742,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.602Z",
             "updated_at": "2024-10-18T11:16:24.604Z",
+            "first_seen_at": "2024-10-18T10:46:05.013Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1766,6 +1798,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.610Z",
             "updated_at": "2024-10-18T11:16:24.612Z",
+            "first_seen_at": "2024-10-18T10:46:05.029Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1821,6 +1854,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.617Z",
             "updated_at": "2024-10-18T11:16:24.620Z",
+            "first_seen_at": "2024-10-18T10:46:05.045Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1876,6 +1910,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.625Z",
             "updated_at": "2024-10-18T11:16:24.627Z",
+            "first_seen_at": "2024-10-18T10:46:05.061Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1931,6 +1966,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.632Z",
             "updated_at": "2024-10-18T11:16:24.634Z",
+            "first_seen_at": "2024-10-18T10:46:05.080Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -1986,6 +2022,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.639Z",
             "updated_at": "2024-10-18T11:16:24.642Z",
+            "first_seen_at": "2024-10-18T10:46:05.099Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -2041,6 +2078,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:24.646Z",
             "updated_at": "2024-10-18T11:16:24.648Z",
+            "first_seen_at": "2024-10-18T10:46:05.115Z",
             "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
             "delta": null,
             "status": "FAIL",
@@ -2096,6 +2134,7 @@
             "tenant": "12646005-9067-4d2a-a098-8bb378604362",
             "inserted_at": "2024-10-18T11:16:26.033Z",
             "updated_at": "2024-10-18T11:16:26.045Z",
+            "first_seen_at": "2024-10-18T11:16:26.033Z",
             "uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566",
             "delta": "new",
             "status": "MANUAL",
api/src/backend/api/management/commands/findings.py (new file, 237 lines)
@@ -0,0 +1,237 @@
import random
from datetime import datetime, timezone
from math import ceil
from uuid import uuid4

from django.core.management.base import BaseCommand
from tqdm import tqdm

from api.db_utils import rls_transaction
from api.models import (
    Finding,
    Provider,
    Resource,
    ResourceFindingMapping,
    Scan,
    StatusChoices,
)
from prowler.lib.check.models import CheckMetadata


class Command(BaseCommand):
    help = "Populates the database with test data for performance testing."

    def add_arguments(self, parser):
        parser.add_argument(
            "--tenant",
            type=str,
            required=True,
            help="Tenant id for which the data will be populated.",
        )
        parser.add_argument(
            "--resources",
            type=int,
            required=True,
            help="The number of resources to create.",
        )
        parser.add_argument(
            "--findings",
            type=int,
            required=True,
            help="The number of findings to create.",
        )
        parser.add_argument(
            "--batch", type=int, required=True, help="The batch size for bulk creation."
        )
        parser.add_argument(
            "--alias",
            type=str,
            required=False,
            help="Optional alias for the provider and scan",
        )

    def handle(self, *args, **options):
        tenant_id = options["tenant"]
        num_resources = options["resources"]
        num_findings = options["findings"]
        batch_size = options["batch"]
        alias = options["alias"] or "Testing"
        uid_token = str(uuid4())

        self.stdout.write(self.style.NOTICE("Starting data population"))
        self.stdout.write(self.style.NOTICE(f"\tTenant: {tenant_id}"))
        self.stdout.write(self.style.NOTICE(f"\tAlias: {alias}"))
        self.stdout.write(self.style.NOTICE(f"\tResources: {num_resources}"))
        self.stdout.write(self.style.NOTICE(f"\tFindings: {num_findings}"))
        self.stdout.write(self.style.NOTICE(f"\tBatch size: {batch_size}\n\n"))

        # Resource metadata
        possible_regions = [
            "us-east-1",
            "us-east-2",
            "us-west-1",
            "us-west-2",
            "ca-central-1",
            "eu-central-1",
            "eu-west-1",
            "eu-west-2",
            "eu-west-3",
            "ap-southeast-1",
            "ap-southeast-2",
            "ap-northeast-1",
            "ap-northeast-2",
            "ap-south-1",
            "sa-east-1",
        ]
        possible_services = []
        possible_types = []

        bulk_check_metadata = CheckMetadata.get_bulk(provider="aws")
        for check_metadata in bulk_check_metadata.values():
            if check_metadata.ServiceName not in possible_services:
                possible_services.append(check_metadata.ServiceName)
            if (
                check_metadata.ResourceType
                and check_metadata.ResourceType not in possible_types
            ):
                possible_types.append(check_metadata.ResourceType)

        with rls_transaction(tenant_id):
            provider, _ = Provider.all_objects.get_or_create(
                tenant_id=tenant_id,
                provider="aws",
                connected=True,
                uid=str(random.randint(100000000000, 999999999999)),
                defaults={
                    "alias": alias,
                },
            )

        with rls_transaction(tenant_id):
            scan = Scan.all_objects.create(
                tenant_id=tenant_id,
                provider=provider,
                name=alias,
                trigger="manual",
                state="executing",
                progress=0,
                started_at=datetime.now(timezone.utc),
            )
        scan_state = "completed"

        try:
            # Create resources
            resources = []

            for i in range(num_resources):
                resources.append(
                    Resource(
                        tenant_id=tenant_id,
                        provider_id=provider.id,
                        uid=f"testing-{uid_token}-{i}",
                        name=f"Testing {uid_token}-{i}",
                        region=random.choice(possible_regions),
                        service=random.choice(possible_services),
                        type=random.choice(possible_types),
                    )
                )

            num_batches = ceil(len(resources) / batch_size)
            self.stdout.write(self.style.WARNING("Creating resources..."))
            for i in tqdm(range(0, len(resources), batch_size), total=num_batches):
                with rls_transaction(tenant_id):
                    Resource.all_objects.bulk_create(resources[i : i + batch_size])
            self.stdout.write(self.style.SUCCESS("Resources created successfully.\n\n"))

            with rls_transaction(tenant_id):
                scan.progress = 33
                scan.save()

            # Create Findings
            findings = []
            possible_deltas = ["new", "changed", None]
            possible_severities = ["critical", "high", "medium", "low"]
            findings_resources_mapping = []

            for i in range(num_findings):
                severity = random.choice(possible_severities)
                check_id = random.randint(1, 1000)
                assigned_resource_num = random.randint(0, len(resources) - 1)
                assigned_resource = resources[assigned_resource_num]
                findings_resources_mapping.append(assigned_resource_num)

                findings.append(
                    Finding(
                        tenant_id=tenant_id,
                        scan=scan,
                        uid=f"testing-{uid_token}-{i}",
                        delta=random.choice(possible_deltas),
                        check_id=f"check-{check_id}",
                        status=random.choice(list(StatusChoices)),
                        severity=severity,
                        impact=severity,
                        raw_result={},
                        check_metadata={
                            "checktitle": f"Test title for check {check_id}",
                            "risk": f"Testing risk {uid_token}-{i}",
                            "provider": "aws",
                            "severity": severity,
                            "categories": ["category1", "category2", "category3"],
                            "description": "This is a random description that should not matter for testing purposes.",
                            "servicename": assigned_resource.service,
                            "resourcetype": assigned_resource.type,
                        },
                    )
                )

            num_batches = ceil(len(findings) / batch_size)
            self.stdout.write(self.style.WARNING("Creating findings..."))
            for i in tqdm(range(0, len(findings), batch_size), total=num_batches):
                with rls_transaction(tenant_id):
                    Finding.all_objects.bulk_create(findings[i : i + batch_size])
            self.stdout.write(self.style.SUCCESS("Findings created successfully.\n\n"))

            with rls_transaction(tenant_id):
                scan.progress = 66
                scan.save()

            # Create ResourceFindingMapping
            mappings = []
            for index, f in enumerate(findings):
                mappings.append(
                    ResourceFindingMapping(
                        tenant_id=tenant_id,
                        resource=resources[findings_resources_mapping[index]],
                        finding=f,
                    )
                )

            num_batches = ceil(len(mappings) / batch_size)
            self.stdout.write(
                self.style.WARNING("Creating resource-finding mappings...")
            )
            for i in tqdm(range(0, len(mappings), batch_size), total=num_batches):
                with rls_transaction(tenant_id):
                    ResourceFindingMapping.objects.bulk_create(
                        mappings[i : i + batch_size]
                    )
            self.stdout.write(
                self.style.SUCCESS(
                    "Resource-finding mappings created successfully.\n\n"
                )
            )
        except Exception as e:
            self.stdout.write(self.style.ERROR(f"Failed to populate test data: {e}"))
            scan_state = "failed"
        finally:
            scan.completed_at = datetime.now(timezone.utc)
            scan.duration = int(
                (datetime.now(timezone.utc) - scan.started_at).total_seconds()
            )
            scan.progress = 100
            scan.state = scan_state
            scan.unique_resource_count = num_resources
            with rls_transaction(tenant_id):
                scan.save()

        self.stdout.write(self.style.NOTICE("Successfully populated test data."))
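A hedged invocation sketch for the new management command (the tenant UUID is taken from the fixtures above and is purely illustrative):

    from django.core.management import call_command

    # Equivalent to:
    #   python manage.py findings --tenant 12646005-9067-4d2a-a098-8bb378604362 \
    #       --resources 1000 --findings 50000 --batch 5000 --alias perf-test
    call_command(
        "findings",
        tenant="12646005-9067-4d2a-a098-8bb378604362",
        resources=1000,
        findings=50000,
        batch=5000,
        alias="perf-test",
    )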
@@ -0,0 +1,23 @@
# Generated by Django 5.1.1 on 2024-12-20 13:16

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0002_token_migrations"),
    ]

    operations = [
        migrations.RemoveConstraint(
            model_name="provider",
            name="unique_provider_uids",
        ),
        migrations.AddConstraint(
            model_name="provider",
            constraint=models.UniqueConstraint(
                fields=("tenant_id", "provider", "uid", "is_deleted"),
                name="unique_provider_uids",
            ),
        ),
    ]
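Why is_deleted joins the unique key: under the old ("tenant_id", "provider", "uid") constraint, a soft-deleted provider still blocked re-adding the same account. A sketch of the behavior after this migration (assumes is_deleted is the Provider soft-delete flag, as the constraint implies):

    t = "12646005-9067-4d2a-a098-8bb378604362"  # illustrative tenant

    p1 = Provider.objects.create(tenant_id=t, provider="aws", uid="112233445566")
    p1.is_deleted = True
    p1.save()
    # Previously this raised an IntegrityError on unique_provider_uids;
    # with is_deleted in the key, one deleted and one active row can coexist.
    p2 = Provider.objects.create(tenant_id=t, provider="aws", uid="112233445566")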
@@ -1,15 +1,17 @@
 # Generated by Django 5.1.1 on 2024-12-05 12:29

-import api.rls
-import django.db.models.deletion
 import uuid

+import django.db.models.deletion
 from django.conf import settings
 from django.db import migrations, models

+import api.rls
+

 class Migration(migrations.Migration):
     dependencies = [
-        ("api", "0002_token_migrations"),
+        ("api", "0003_update_provider_unique_constraint_with_is_deleted"),
     ]

     operations = [
@@ -1,4 +1,5 @@
 from django.db import migrations
+
 from api.db_router import MainRouter


@@ -35,7 +36,7 @@ def create_admin_role(apps, schema_editor):

 class Migration(migrations.Migration):
     dependencies = [
-        ("api", "0003_rbac"),
+        ("api", "0004_rbac"),
     ]

     operations = [
api/src/backend/api/migrations/0006_findings_first_seen.py (new file, 15 lines)

@@ -0,0 +1,15 @@
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0005_rbac_missing_admin_roles"),
    ]

    operations = [
        migrations.AddField(
            model_name="finding",
            name="first_seen_at",
            field=models.DateTimeField(editable=False, null=True),
        ),
    ]
@@ -0,0 +1,25 @@
# Generated by Django 5.1.5 on 2025-01-28 15:03

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0006_findings_first_seen"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="scan",
            index=models.Index(
                fields=["tenant_id", "provider_id", "state", "inserted_at"],
                name="scans_prov_state_insert_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="scansummary",
            index=models.Index(
                fields=["tenant_id", "scan_id"], name="scan_summaries_tenant_scan_idx"
            ),
        ),
    ]
@@ -0,0 +1,64 @@
import json
from datetime import datetime, timedelta, timezone

import django.db.models.deletion
from django.db import migrations, models
from django_celery_beat.models import PeriodicTask

from api.db_utils import rls_transaction
from api.models import Scan, StateChoices


def migrate_daily_scheduled_scan_tasks(apps, schema_editor):
    for daily_scheduled_scan_task in PeriodicTask.objects.filter(
        task="scan-perform-scheduled"
    ):
        task_kwargs = json.loads(daily_scheduled_scan_task.kwargs)
        tenant_id = task_kwargs["tenant_id"]
        provider_id = task_kwargs["provider_id"]

        current_time = datetime.now(timezone.utc)
        scheduled_time_today = datetime.combine(
            current_time.date(),
            daily_scheduled_scan_task.start_time.time(),
            tzinfo=timezone.utc,
        )

        if current_time < scheduled_time_today:
            next_scan_date = scheduled_time_today
        else:
            next_scan_date = scheduled_time_today + timedelta(days=1)

        with rls_transaction(tenant_id):
            Scan.objects.create(
                tenant_id=tenant_id,
                name="Daily scheduled scan",
                provider_id=provider_id,
                trigger=Scan.TriggerChoices.SCHEDULED,
                state=StateChoices.SCHEDULED,
                scheduled_at=next_scan_date,
                scheduler_task_id=daily_scheduled_scan_task.id,
            )


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0007_scan_and_scan_summaries_indexes"),
        ("django_celery_beat", "0019_alter_periodictasks_options"),
    ]

    operations = [
        migrations.AddField(
            model_name="scan",
            name="scheduler_task",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="django_celery_beat.periodictask",
            ),
        ),
        migrations.RunPython(migrate_daily_scheduled_scan_tasks),
    ]
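A worked example of the rescheduling arithmetic in migrate_daily_scheduled_scan_tasks, with illustrative times:

    from datetime import datetime, timedelta, timezone

    current_time = datetime(2024, 12, 20, 14, 0, tzinfo=timezone.utc)
    task_start = datetime(2024, 12, 20, 9, 30, tzinfo=timezone.utc)  # task's daily time

    scheduled_time_today = datetime.combine(
        current_time.date(), task_start.time(), tzinfo=timezone.utc
    )  # 2024-12-20 09:30 UTC, already in the past at 14:00

    next_scan_date = (
        scheduled_time_today
        if current_time < scheduled_time_today
        else scheduled_time_today + timedelta(days=1)
    )  # -> 2024-12-21 09:30 UTC: today's slot was missed, so schedule tomorrow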
@@ -0,0 +1,22 @@
# Generated by Django 5.1.5 on 2025-02-07 09:42

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0008_daily_scheduled_tasks_update"),
    ]

    operations = [
        migrations.AlterField(
            model_name="provider",
            name="uid",
            field=models.CharField(
                max_length=250,
                validators=[django.core.validators.MinLengthValidator(3)],
                verbose_name="Unique identifier for the provider, set by the provider",
            ),
        ),
    ]
@@ -0,0 +1,109 @@
from functools import partial

from django.db import connection, migrations


def create_index_on_partitions(
    apps, schema_editor, parent_table: str, index_name: str, index_details: str
):
    with connection.cursor() as cursor:
        cursor.execute(
            """
            SELECT inhrelid::regclass::text
            FROM pg_inherits
            WHERE inhparent = %s::regclass;
            """,
            [parent_table],
        )
        partitions = [row[0] for row in cursor.fetchall()]
    # Iterate over partitions and create index concurrently.
    # Note: PostgreSQL does not allow CONCURRENTLY inside a transaction,
    # so we need atomic = False for this migration.
    for partition in partitions:
        sql = (
            f"CREATE INDEX CONCURRENTLY IF NOT EXISTS {partition.replace('.', '_')}_{index_name} ON {partition} "
            f"{index_details};"
        )
        schema_editor.execute(sql)


def drop_index_on_partitions(apps, schema_editor, parent_table: str, index_name: str):
    with schema_editor.connection.cursor() as cursor:
        cursor.execute(
            """
            SELECT inhrelid::regclass::text
            FROM pg_inherits
            WHERE inhparent = %s::regclass;
            """,
            [parent_table],
        )
        partitions = [row[0] for row in cursor.fetchall()]

    # Iterate over partitions and drop index concurrently.
    for partition in partitions:
        partition_index = f"{partition.replace('.', '_')}_{index_name}"
        sql = f"DROP INDEX CONCURRENTLY IF EXISTS {partition_index};"
        schema_editor.execute(sql)


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0009_increase_provider_uid_maximum_length"),
    ]

    operations = [
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="findings_tenant_and_id_idx",
                index_details="(tenant_id, id)",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="findings_tenant_and_id_idx",
            ),
        ),
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="find_tenant_scan_idx",
                index_details="(tenant_id, scan_id)",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="find_tenant_scan_idx",
            ),
        ),
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="find_tenant_scan_id_idx",
                index_details="(tenant_id, scan_id, id)",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="find_tenant_scan_id_idx",
            ),
        ),
        migrations.RunPython(
            partial(
                create_index_on_partitions,
                parent_table="findings",
                index_name="find_delta_new_idx",
                index_details="(tenant_id, id) where delta = 'new'",
            ),
            reverse_code=partial(
                drop_index_on_partitions,
                parent_table="findings",
                index_name="find_delta_new_idx",
            ),
        ),
    ]
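For a hypothetical partition named findings_default (the name is illustrative; real names come from the pg_inherits query), the helper above composes SQL like this:

    partition = "findings_default"  # one row from the pg_inherits query
    index_name = "findings_tenant_and_id_idx"
    index_details = "(tenant_id, id)"
    sql = (
        f"CREATE INDEX CONCURRENTLY IF NOT EXISTS "
        f"{partition.replace('.', '_')}_{index_name} ON {partition} {index_details};"
    )
    # -> CREATE INDEX CONCURRENTLY IF NOT EXISTS
    #    findings_default_findings_tenant_and_id_idx ON findings_default (tenant_id, id);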
@@ -0,0 +1,49 @@
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0010_findings_performance_indexes_partitions"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="finding",
            index=models.Index(
                fields=["tenant_id", "id"], name="findings_tenant_and_id_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="finding",
            index=models.Index(
                fields=["tenant_id", "scan_id"], name="find_tenant_scan_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="finding",
            index=models.Index(
                fields=["tenant_id", "scan_id", "id"], name="find_tenant_scan_id_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="finding",
            index=models.Index(
                condition=models.Q(("delta", "new")),
                fields=["tenant_id", "id"],
                name="find_delta_new_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="resourcetagmapping",
            index=models.Index(
                fields=["tenant_id", "resource_id"], name="resource_tag_tenant_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="resource",
            index=models.Index(
                fields=["tenant_id", "service", "region", "type"],
                name="resource_tenant_metadata_idx",
            ),
        ),
    ]
api/src/backend/api/migrations/0012_scan_report_output.py (new file, 15 lines)

@@ -0,0 +1,15 @@
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0011_findings_performance_indexes_parent"),
    ]

    operations = [
        migrations.AddField(
            model_name="scan",
            name="output_location",
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
api/src/backend/api/migrations/0013_integrations_enum.py (new file, 35 lines)

@@ -0,0 +1,35 @@
# Generated by Django 5.1.5 on 2025-03-03 15:46

from functools import partial

from django.db import migrations

from api.db_utils import IntegrationTypeEnum, PostgresEnumMigration, register_enum
from api.models import Integration

IntegrationTypeEnumMigration = PostgresEnumMigration(
    enum_name="integration_type",
    enum_values=tuple(
        integration_type[0]
        for integration_type in Integration.IntegrationChoices.choices
    ),
)


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0012_scan_report_output"),
    ]

    operations = [
        migrations.RunPython(
            IntegrationTypeEnumMigration.create_enum_type,
            reverse_code=IntegrationTypeEnumMigration.drop_enum_type,
        ),
        migrations.RunPython(
            partial(register_enum, enum_class=IntegrationTypeEnum),
            reverse_code=migrations.RunPython.noop,
        ),
    ]
api/src/backend/api/migrations/0014_integrations.py (new file, 131 lines)

@@ -0,0 +1,131 @@
# Generated by Django 5.1.5 on 2025-03-03 15:46

import uuid

import django.db.models.deletion
from django.db import migrations, models

import api.db_utils
import api.rls
from api.rls import RowLevelSecurityConstraint


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0013_integrations_enum"),
    ]

    operations = [
        migrations.CreateModel(
            name="Integration",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("enabled", models.BooleanField(default=False)),
                ("connected", models.BooleanField(blank=True, null=True)),
                (
                    "connection_last_checked_at",
                    models.DateTimeField(blank=True, null=True),
                ),
                (
                    "integration_type",
                    api.db_utils.IntegrationTypeEnumField(
                        choices=[
                            ("amazon_s3", "Amazon S3"),
                            ("saml", "SAML"),
                            ("aws_security_hub", "AWS Security Hub"),
                            ("jira", "JIRA"),
                            ("slack", "Slack"),
                        ]
                    ),
                ),
                ("configuration", models.JSONField(default=dict)),
                ("_credentials", models.BinaryField(db_column="credentials")),
                (
                    "tenant",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
                    ),
                ),
            ],
            options={"db_table": "integrations", "abstract": False},
        ),
        migrations.AddConstraint(
            model_name="integration",
            constraint=RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_integration",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
        migrations.CreateModel(
            name="IntegrationProviderRelationship",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("inserted_at", models.DateTimeField(auto_now_add=True)),
                (
                    "integration",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="api.integration",
                    ),
                ),
                (
                    "provider",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="api.provider"
                    ),
                ),
                (
                    "tenant",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="api.tenant"
                    ),
                ),
            ],
            options={
                "db_table": "integration_provider_mappings",
                "constraints": [
                    models.UniqueConstraint(
                        fields=("integration_id", "provider_id"),
                        name="unique_integration_provider_rel",
                    ),
                ],
            },
        ),
        migrations.AddConstraint(
            model_name="IntegrationProviderRelationship",
            constraint=RowLevelSecurityConstraint(
                "tenant_id",
                name="rls_on_integrationproviderrelationship",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ),
        migrations.AddField(
            model_name="integration",
            name="providers",
            field=models.ManyToManyField(
                blank=True,
                related_name="integrations",
                through="api.IntegrationProviderRelationship",
                to="api.provider",
            ),
        ),
    ]
@@ -11,6 +11,7 @@ from django.core.validators import MinLengthValidator
|
||||
from django.db import models
|
||||
from django.db.models import Q
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from django_celery_beat.models import PeriodicTask
|
||||
from django_celery_results.models import TaskResult
|
||||
from psqlextra.manager import PostgresManager
|
||||
from psqlextra.models import PostgresPartitionedModel
|
||||
@@ -20,6 +21,7 @@ from uuid6 import uuid7
|
||||
from api.db_utils import (
|
||||
CustomUserManager,
|
||||
FindingDeltaEnumField,
|
||||
IntegrationTypeEnumField,
|
||||
InvitationStateEnumField,
|
||||
MemberRoleEnumField,
|
||||
ProviderEnumField,
|
||||
@@ -226,13 +228,13 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
@staticmethod
|
||||
def validate_kubernetes_uid(value):
|
||||
if not re.match(
|
||||
r"(^[a-z0-9]([-a-z0-9]{1,61}[a-z0-9])?$)|(^arn:aws(-cn|-us-gov|-iso|-iso-b)?:[a-zA-Z0-9\-]+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:[a-zA-Z0-9\-_\/:\.\*]+(:\d+)?$)",
|
||||
r"^[a-z0-9][A-Za-z0-9_.:\/-]{1,250}$",
|
||||
value,
|
||||
):
|
||||
raise ModelValidationError(
|
||||
detail="The value must either be a valid Kubernetes UID (up to 63 characters, "
|
||||
"starting and ending with a lowercase letter or number, containing only "
|
||||
"lowercase alphanumeric characters and hyphens) or a valid EKS ARN.",
|
||||
"lowercase alphanumeric characters and hyphens) or a valid AWS EKS Cluster ARN, GCP GKE Context Name or Azure AKS Cluster Name.",
|
||||
code="kubernetes-uid",
|
||||
pointer="/data/attributes/uid",
|
||||
)
|
||||
@@ -246,7 +248,7 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
)
|
||||
uid = models.CharField(
|
||||
"Unique identifier for the provider, set by the provider",
|
||||
max_length=63,
|
||||
max_length=250,
|
||||
blank=False,
|
||||
validators=[MinLengthValidator(3)],
|
||||
)
|
||||
@@ -271,7 +273,7 @@ class Provider(RowLevelSecurityProtectedModel):
|
||||
|
||||
constraints = [
|
||||
models.UniqueConstraint(
|
||||
fields=("tenant_id", "provider", "uid"),
|
||||
fields=("tenant_id", "provider", "uid", "is_deleted"),
|
||||
name="unique_provider_uids",
|
||||
),
|
||||
RowLevelSecurityConstraint(
|
||||
@@ -410,6 +412,10 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
started_at = models.DateTimeField(null=True, blank=True)
|
||||
completed_at = models.DateTimeField(null=True, blank=True)
|
||||
next_scan_at = models.DateTimeField(null=True, blank=True)
|
||||
scheduler_task = models.ForeignKey(
|
||||
PeriodicTask, on_delete=models.CASCADE, null=True, blank=True
|
||||
)
|
||||
output_location = models.CharField(blank=True, null=True, max_length=200)
|
||||
# TODO: mutelist foreign key
|
||||
|
||||
class Meta(RowLevelSecurityProtectedModel.Meta):
|
||||
@@ -428,6 +434,10 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
fields=["provider", "state", "trigger", "scheduled_at"],
|
||||
name="scans_prov_state_trig_sche_idx",
|
||||
),
|
||||
models.Index(
|
||||
fields=["tenant_id", "provider_id", "state", "inserted_at"],
|
||||
name="scans_prov_state_insert_idx",
|
||||
),
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
@@ -515,8 +525,8 @@ class Resource(RowLevelSecurityProtectedModel):
|
||||
through="ResourceTagMapping",
|
||||
)
|
||||
|
||||
def get_tags(self) -> dict:
|
||||
return {tag.key: tag.value for tag in self.tags.all()}
|
||||
def get_tags(self, tenant_id: str) -> dict:
|
||||
return {tag.key: tag.value for tag in self.tags.filter(tenant_id=tenant_id)}
|
||||
|
||||
def clear_tags(self):
|
||||
self.tags.clear()

@@ -544,6 +554,10 @@ class Resource(RowLevelSecurityProtectedModel):
                fields=["uid", "region", "service", "name"],
                name="resource_uid_reg_serv_name_idx",
            ),
            models.Index(
                fields=["tenant_id", "service", "region", "type"],
                name="resource_tenant_metadata_idx",
            ),
            GinIndex(fields=["text_search"], name="gin_resources_search_idx"),
        ]

@@ -591,6 +605,12 @@ class ResourceTagMapping(RowLevelSecurityProtectedModel):
            ),
        ]

        indexes = [
            models.Index(
                fields=["tenant_id", "resource_id"], name="resource_tag_tenant_idx"
            ),
        ]


class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
    """

@@ -615,6 +635,7 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
    id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    first_seen_at = models.DateTimeField(editable=False, null=True)

    uid = models.CharField(max_length=300)
    delta = FindingDeltaEnumField(

@@ -688,7 +709,17 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
                ],
                name="findings_filter_idx",
            ),
            models.Index(fields=["tenant_id", "id"], name="findings_tenant_and_id_idx"),
            GinIndex(fields=["text_search"], name="gin_findings_search_idx"),
            models.Index(fields=["tenant_id", "scan_id"], name="find_tenant_scan_idx"),
            models.Index(
                fields=["tenant_id", "scan_id", "id"], name="find_tenant_scan_id_idx"
            ),
            models.Index(
                fields=["tenant_id", "id"],
                condition=Q(delta="new"),
                name="find_delta_new_idx",
            ),
        ]

    class JSONAPIMeta:

@@ -1099,6 +1130,89 @@ class ScanSummary(RowLevelSecurityProtectedModel):
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]
        indexes = [
            models.Index(
                fields=["tenant_id", "scan_id"],
                name="scan_summaries_tenant_scan_idx",
            )
        ]

    class JSONAPIMeta:
        resource_name = "scan-summaries"


class Integration(RowLevelSecurityProtectedModel):
    class IntegrationChoices(models.TextChoices):
        S3 = "amazon_s3", _("Amazon S3")
        SAML = "saml", _("SAML")
        AWS_SECURITY_HUB = "aws_security_hub", _("AWS Security Hub")
        JIRA = "jira", _("JIRA")
        SLACK = "slack", _("Slack")

    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    enabled = models.BooleanField(default=False)
    connected = models.BooleanField(null=True, blank=True)
    connection_last_checked_at = models.DateTimeField(null=True, blank=True)
    integration_type = IntegrationTypeEnumField(choices=IntegrationChoices.choices)
    configuration = models.JSONField(default=dict)
    _credentials = models.BinaryField(db_column="credentials")

    providers = models.ManyToManyField(
        Provider,
        related_name="integrations",
        through="IntegrationProviderRelationship",
        blank=True,
    )

    class Meta(RowLevelSecurityProtectedModel.Meta):
        db_table = "integrations"

        constraints = [
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

    class JSONAPIMeta:
        resource_name = "integrations"

    @property
    def credentials(self):
        if isinstance(self._credentials, memoryview):
            encrypted_bytes = self._credentials.tobytes()
        elif isinstance(self._credentials, str):
            encrypted_bytes = self._credentials.encode()
        else:
            encrypted_bytes = self._credentials
        decrypted_data = fernet.decrypt(encrypted_bytes)
        return json.loads(decrypted_data.decode())

    @credentials.setter
    def credentials(self, value):
        encrypted_data = fernet.encrypt(json.dumps(value).encode())
        self._credentials = encrypted_data
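
A minimal standalone sketch of the encrypt/decrypt round trip behind this property, assuming `fernet` is a `cryptography.fernet.Fernet` instance whose key comes from project configuration (the key generation here is illustrative only):

import json
from cryptography.fernet import Fernet

fernet = Fernet(Fernet.generate_key())  # the real key is configured elsewhere

creds = {"aws_access_key_id": "AKIA...", "aws_secret_access_key": "..."}
encrypted = fernet.encrypt(json.dumps(creds).encode())      # what the setter stores
decrypted = json.loads(fernet.decrypt(encrypted).decode())  # what the getter returns
assert decrypted == creds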

class IntegrationProviderRelationship(RowLevelSecurityProtectedModel):
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    integration = models.ForeignKey(Integration, on_delete=models.CASCADE)
    provider = models.ForeignKey(Provider, on_delete=models.CASCADE)
    inserted_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = "integration_provider_mappings"
        constraints = [
            models.UniqueConstraint(
                fields=["integration_id", "provider_id"],
                name="unique_integration_provider_rel",
            ),
            RowLevelSecurityConstraint(
                field="tenant_id",
                name="rls_on_%(class)s",
                statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
            ),
        ]

@@ -1,9 +1,11 @@
from enum import Enum
from rest_framework.permissions import BasePermission
from api.models import Provider, Role, User
from api.db_router import MainRouter
from typing import Optional

from django.db.models import QuerySet
from rest_framework.permissions import BasePermission

from api.db_router import MainRouter
from api.models import Provider, Role, User


class Permissions(Enum):

@@ -63,8 +65,11 @@ def get_providers(role: Role) -> QuerySet[Provider]:
        A QuerySet of Provider objects filtered by the role's provider groups.
        If the role has no provider groups, returns an empty queryset.
    """
    tenant = role.tenant
    provider_groups = role.provider_groups.all()
    if not provider_groups.exists():
        return Provider.objects.none()

    return Provider.objects.filter(provider_groups__in=provider_groups).distinct()
    return Provider.objects.filter(
        tenant=tenant, provider_groups__in=provider_groups
    ).distinct()
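
The point of this change is that provider groups alone do not scope the query to the role's tenant. A sketch of the corrected query pattern (illustrative role lookup):

# Illustrative: without the tenant filter, provider-group membership alone
# could match providers outside the role's own tenant.
role = Role.objects.first()
providers = Provider.objects.filter(
    tenant=role.tenant, provider_groups__in=role.provider_groups.all()
).distinct()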

@@ -2,8 +2,7 @@ from typing import Any
from uuid import uuid4

from django.core.exceptions import ValidationError
from django.db import DEFAULT_DB_ALIAS
from django.db import models
from django.db import DEFAULT_DB_ALIAS, models
from django.db.backends.ddl_references import Statement, Table

from api.db_utils import DB_USER, POSTGRES_TENANT_VAR

@@ -59,11 +58,11 @@ class RowLevelSecurityConstraint(models.BaseConstraint):
    drop_sql_query = """
        ALTER TABLE %(table_name)s NO FORCE ROW LEVEL SECURITY;
        ALTER TABLE %(table_name)s DISABLE ROW LEVEL SECURITY;
        REVOKE ALL ON TABLE %(table_name) TO %(db_user)s;
        REVOKE ALL ON TABLE %(table_name)s FROM %(db_user)s;
    """

    drop_policy_sql_query = """
        DROP POLICY IF EXISTS %(db_user)s_%(table_name)s_{statement} on %(table_name)s;
        DROP POLICY IF EXISTS %(db_user)s_%(raw_table_name)s_{statement} ON %(table_name)s;
    """

    def __init__(

@@ -88,9 +87,7 @@ class RowLevelSecurityConstraint(models.BaseConstraint):
                f"{grant_queries}{self.grant_sql_query.format(statement=statement)}"
            )

        full_create_sql_query = (
            f"{self.rls_sql_query}" f"{policy_queries}" f"{grant_queries}"
        )
        full_create_sql_query = f"{self.rls_sql_query}{policy_queries}{grant_queries}"

        table_name = model._meta.db_table
        if self.partition_name:

@@ -107,16 +104,20 @@ class RowLevelSecurityConstraint(models.BaseConstraint):

    def remove_sql(self, model: Any, schema_editor: Any) -> Any:
        field_column = schema_editor.quote_name(self.target_field)
        raw_table_name = model._meta.db_table
        table_name = raw_table_name
        if self.partition_name:
            raw_table_name = f"{raw_table_name}_{self.partition_name}"
            table_name = raw_table_name

        full_drop_sql_query = (
            f"{self.drop_sql_query}"
            f"{''.join([self.drop_policy_sql_query.format(statement) for statement in self.statements])}"
            f"{''.join([self.drop_policy_sql_query.format(statement=statement) for statement in self.statements])}"
        )
        table_name = model._meta.db_table
        if self.partition_name:
            table_name = f"{table_name}_{self.partition_name}"
        return Statement(
            full_drop_sql_query,
            table_name=Table(table_name, schema_editor.quote_name),
            raw_table_name=raw_table_name,
            field_column=field_column,
            db_user=DB_USER,
            partition_name=self.partition_name,
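
The `.format(statement)` to `.format(statement=statement)` change above matters because the policy template uses a named placeholder; a quick illustration of the failure mode:

template = "DROP POLICY IF EXISTS policy_{statement} ON my_table;"
template.format(statement="SELECT")  # OK: '...policy_SELECT ON my_table;'
template.format("SELECT")            # KeyError: 'statement' -- positional args never bind named fields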

[File diff suppressed because it is too large]

@@ -1,8 +1,9 @@
import pytest
from conftest import TEST_PASSWORD, get_api_tokens, get_authorization_header
from django.urls import reverse
from rest_framework.test import APIClient

from conftest import TEST_PASSWORD, get_api_tokens, get_authorization_header
from api.models import Membership, User


@pytest.mark.django_db

@@ -96,3 +97,204 @@ def test_refresh_token(create_test_user, tenants_fixture):
        format="vnd.api+json",
    )
    assert new_refresh_response.status_code == 200


@pytest.mark.django_db
def test_user_me_when_inviting_users(create_test_user, tenants_fixture, roles_fixture):
    client = APIClient()

    role = roles_fixture[0]

    user1_email = "user1@testing.com"
    user2_email = "user2@testing.com"

    password = "thisisapassword123"

    user1_response = client.post(
        reverse("user-list"),
        data={
            "data": {
                "type": "users",
                "attributes": {
                    "name": "user1",
                    "email": user1_email,
                    "password": password,
                },
            }
        },
        format="vnd.api+json",
    )
    assert user1_response.status_code == 201

    user1_access_token, _ = get_api_tokens(client, user1_email, password)
    user1_headers = get_authorization_header(user1_access_token)

    user2_invitation = client.post(
        reverse("invitation-list"),
        data={
            "data": {
                "type": "invitations",
                "attributes": {"email": user2_email},
                "relationships": {
                    "roles": {
                        "data": [
                            {
                                "type": "roles",
                                "id": str(role.id),
                            }
                        ]
                    }
                },
            }
        },
        format="vnd.api+json",
        headers=user1_headers,
    )
    assert user2_invitation.status_code == 201
    invitation_token = user2_invitation.json()["data"]["attributes"]["token"]

    user2_response = client.post(
        reverse("user-list") + f"?invitation_token={invitation_token}",
        data={
            "data": {
                "type": "users",
                "attributes": {
                    "name": "user2",
                    "email": user2_email,
                    "password": password,
                },
            }
        },
        format="vnd.api+json",
    )
    assert user2_response.status_code == 201

    user2_access_token, _ = get_api_tokens(client, user2_email, password)
    user2_headers = get_authorization_header(user2_access_token)

    user1_me = client.get(reverse("user-me"), headers=user1_headers)
    assert user1_me.status_code == 200
    assert user1_me.json()["data"]["attributes"]["email"] == user1_email

    user2_me = client.get(reverse("user-me"), headers=user2_headers)
    assert user2_me.status_code == 200
    assert user2_me.json()["data"]["attributes"]["email"] == user2_email


@pytest.mark.django_db
class TestTokenSwitchTenant:
    def test_switch_tenant_with_valid_token(self, tenants_fixture, providers_fixture):
        client = APIClient()

        test_user = "test_email@prowler.com"
        test_password = "test_password"

        # Check that we can create a new user without any kind of authentication
        user_creation_response = client.post(
            reverse("user-list"),
            data={
                "data": {
                    "type": "users",
                    "attributes": {
                        "name": "test",
                        "email": test_user,
                        "password": test_password,
                    },
                }
            },
            format="vnd.api+json",
        )
        assert user_creation_response.status_code == 201

        # Create a new relationship between this user and another tenant
        tenant_id = tenants_fixture[0].id
        user_instance = User.objects.get(email=test_user)
        Membership.objects.create(user=user_instance, tenant_id=tenant_id)

        # Check that using our new user's credentials we can authenticate and get the providers
        access_token, _ = get_api_tokens(client, test_user, test_password)
        auth_headers = get_authorization_header(access_token)

        user_me_response = client.get(
            reverse("user-me"),
            headers=auth_headers,
        )
        assert user_me_response.status_code == 200
        # Assert this user belongs to two tenants
        assert (
            user_me_response.json()["data"]["relationships"]["memberships"]["meta"][
                "count"
            ]
            == 2
        )

        provider_response = client.get(
            reverse("provider-list"),
            headers=auth_headers,
        )
        assert provider_response.status_code == 200
        # Empty response since there are no providers in this tenant
        assert not provider_response.json()["data"]

        switch_tenant_response = client.post(
            reverse("token-switch"),
            data={
                "data": {
                    "type": "tokens-switch-tenant",
                    "attributes": {"tenant_id": tenant_id},
                }
            },
            headers=auth_headers,
        )
        assert switch_tenant_response.status_code == 200
        new_access_token = switch_tenant_response.json()["data"]["attributes"]["access"]
        new_auth_headers = get_authorization_header(new_access_token)

        provider_response = client.get(
            reverse("provider-list"),
            headers=new_auth_headers,
        )
        assert provider_response.status_code == 200
        # There must be data now because we switched to a tenant that has providers
        assert provider_response.json()["data"]

    def test_switch_tenant_with_invalid_token(self, create_test_user, tenants_fixture):
        client = APIClient()

        access_token, refresh_token = get_api_tokens(
            client, create_test_user.email, TEST_PASSWORD
        )
        auth_headers = get_authorization_header(access_token)

        invalid_token_response = client.post(
            reverse("token-switch"),
            data={
                "data": {
                    "type": "tokens-switch-tenant",
                    "attributes": {"tenant_id": "invalid_tenant_id"},
                }
            },
            headers=auth_headers,
        )
        assert invalid_token_response.status_code == 400
        assert invalid_token_response.json()["errors"][0]["code"] == "invalid"
        assert (
            invalid_token_response.json()["errors"][0]["detail"]
            == "Must be a valid UUID."
        )

        invalid_tenant_response = client.post(
            reverse("token-switch"),
            data={
                "data": {
                    "type": "tokens-switch-tenant",
                    "attributes": {"tenant_id": tenants_fixture[-1].id},
                }
            },
            headers=auth_headers,
        )
        assert invalid_tenant_response.status_code == 400
        assert invalid_tenant_response.json()["errors"][0]["code"] == "invalid"
        assert invalid_tenant_response.json()["errors"][0]["detail"] == (
            "Tenant does not exist or user is not a member."
        )
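
As a sketch of what the switch endpoint changes inside the token (assuming the project's simplejwt settings are loaded), the returned access token should carry the requested tenant in its `tenant_id` claim; names here are the ones used in the test above:

from rest_framework_simplejwt.tokens import AccessToken

# Illustrative: decode and inspect the claims of the token from /tokens/switch.
claims = AccessToken(new_access_token)
assert claims["tenant_id"] == str(tenant_id)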

api/src/backend/api/tests/integration/test_providers.py (new file, 85 lines)
@@ -0,0 +1,85 @@
from unittest.mock import Mock, patch

import pytest
from conftest import get_api_tokens, get_authorization_header
from django.urls import reverse
from rest_framework.test import APIClient

from api.models import Provider


@patch("api.v1.views.Task.objects.get")
@patch("api.v1.views.delete_provider_task.delay")
@pytest.mark.django_db
def test_delete_provider_without_executing_task(
    mock_delete_task, mock_task_get, create_test_user, tenants_fixture, tasks_fixture
):
    client = APIClient()

    test_user = "test_email@prowler.com"
    test_password = "test_password"

    prowler_task = tasks_fixture[0]
    task_mock = Mock()
    task_mock.id = prowler_task.id
    mock_delete_task.return_value = task_mock
    mock_task_get.return_value = prowler_task

    user_creation_response = client.post(
        reverse("user-list"),
        data={
            "data": {
                "type": "users",
                "attributes": {
                    "name": "test",
                    "email": test_user,
                    "password": test_password,
                },
            }
        },
        format="vnd.api+json",
    )
    assert user_creation_response.status_code == 201

    access_token, _ = get_api_tokens(client, test_user, test_password)
    auth_headers = get_authorization_header(access_token)

    create_provider_response = client.post(
        reverse("provider-list"),
        data={
            "data": {
                "type": "providers",
                "attributes": {
                    "provider": Provider.ProviderChoices.AWS,
                    "uid": "123456789012",
                },
            }
        },
        format="vnd.api+json",
        headers=auth_headers,
    )
    assert create_provider_response.status_code == 201
    provider_id = create_provider_response.json()["data"]["id"]
    provider_uid = create_provider_response.json()["data"]["attributes"]["uid"]

    remove_provider = client.delete(
        reverse("provider-detail", kwargs={"pk": provider_id}),
        headers=auth_headers,
    )
    assert remove_provider.status_code == 202

    recreate_provider_response = client.post(
        reverse("provider-list"),
        data={
            "data": {
                "type": "providers",
                "attributes": {
                    "provider": Provider.ProviderChoices.AWS,
                    "uid": provider_uid,
                },
            }
        },
        format="vnd.api+json",
        headers=auth_headers,
    )
    assert recreate_provider_response.status_code == 201

@@ -2,7 +2,15 @@ from datetime import datetime, timezone
from enum import Enum
from unittest.mock import patch

from api.db_utils import enum_to_choices, one_week_from_now, generate_random_token
import pytest

from api.db_utils import (
    batch_delete,
    enum_to_choices,
    generate_random_token,
    one_week_from_now,
)
from api.models import Provider


class TestEnumToChoices:

@@ -106,3 +114,26 @@ class TestGenerateRandomToken:
        token = generate_random_token(length=5, symbols="")
        # Falls back to the default symbols
        assert len(token) == 5


class TestBatchDelete:
    @pytest.fixture
    def create_test_providers(self, tenants_fixture):
        tenant = tenants_fixture[0]
        provider_id = 123456789012
        provider_count = 10
        for i in range(provider_count):
            Provider.objects.create(
                tenant=tenant,
                uid=f"{provider_id + i}",
                provider=Provider.ProviderChoices.AWS,
            )
        return provider_count

    @pytest.mark.django_db
    def test_batch_delete(self, create_test_providers):
        _, summary = batch_delete(
            Provider.objects.all(), batch_size=create_test_providers // 2
        )
        assert Provider.objects.all().count() == 0
        assert summary == {"api.Provider": create_test_providers}
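
The implementation of `api.db_utils.batch_delete` is not part of this diff; the test only pins down its contract (deletes everything, returns a tuple whose second element is a per-model deletion summary). A plausible sketch consistent with that contract, using Django's standard `delete()` return shape:

from django.db.models import QuerySet

def batch_delete(queryset: QuerySet, batch_size: int) -> tuple[int, dict]:
    """Hypothetical shape only -- the real helper may differ.
    Deletes the queryset in primary-key batches and merges Django's summaries."""
    total, summary = 0, {}
    while True:
        pks = list(queryset.values_list("pk", flat=True)[:batch_size])
        if not pks:
            break
        deleted, per_model = queryset.model.objects.filter(pk__in=pks).delete()
        total += deleted
        for label, count in per_model.items():
            summary[label] = summary.get(label, 0) + count
    return total, summary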

@@ -7,9 +7,10 @@ from api.models import Resource, ResourceTag
class TestResourceModel:
    def test_setting_tags(self, providers_fixture):
        provider, *_ = providers_fixture
        tenant_id = provider.tenant_id

        resource = Resource.objects.create(
            tenant_id=provider.tenant_id,
            tenant_id=tenant_id,
            provider=provider,
            uid="arn:aws:ec2:us-east-1:123456789012:instance/i-1234567890abcdef0",
            name="My Instance 1",

@@ -20,12 +21,12 @@ class TestResourceModel:

        tags = [
            ResourceTag.objects.create(
                tenant_id=provider.tenant_id,
                tenant_id=tenant_id,
                key="key",
                value="value",
            ),
            ResourceTag.objects.create(
                tenant_id=provider.tenant_id,
                tenant_id=tenant_id,
                key="key2",
                value="value2",
            ),

@@ -33,9 +34,9 @@ class TestResourceModel:

        resource.upsert_or_delete_tags(tags)

        assert len(tags) == len(resource.tags.all())
        assert len(tags) == len(resource.tags.filter(tenant_id=tenant_id))

        tags_dict = resource.get_tags()
        tags_dict = resource.get_tags(tenant_id=tenant_id)

        for tag in tags:
            assert tag.key in tags_dict

@@ -43,47 +44,51 @@ class TestResourceModel:

    def test_adding_tags(self, resources_fixture):
        resource, *_ = resources_fixture
        tenant_id = str(resource.tenant_id)

        tags = [
            ResourceTag.objects.create(
                tenant_id=resource.tenant_id,
                tenant_id=tenant_id,
                key="env",
                value="test",
            ),
        ]
        before_count = len(resource.tags.all())
        before_count = len(resource.tags.filter(tenant_id=tenant_id))

        resource.upsert_or_delete_tags(tags)

        assert before_count + 1 == len(resource.tags.all())
        assert before_count + 1 == len(resource.tags.filter(tenant_id=tenant_id))

        tags_dict = resource.get_tags()
        tags_dict = resource.get_tags(tenant_id=tenant_id)

        assert "env" in tags_dict
        assert tags_dict["env"] == "test"

    def test_adding_duplicate_tags(self, resources_fixture):
        resource, *_ = resources_fixture
        tenant_id = str(resource.tenant_id)

        tags = resource.tags.all()
        tags = resource.tags.filter(tenant_id=tenant_id)

        before_count = len(resource.tags.all())
        before_count = len(resource.tags.filter(tenant_id=tenant_id))

        resource.upsert_or_delete_tags(tags)

        # The tag count should stay the same
        assert before_count == len(resource.tags.all())
        assert before_count == len(resource.tags.filter(tenant_id=tenant_id))

    def test_add_tags_none(self, resources_fixture):
        resource, *_ = resources_fixture
        tenant_id = str(resource.tenant_id)
        resource.upsert_or_delete_tags(None)

        assert len(resource.tags.all()) == 0
        assert resource.get_tags() == {}
        assert len(resource.tags.filter(tenant_id=tenant_id)) == 0
        assert resource.get_tags(tenant_id=tenant_id) == {}

    def test_clear_tags(self, resources_fixture):
        resource, *_ = resources_fixture
        tenant_id = str(resource.tenant_id)
        resource.clear_tags()

        assert len(resource.tags.all()) == 0
        assert resource.get_tags() == {}
        assert len(resource.tags.filter(tenant_id=tenant_id)) == 0
        assert resource.get_tags(tenant_id=tenant_id) == {}

@@ -1,7 +1,19 @@
from unittest.mock import ANY, Mock, patch

import pytest
from django.urls import reverse
from rest_framework import status
from unittest.mock import patch, ANY, Mock

from api.models import (
    Membership,
    ProviderGroup,
    ProviderGroupMembership,
    Role,
    RoleProviderGroupRelationship,
    User,
    UserRoleRelationship,
)
from api.v1.serializers import TokenSerializer


@pytest.mark.django_db

@@ -304,3 +316,96 @@ class TestProviderViewSet:
            reverse("provider-connection", kwargs={"pk": provider.id})
        )
        assert response.status_code == status.HTTP_403_FORBIDDEN


@pytest.mark.django_db
class TestLimitedVisibility:
    TEST_EMAIL = "rbac@rbac.com"
    TEST_PASSWORD = "thisisapassword123"

    @pytest.fixture
    def limited_admin_user(
        self, django_db_setup, django_db_blocker, tenants_fixture, providers_fixture
    ):
        with django_db_blocker.unblock():
            tenant = tenants_fixture[0]
            provider = providers_fixture[0]
            user = User.objects.create_user(
                name="testing",
                email=self.TEST_EMAIL,
                password=self.TEST_PASSWORD,
            )
            Membership.objects.create(
                user=user,
                tenant=tenant,
                role=Membership.RoleChoices.OWNER,
            )

            role = Role.objects.create(
                name="limited_visibility",
                tenant=tenant,
                manage_users=True,
                manage_account=True,
                manage_billing=True,
                manage_providers=True,
                manage_integrations=True,
                manage_scans=True,
                unlimited_visibility=False,
            )
            UserRoleRelationship.objects.create(
                user=user,
                role=role,
                tenant=tenant,
            )

            provider_group = ProviderGroup.objects.create(
                name="limited_visibility_group",
                tenant=tenant,
            )
            ProviderGroupMembership.objects.create(
                tenant=tenant,
                provider=provider,
                provider_group=provider_group,
            )

            RoleProviderGroupRelationship.objects.create(
                tenant=tenant, role=role, provider_group=provider_group
            )

            return user

    @pytest.fixture
    def authenticated_client_rbac_limited(
        self, limited_admin_user, tenants_fixture, client
    ):
        client.user = limited_admin_user
        tenant_id = tenants_fixture[0].id
        serializer = TokenSerializer(
            data={
                "type": "tokens",
                "email": self.TEST_EMAIL,
                "password": self.TEST_PASSWORD,
                "tenant_id": tenant_id,
            }
        )
        serializer.is_valid(raise_exception=True)
        access_token = serializer.validated_data["access"]
        client.defaults["HTTP_AUTHORIZATION"] = f"Bearer {access_token}"
        return client

    def test_integrations(
        self, authenticated_client_rbac_limited, integrations_fixture, providers_fixture
    ):
        # Integration 2 is related to provider 1 and provider 2
        # This user cannot see provider 2
        integration = integrations_fixture[1]

        response = authenticated_client_rbac_limited.get(
            reverse("integration-detail", kwargs={"pk": integration.id})
        )

        assert response.status_code == status.HTTP_200_OK
        assert integration.providers.count() == 2
        assert (
            response.json()["data"]["relationships"]["providers"]["meta"]["count"] == 1
        )

@@ -274,9 +274,10 @@ class TestValidateInvitation:
        expired_time = datetime.now(timezone.utc) - timedelta(days=1)
        invitation.expires_at = expired_time

        with patch("api.utils.Invitation.objects.using") as mock_using, patch(
            "api.utils.datetime"
        ) as mock_datetime:
        with (
            patch("api.utils.Invitation.objects.using") as mock_using,
            patch("api.utils.datetime") as mock_datetime,
        ):
            mock_db = mock_using.return_value
            mock_db.get.return_value = invitation
            mock_datetime.now.return_value = datetime.now(timezone.utc)
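
The rewritten context manager above uses the parenthesized multi-item `with` form, which requires Python 3.10 or later; both spellings are otherwise equivalent:

from unittest.mock import patch

# Pre-3.10 style (the removed form):
with patch("api.utils.Invitation.objects.using") as mock_using, patch(
    "api.utils.datetime"
) as mock_datetime:
    ...

# Parenthesized style (the added form, Python 3.10+):
with (
    patch("api.utils.Invitation.objects.using") as mock_using,
    patch("api.utils.datetime") as mock_datetime,
):
    ...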

[File diff suppressed because it is too large]

@@ -1,15 +1,25 @@
from datetime import datetime, timezone

from allauth.socialaccount.providers.oauth2.client import OAuth2Client
from rest_framework.exceptions import NotFound, ValidationError

from api.db_router import MainRouter
from api.exceptions import InvitationTokenExpiredException
from api.models import Invitation, Provider
from prowler.providers.aws.aws_provider import AwsProvider
from prowler.providers.azure.azure_provider import AzureProvider
from prowler.providers.common.models import Connection
from prowler.providers.gcp.gcp_provider import GcpProvider
from prowler.providers.kubernetes.kubernetes_provider import KubernetesProvider
from rest_framework.exceptions import ValidationError, NotFound

from api.db_router import MainRouter
from api.exceptions import InvitationTokenExpiredException
from api.models import Provider, Invitation

class CustomOAuth2Client(OAuth2Client):
    def __init__(self, client_id, secret, *args, **kwargs):
        # Remove any duplicate "scope_delimiter" from kwargs
        # Bug present in dj-rest-auth after version v7.0.1
        # https://github.com/iMerica/dj-rest-auth/issues/673
        kwargs.pop("scope_delimiter", None)
        super().__init__(client_id, secret, *args, **kwargs)


def merge_dicts(default_dict: dict, replacement_dict: dict) -> dict:

@@ -106,7 +106,7 @@ def uuid7_end(uuid_obj: UUID, offset_months: int = 1) -> UUID:

    Args:
        uuid_obj: A UUIDv7 object.
        offset_days: Number of months to offset from the given UUID's date. Defaults to 1 to handle if
        offset_months: Number of months to offset from the given UUID's date. Defaults to 1 to handle if
            partitions are not being used, if so the value will be the one set at FINDINGS_TABLE_PARTITION_MONTHS.

    Returns:

api/src/backend/api/v1/serializer_utils/integrations.py (new file, 122 lines)
@@ -0,0 +1,122 @@
from drf_spectacular.utils import extend_schema_field
from rest_framework_json_api import serializers
from rest_framework_json_api.serializers import ValidationError


class BaseValidateSerializer(serializers.Serializer):
    def validate(self, data):
        if hasattr(self, "initial_data"):
            initial_data = set(self.initial_data.keys()) - {"id", "type"}
            unknown_keys = initial_data - set(self.fields.keys())
            if unknown_keys:
                raise ValidationError(f"Invalid fields: {unknown_keys}")
        return data


# Integrations


class S3ConfigSerializer(BaseValidateSerializer):
    bucket_name = serializers.CharField()
    output_directory = serializers.CharField()

    class Meta:
        resource_name = "integrations"


class AWSCredentialSerializer(BaseValidateSerializer):
    role_arn = serializers.CharField(required=False)
    external_id = serializers.CharField(required=False)
    role_session_name = serializers.CharField(required=False)
    session_duration = serializers.IntegerField(
        required=False, min_value=900, max_value=43200
    )
    aws_access_key_id = serializers.CharField(required=False)
    aws_secret_access_key = serializers.CharField(required=False)
    aws_session_token = serializers.CharField(required=False)

    class Meta:
        resource_name = "integrations"


@extend_schema_field(
    {
        "oneOf": [
            {
                "type": "object",
                "title": "AWS Credentials",
                "properties": {
                    "role_arn": {
                        "type": "string",
                        "description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
                        "assumption.",
                    },
                    "external_id": {
                        "type": "string",
                        "description": "An identifier to enhance security for role assumption.",
                    },
                    "aws_access_key_id": {
                        "type": "string",
                        "description": "The AWS access key ID. Only required if the environment lacks pre-configured "
                        "AWS credentials.",
                    },
                    "aws_secret_access_key": {
                        "type": "string",
                        "description": "The AWS secret access key. Required if 'aws_access_key_id' is provided or if "
                        "no AWS credentials are pre-configured.",
                    },
                    "aws_session_token": {
                        "type": "string",
                        "description": "The session token for temporary credentials, if applicable.",
                    },
                    "session_duration": {
                        "type": "integer",
                        "minimum": 900,
                        "maximum": 43200,
                        "default": 3600,
                        "description": "The duration (in seconds) for the role session.",
                    },
                    "role_session_name": {
                        "type": "string",
                        "description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
                        "The regex used to validate this parameter is a string of characters consisting of "
                        "upper- and lower-case alphanumeric characters with no spaces. You can also include "
                        "underscores or any of the following characters: =,.@-\n\n"
                        "Examples:\n"
                        "- MySession123\n"
                        "- User_Session-1\n"
                        "- Test.Session@2",
                        "pattern": "^[a-zA-Z0-9=,.@_-]+$",
                    },
                },
            },
        ]
    }
)
class IntegrationCredentialField(serializers.JSONField):
    pass


@extend_schema_field(
    {
        "oneOf": [
            {
                "type": "object",
                "title": "Amazon S3",
                "properties": {
                    "bucket_name": {
                        "type": "string",
                        "description": "The name of the S3 bucket where files will be stored.",
                    },
                    "output_directory": {
                        "type": "string",
                        "description": "The directory path within the bucket where files will be saved.",
                    },
                },
                "required": ["bucket_name", "output_directory"],
            },
        ]
    }
)
class IntegrationConfigField(serializers.JSONField):
    pass
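
A short sketch of how these validators behave, with illustrative payloads; `BaseValidateSerializer` is strict and rejects any key that is not a declared field:

s = S3ConfigSerializer(data={"bucket_name": "findings", "output_directory": "prowler/"})
assert s.is_valid()

bad = S3ConfigSerializer(
    data={"bucket_name": "findings", "output_directory": "prowler/", "acl": "private"}
)
assert not bad.is_valid()  # validate() raises "Invalid fields: {'acl'}"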

@@ -16,6 +16,8 @@ from rest_framework_simplejwt.tokens import RefreshToken
from api.models import (
    ComplianceOverview,
    Finding,
    Integration,
    IntegrationProviderRelationship,
    Invitation,
    InvitationRoleRelationship,
    Membership,

@@ -34,11 +36,75 @@ from api.models import (
    UserRoleRelationship,
)
from api.rls import Tenant
from api.v1.serializer_utils.integrations import (
    AWSCredentialSerializer,
    IntegrationConfigField,
    IntegrationCredentialField,
    S3ConfigSerializer,
)

# Tokens


class TokenSerializer(TokenObtainPairSerializer):
def generate_tokens(user: User, tenant_id: str) -> dict:
    try:
        refresh = RefreshToken.for_user(user)
    except InvalidKeyError:
        # Handle invalid key error
        raise ValidationError(
            {
                "detail": "Token generation failed due to invalid key configuration. Provide valid "
                "DJANGO_TOKEN_SIGNING_KEY and DJANGO_TOKEN_VERIFYING_KEY in the environment."
            }
        )
    except Exception as e:
        raise ValidationError({"detail": str(e)})

    # Post-process the tokens
    # Set the tenant_id
    refresh["tenant_id"] = tenant_id

    # Set the nbf (not before) claim to the iat (issued at) claim. At this moment, simplejwt does not provide a
    # way to set the nbf claim
    refresh.payload["nbf"] = refresh["iat"]

    # Get the access token
    access = refresh.access_token

    if settings.SIMPLE_JWT["UPDATE_LAST_LOGIN"]:
        update_last_login(None, user)

    return {"access": str(access), "refresh": str(refresh)}


class BaseTokenSerializer(TokenObtainPairSerializer):
    def custom_validate(self, attrs, social: bool = False):
        email = attrs.get("email")
        password = attrs.get("password")
        tenant_id = str(attrs.get("tenant_id", ""))

        # Authenticate user
        user = (
            User.objects.get(email=email)
            if social
            else authenticate(username=email, password=password)
        )
        if user is None:
            raise ValidationError("Invalid credentials")

        if tenant_id:
            if not user.is_member_of_tenant(tenant_id):
                raise ValidationError("Tenant does not exist or user is not a member.")
        else:
            first_membership = user.memberships.order_by("date_joined").first()
            if first_membership is None:
                raise ValidationError("User has no memberships.")
            tenant_id = str(first_membership.tenant_id)

        return generate_tokens(user, tenant_id)
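
A condensed sketch of what the extracted `generate_tokens` helper produces, assuming simplejwt is configured as in this project (`user` and `tenant_id` are illustrative placeholders):

from rest_framework_simplejwt.tokens import RefreshToken

# Illustrative: mirror the helper's post-processing on a fresh refresh token.
refresh = RefreshToken.for_user(user)
refresh["tenant_id"] = tenant_id          # custom claim carried by both tokens
refresh.payload["nbf"] = refresh["iat"]   # not-before pinned to issued-at
tokens = {"access": str(refresh.access_token), "refresh": str(refresh)}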


class TokenSerializer(BaseTokenSerializer):
    email = serializers.EmailField(write_only=True)
    password = serializers.CharField(write_only=True)
    tenant_id = serializers.UUIDField(

@@ -56,53 +122,25 @@ class TokenSerializer(TokenObtainPairSerializer):
        resource_name = "tokens"

    def validate(self, attrs):
        email = attrs.get("email")
        password = attrs.get("password")
        tenant_id = str(attrs.get("tenant_id", ""))
        return super().custom_validate(attrs)

        # Authenticate user
        user = authenticate(username=email, password=password)
        if user is None:
            raise ValidationError("Invalid credentials")

        if tenant_id:
            if not user.is_member_of_tenant(tenant_id):
                raise ValidationError("Tenant does not exist or user is not a member.")
        else:
            first_membership = user.memberships.order_by("date_joined").first()
            if first_membership is None:
                raise ValidationError("User has no memberships.")
            tenant_id = str(first_membership.tenant_id)
class TokenSocialLoginSerializer(BaseTokenSerializer):
    email = serializers.EmailField(write_only=True)

        # Generate tokens
        try:
            refresh = RefreshToken.for_user(user)
        except InvalidKeyError:
            # Handle invalid key error
            raise ValidationError(
                {
                    "detail": "Token generation failed due to invalid key configuration. Provide valid "
                    "DJANGO_TOKEN_SIGNING_KEY and DJANGO_TOKEN_VERIFYING_KEY in the environment."
                }
            )
        except Exception as e:
            raise ValidationError({"detail": str(e)})
    # Output tokens
    refresh = serializers.CharField(read_only=True)
    access = serializers.CharField(read_only=True)

        # Post-process the tokens
        # Set the tenant_id
        refresh["tenant_id"] = tenant_id
    class JSONAPIMeta:
        resource_name = "tokens"

        # Set the nbf (not before) claim to the iat (issued at) claim. At this moment, simplejwt does not provide a
        # way to set the nbf claim
        refresh.payload["nbf"] = refresh["iat"]
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields.pop("password", None)

        # Get the access token
        access = refresh.access_token

        if settings.SIMPLE_JWT["UPDATE_LAST_LOGIN"]:
            update_last_login(None, user)

        return {"access": str(access), "refresh": str(refresh)}
    def validate(self, attrs):
        return super().custom_validate(attrs, social=True)


# TODO: Check if we can change the parent class to TokenRefreshSerializer from rest_framework_simplejwt.serializers

@@ -140,6 +178,30 @@ class TokenRefreshSerializer(serializers.Serializer):
            raise ValidationError({"refresh": "Invalid or expired token"})


class TokenSwitchTenantSerializer(serializers.Serializer):
    tenant_id = serializers.UUIDField(
        write_only=True, help_text="The tenant ID for which to request a new token."
    )
    access = serializers.CharField(read_only=True)
    refresh = serializers.CharField(read_only=True)

    class JSONAPIMeta:
        resource_name = "tokens-switch-tenant"

    def validate(self, attrs):
        request = self.context["request"]
        user = request.user

        if not user.is_authenticated:
            raise ValidationError("Invalid or expired token.")

        tenant_id = str(attrs.get("tenant_id"))
        if not user.is_member_of_tenant(tenant_id):
            raise ValidationError("Tenant does not exist or user is not a member.")

        return generate_tokens(user, tenant_id)


# Base


@@ -235,13 +297,10 @@ class UserCreateSerializer(BaseWriteSerializer):

class UserUpdateSerializer(BaseWriteSerializer):
    password = serializers.CharField(write_only=True, required=False)
    roles = serializers.ResourceRelatedField(
        queryset=Role.objects.all(), many=True, required=False
    )

    class Meta:
        model = User
        fields = ["id", "name", "password", "email", "company_name", "roles"]
        fields = ["id", "name", "password", "email", "company_name"]
        extra_kwargs = {
            "id": {"read_only": True},
        }

@@ -506,7 +565,6 @@ class ProviderGroupCreateSerializer(ProviderGroupSerializer):
            "updated_at",
            "providers",
            "roles",
            "url",
        ]

    def create(self, validated_data):

@@ -695,6 +753,43 @@ class ProviderSerializer(RLSSerializer):
        }


class ProviderIncludeSerializer(RLSSerializer):
    """
    Serializer for the Provider model.
    """

    provider = ProviderEnumSerializerField()
    connection = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Provider
        fields = [
            "id",
            "inserted_at",
            "updated_at",
            "provider",
            "uid",
            "alias",
            "connection",
            # "scanner_args",
        ]

    @extend_schema_field(
        {
            "type": "object",
            "properties": {
                "connected": {"type": "boolean"},
                "last_checked_at": {"type": "string", "format": "date-time"},
            },
        }
    )
    def get_connection(self, obj):
        return {
            "connected": obj.connected,
            "last_checked_at": obj.connection_last_checked_at,
        }


class ProviderCreateSerializer(RLSSerializer, BaseWriteSerializer):
    class Meta:
        model = Provider

@@ -757,6 +852,35 @@ class ScanSerializer(RLSSerializer):
        ]


class ScanIncludeSerializer(RLSSerializer):
    trigger = serializers.ChoiceField(
        choices=Scan.TriggerChoices.choices, read_only=True
    )
    state = StateEnumSerializerField(read_only=True)

    class Meta:
        model = Scan
        fields = [
            "id",
            "name",
            "trigger",
            "state",
            "unique_resource_count",
            "progress",
            # "scanner_args",
            "duration",
            "inserted_at",
            "started_at",
            "completed_at",
            "scheduled_at",
            "provider",
        ]

    included_serializers = {
        "provider": "api.v1.serializers.ProviderIncludeSerializer",
    }


class ScanCreateSerializer(RLSSerializer, BaseWriteSerializer):
    class Meta:
        model = Scan

@@ -823,6 +947,14 @@ class ScanTaskSerializer(RLSSerializer):
        ]


class ScanReportSerializer(serializers.Serializer):
    id = serializers.CharField(source="scan")

    class Meta:
        resource_name = "scan-reports"
        fields = ["id"]


class ResourceTagSerializer(RLSSerializer):
    """
    Serializer for the ResourceTag model

@@ -878,7 +1010,52 @@ class ResourceSerializer(RLSSerializer):
        }
    )
    def get_tags(self, obj):
        return obj.get_tags()
        return obj.get_tags(self.context.get("tenant_id"))

    def get_fields(self):
        """`type` shadows the Python builtin."""
        fields = super().get_fields()
        type_ = fields.pop("type_")
        fields["type"] = type_
        return fields


class ResourceIncludeSerializer(RLSSerializer):
    """
    Serializer for the Resource model.
    """

    tags = serializers.SerializerMethodField()
    type_ = serializers.CharField(read_only=True)

    class Meta:
        model = Resource
        fields = [
            "id",
            "inserted_at",
            "updated_at",
            "uid",
            "name",
            "region",
            "service",
            "type_",
            "tags",
        ]
        extra_kwargs = {
            "id": {"read_only": True},
            "inserted_at": {"read_only": True},
            "updated_at": {"read_only": True},
        }

    @extend_schema_field(
        {
            "type": "object",
            "description": "Tags associated with the resource",
            "example": {"env": "prod", "owner": "johndoe"},
        }
    )
    def get_tags(self, obj):
        return obj.get_tags(self.context.get("tenant_id"))

    def get_fields(self):
        """`type` shadows the Python builtin."""

@@ -909,6 +1086,7 @@ class FindingSerializer(RLSSerializer):
            "raw_result",
            "inserted_at",
            "updated_at",
            "first_seen_at",
            "url",
            # Relationships
            "scan",

@@ -916,11 +1094,12 @@ class FindingSerializer(RLSSerializer):
        ]

    included_serializers = {
        "scan": ScanSerializer,
        "resources": ResourceSerializer,
        "scan": ScanIncludeSerializer,
        "resources": ResourceIncludeSerializer,
    }


# To be removed when the related endpoint is removed as well
class FindingDynamicFilterSerializer(serializers.Serializer):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)

@@ -929,6 +1108,19 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
        resource_name = "finding-dynamic-filters"


class FindingMetadataSerializer(serializers.Serializer):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    resource_types = serializers.ListField(
        child=serializers.CharField(), allow_empty=True
    )
    # Temporarily disabled until we implement tag filtering in the UI
    # tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")

    class Meta:
        resource_name = "findings-metadata"


# Provider secrets
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
    @staticmethod

@@ -1001,7 +1193,7 @@ class KubernetesProviderSecret(serializers.Serializer):

class AWSRoleAssumptionProviderSecret(serializers.Serializer):
    role_arn = serializers.CharField()
    external_id = serializers.CharField(required=False)
    external_id = serializers.CharField()
    role_session_name = serializers.CharField(required=False)
    session_duration = serializers.IntegerField(
        required=False, min_value=900, max_value=43200

@@ -1048,6 +1240,10 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
                        "description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
                        "assumption.",
                    },
                    "external_id": {
                        "type": "string",
                        "description": "An identifier to enhance security for role assumption.",
                    },
                    "aws_access_key_id": {
                        "type": "string",
                        "description": "The AWS access key ID. Only required if the environment lacks pre-configured "

@@ -1069,11 +1265,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
                        "default": 3600,
                        "description": "The duration (in seconds) for the role session.",
                    },
                    "external_id": {
                        "type": "string",
                        "description": "An optional identifier to enhance security for role assumption; may be "
                        "required by the role administrator.",
                    },
                    "role_session_name": {
                        "type": "string",
                        "description": "An identifier for the role session, useful for tracking sessions in AWS logs. "

@@ -1087,7 +1278,7 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
                        "pattern": "^[a-zA-Z0-9=,.@_-]+$",
                    },
                },
                "required": ["role_arn"],
                "required": ["role_arn", "external_id"],
            },
            {
                "type": "object",

@@ -1237,6 +1428,12 @@ class InvitationSerializer(RLSSerializer):

    roles = serializers.ResourceRelatedField(many=True, queryset=Role.objects.all())

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        tenant_id = self.context.get("tenant_id")
        if tenant_id is not None:
            self.fields["roles"].queryset = Role.objects.filter(tenant_id=tenant_id)

    class Meta:
        model = Invitation
        fields = [

@@ -1256,6 +1453,12 @@ class InvitationSerializer(RLSSerializer):
class InvitationBaseWriteSerializer(BaseWriteSerializer):
    roles = serializers.ResourceRelatedField(many=True, queryset=Role.objects.all())

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        tenant_id = self.context.get("tenant_id")
        if tenant_id is not None:
            self.fields["roles"].queryset = Role.objects.filter(tenant_id=tenant_id)

    def validate_email(self, value):
        user = User.objects.filter(email=value).first()
        tenant_id = self.context["tenant_id"]

@@ -1316,6 +1519,10 @@ class InvitationCreateSerializer(InvitationBaseWriteSerializer, RLSSerializer):


class InvitationUpdateSerializer(InvitationBaseWriteSerializer):
    roles = serializers.ResourceRelatedField(
        required=False, many=True, queryset=Role.objects.all()
    )

    class Meta:
        model = Invitation
        fields = ["id", "email", "expires_at", "state", "token", "roles"]

@@ -1329,14 +1536,18 @@ class InvitationUpdateSerializer(InvitationBaseWriteSerializer):

    def update(self, instance, validated_data):
        tenant_id = self.context.get("tenant_id")
        invitation = super().update(instance, validated_data)
        if "roles" in validated_data:
            roles = validated_data.pop("roles")
            instance.roles.clear()
            for role in roles:
                InvitationRoleRelationship.objects.create(
                    role=role, invitation=invitation, tenant_id=tenant_id
            new_relationships = [
                InvitationRoleRelationship(
                    role=r, invitation=instance, tenant_id=tenant_id
                )
                for r in roles
            ]
            InvitationRoleRelationship.objects.bulk_create(new_relationships)

        invitation = super().update(instance, validated_data)

        return invitation
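
The rewrite above swaps N `objects.create()` calls for a single `bulk_create`, so re-assigning roles costs one INSERT instead of one per role. A minimal sketch of the pattern, with the caveat that `bulk_create` bypasses the model's `save()` and the `pre_save`/`post_save` signals:

# Illustrative pattern: build unsaved instances, insert them in one query.
rels = [
    InvitationRoleRelationship(role=r, invitation=instance, tenant_id=tenant_id)
    for r in roles
]
InvitationRoleRelationship.objects.bulk_create(rels)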

@@ -1363,6 +1574,17 @@ class RoleSerializer(RLSSerializer, BaseWriteSerializer):
        queryset=ProviderGroup.objects.all(), many=True, required=False
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        tenant_id = self.context.get("tenant_id")
        if tenant_id is not None:
            self.fields["users"].queryset = User.objects.filter(
                membership__tenant__id=tenant_id
            )
            self.fields["provider_groups"].queryset = ProviderGroup.objects.filter(
                tenant_id=self.context.get("tenant_id")
            )

    def get_permission_state(self, obj) -> str:
        return obj.permission_state

@@ -1390,9 +1612,11 @@ class RoleSerializer(RLSSerializer, BaseWriteSerializer):
            "name",
            "manage_users",
            "manage_account",
            "manage_billing",
            "manage_providers",
            # Disabled for the first release
            # "manage_billing",
            # /Disabled for the first release
            "manage_integrations",
            "manage_providers",
            "manage_scans",
            "permission_state",
            "unlimited_visibility",

@@ -1693,7 +1917,7 @@ class OverviewProviderSerializer(serializers.Serializer):
            "properties": {
                "pass": {"type": "integer"},
                "fail": {"type": "integer"},
                "manual": {"type": "integer"},
                "muted": {"type": "integer"},
                "total": {"type": "integer"},
            },
        }

@@ -1702,7 +1926,7 @@ class OverviewProviderSerializer(serializers.Serializer):
        return {
            "pass": obj["findings_passed"],
            "fail": obj["findings_failed"],
            "manual": obj["findings_manual"],
            "muted": obj["findings_muted"],
            "total": obj["total_findings"],
        }

@@ -1797,3 +2021,201 @@ class ScheduleDailyCreateSerializer(serializers.Serializer):
        if unknown_keys:
            raise ValidationError(f"Invalid fields: {unknown_keys}")
        return data


# Integrations


class BaseWriteIntegrationSerializer(BaseWriteSerializer):
    @staticmethod
    def validate_integration_data(
        integration_type: str,
        providers: list[Provider],  # noqa
        configuration: dict,
        credentials: dict,
    ):
        if integration_type == Integration.IntegrationChoices.S3:
            config_serializer = S3ConfigSerializer
            credentials_serializers = [AWSCredentialSerializer]
            # TODO: This will be required for AWS Security Hub
            # if providers and not all(
            #     provider.provider == Provider.ProviderChoices.AWS
            #     for provider in providers
            # ):
            #     raise serializers.ValidationError(
            #         {"providers": "All providers must be AWS for the S3 integration."}
            #     )
        else:
            raise serializers.ValidationError(
                {
                    "integration_type": f"Integration type not supported yet: {integration_type}"
                }
            )

        config_serializer(data=configuration).is_valid(raise_exception=True)

        for cred_serializer in credentials_serializers:
            try:
                cred_serializer(data=credentials).is_valid(raise_exception=True)
                break
            except ValidationError:
                continue
        else:
            raise ValidationError(
                {"credentials": "Invalid credentials for the integration type."}
            )


class IntegrationSerializer(RLSSerializer):
    """
    Serializer for the Integration model.
    """

    providers = serializers.ResourceRelatedField(
        queryset=Provider.objects.all(), many=True
    )

    class Meta:
        model = Integration
        fields = [
            "id",
            "inserted_at",
            "updated_at",
            "enabled",
            "connected",
            "connection_last_checked_at",
            "integration_type",
            "configuration",
            "providers",
            "url",
        ]

    included_serializers = {
        "providers": "api.v1.serializers.ProviderIncludeSerializer",
    }

    def to_representation(self, instance):
        representation = super().to_representation(instance)
        allowed_providers = self.context.get("allowed_providers")
        if allowed_providers:
            allowed_provider_ids = {str(provider.id) for provider in allowed_providers}
            representation["providers"] = [
                provider
                for provider in representation["providers"]
                if provider["id"] in allowed_provider_ids
            ]
        return representation


class IntegrationCreateSerializer(BaseWriteIntegrationSerializer):
    credentials = IntegrationCredentialField(write_only=True)
    configuration = IntegrationConfigField()
    providers = serializers.ResourceRelatedField(
        queryset=Provider.objects.all(), many=True, required=False
    )

    class Meta:
        model = Integration
        fields = [
            "inserted_at",
            "updated_at",
            "enabled",
            "connected",
            "connection_last_checked_at",
            "integration_type",
            "configuration",
            "credentials",
            "providers",
        ]
        extra_kwargs = {
            "inserted_at": {"read_only": True},
            "updated_at": {"read_only": True},
            "connected": {"read_only": True},
            "enabled": {"read_only": True},
            "connection_last_checked_at": {"read_only": True},
        }

    def validate(self, attrs):
        integration_type = attrs.get("integration_type")
        providers = attrs.get("providers")
        configuration = attrs.get("configuration")
        credentials = attrs.get("credentials")

        validated_attrs = super().validate(attrs)
        self.validate_integration_data(
            integration_type, providers, configuration, credentials
        )
        return validated_attrs

    def create(self, validated_data):
        tenant_id = self.context.get("tenant_id")

        providers = validated_data.pop("providers", [])
        integration = Integration.objects.create(tenant_id=tenant_id, **validated_data)

        through_model_instances = [
            IntegrationProviderRelationship(
                integration=integration,
                provider=provider,
                tenant_id=tenant_id,
            )
            for provider in providers
        ]
        IntegrationProviderRelationship.objects.bulk_create(through_model_instances)

        return integration
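
For reference, a hedged sketch of a request that would exercise this create path, in the same JSON:API style as the tests earlier in this diff (`client` and `auth_headers` as in those tests; field values are illustrative):

response = client.post(
    reverse("integration-list"),
    data={
        "data": {
            "type": "integrations",
            "attributes": {
                "integration_type": "amazon_s3",
                "configuration": {
                    "bucket_name": "findings",
                    "output_directory": "prowler/",
                },
                "credentials": {"role_arn": "arn:aws:iam::123456789012:role/prowler"},
            },
        }
    },
    format="vnd.api+json",
    headers=auth_headers,
)
assert response.status_code == 201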
|
||||
|
||||
|
||||
class IntegrationUpdateSerializer(BaseWriteIntegrationSerializer):
|
||||
credentials = IntegrationCredentialField(write_only=True, required=False)
|
||||
configuration = IntegrationConfigField(required=False)
|
||||
providers = serializers.ResourceRelatedField(
|
||||
queryset=Provider.objects.all(), many=True, required=False
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Integration
|
||||
fields = [
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
"enabled",
|
||||
"connected",
|
||||
"connection_last_checked_at",
|
||||
"integration_type",
|
||||
"configuration",
|
||||
"credentials",
|
||||
"providers",
|
||||
]
|
||||
extra_kwargs = {
|
||||
"inserted_at": {"read_only": True},
|
||||
"updated_at": {"read_only": True},
|
||||
"connected": {"read_only": True},
|
||||
"connection_last_checked_at": {"read_only": True},
|
||||
"integration_type": {"read_only": True},
|
||||
}
|
||||
|
||||
def validate(self, attrs):
|
||||
integration_type = self.instance.integration_type
|
||||
providers = attrs.get("providers")
|
||||
configuration = attrs.get("configuration") or self.instance.configuration
|
||||
credentials = attrs.get("credentials") or self.instance.credentials
|
||||
|
||||
validated_attrs = super().validate(attrs)
|
||||
self.validate_integration_data(
|
||||
integration_type, providers, configuration, credentials
|
||||
)
|
||||
return validated_attrs
|
||||
|
||||
def update(self, instance, validated_data):
|
||||
tenant_id = self.context.get("tenant_id")
|
||||
if validated_data.get("providers") is not None:
|
||||
instance.providers.clear()
|
||||
new_relationships = [
|
||||
IntegrationProviderRelationship(
|
||||
integration=instance, provider=provider, tenant_id=tenant_id
|
||||
)
|
||||
for provider in validated_data["providers"]
|
||||
]
|
||||
IntegrationProviderRelationship.objects.bulk_create(new_relationships)
|
||||
|
||||
return super().update(instance, validated_data)
|
||||
|
||||
@@ -3,28 +3,32 @@ from drf_spectacular.views import SpectacularRedocView
from rest_framework_nested import routers

from api.v1.views import (
    ComplianceOverviewViewSet,
    CustomTokenObtainView,
    CustomTokenRefreshView,
    CustomTokenSwitchTenantView,
    FindingViewSet,
    MembershipViewSet,
    ProviderGroupViewSet,
    ProviderGroupProvidersRelationshipView,
    ProviderSecretViewSet,
    InvitationViewSet,
    GithubSocialLoginView,
    GoogleSocialLoginView,
    IntegrationViewSet,
    InvitationAcceptViewSet,
    RoleViewSet,
    RoleProviderGroupRelationshipView,
    UserRoleRelationshipView,
    InvitationViewSet,
    MembershipViewSet,
    OverviewViewSet,
    ComplianceOverviewViewSet,
    ProviderGroupProvidersRelationshipView,
    ProviderGroupViewSet,
    ProviderSecretViewSet,
    ProviderViewSet,
    ResourceViewSet,
    RoleProviderGroupRelationshipView,
    RoleViewSet,
    ScanViewSet,
    ScheduleViewSet,
    SchemaView,
    TaskViewSet,
    TenantMembersViewSet,
    TenantViewSet,
    UserRoleRelationshipView,
    UserViewSet,
)

@@ -44,6 +48,7 @@ router.register(
)
router.register(r"overviews", OverviewViewSet, basename="overview")
router.register(r"schedules", ScheduleViewSet, basename="schedule")
router.register(r"integrations", IntegrationViewSet, basename="integration")

tenants_router = routers.NestedSimpleRouter(router, r"tenants", lookup="tenant")
tenants_router.register(
@@ -56,6 +61,7 @@ users_router.register(r"memberships", MembershipViewSet, basename="user-membersh
urlpatterns = [
    path("tokens", CustomTokenObtainView.as_view(), name="token-obtain"),
    path("tokens/refresh", CustomTokenRefreshView.as_view(), name="token-refresh"),
    path("tokens/switch", CustomTokenSwitchTenantView.as_view(), name="token-switch"),
    path(
        "providers/secrets",
        ProviderSecretViewSet.as_view({"get": "list", "post": "create"}),
@@ -106,6 +112,8 @@ urlpatterns = [
        ),
        name="provider_group-providers-relationship",
    ),
    path("tokens/google", GoogleSocialLoginView.as_view(), name="token-google"),
    path("tokens/github", GithubSocialLoginView.as_view(), name="token-github"),
    path("", include(router.urls)),
    path("", include(tenants_router.urls)),
    path("", include(users_router.urls)),
File diff suppressed because it is too large
@@ -1,10 +1,21 @@
from celery import Celery, Task
from config.env import env

BROKER_VISIBILITY_TIMEOUT = env.int("DJANGO_BROKER_VISIBILITY_TIMEOUT", default=86400)

celery_app = Celery("tasks")

celery_app.config_from_object("django.conf:settings", namespace="CELERY")
celery_app.conf.update(result_extended=True, result_expires=None)

celery_app.conf.broker_transport_options = {
    "visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
celery_app.conf.result_backend_transport_options = {
    "visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
celery_app.conf.visibility_timeout = BROKER_VISIBILITY_TIMEOUT

celery_app.autodiscover_tasks(["api"])
@@ -2,9 +2,8 @@ import json
import logging
from enum import StrEnum

from django_guid.log_filters import CorrelationId

from config.env import env
from django_guid.log_filters import CorrelationId


class BackendLogger(StrEnum):
@@ -39,9 +38,9 @@ class NDJSONFormatter(logging.Formatter):
            "funcName": record.funcName,
            "process": record.process,
            "thread": record.thread,
            "transaction_id": record.transaction_id
            if hasattr(record, "transaction_id")
            else None,
            "transaction_id": (
                record.transaction_id if hasattr(record, "transaction_id") else None
            ),
        }

        # Add REST API extra fields
@@ -4,6 +4,8 @@ from config.custom_logging import LOGGING  # noqa
from config.env import BASE_DIR, env  # noqa
from config.settings.celery import *  # noqa
from config.settings.partitions import *  # noqa
from config.settings.sentry import *  # noqa
from config.settings.social_login import *  # noqa

SECRET_KEY = env("SECRET_KEY", default="secret")
DEBUG = env.bool("DJANGO_DEBUG", default=False)
@@ -29,6 +31,13 @@ INSTALLED_APPS = [
    "django_celery_results",
    "django_celery_beat",
    "rest_framework_simplejwt.token_blacklist",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "allauth.socialaccount.providers.github",
    "dj_rest_auth.registration",
    "rest_framework.authtoken",
]

MIDDLEWARE = [
@@ -42,8 +51,11 @@ MIDDLEWARE = [
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "api.middleware.APILoggingMiddleware",
    "allauth.account.middleware.AccountMiddleware",
]

SITE_ID = 1

CORS_ALLOWED_ORIGINS = ["http://localhost", "http://127.0.0.1"]

ROOT_URLCONF = "config.urls"
@@ -207,3 +219,20 @@ CACHE_STALE_WHILE_REVALIDATE = env.int("DJANGO_STALE_WHILE_REVALIDATE", 60)

TESTING = False

FINDINGS_MAX_DAYS_IN_RANGE = env.int("DJANGO_FINDINGS_MAX_DAYS_IN_RANGE", 7)


# API export settings
DJANGO_TMP_OUTPUT_DIRECTORY = env.str(
    "DJANGO_TMP_OUTPUT_DIRECTORY", "/tmp/prowler_api_output"
)
DJANGO_FINDINGS_BATCH_SIZE = env.str("DJANGO_FINDINGS_BATCH_SIZE", 1000)

DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET = env.str("DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET", "")
DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID = env.str("DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID", "")
DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY = env.str(
    "DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY", ""
)
DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN = env.str("DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN", "")
DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION = env.str("DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION", "")
@@ -1,7 +1,6 @@
from config.django.base import *  # noqa
from config.env import env


DEBUG = env.bool("DJANGO_DEBUG", default=True)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])

@@ -1,7 +1,6 @@
from config.django.base import *  # noqa
from config.env import env


DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])

@@ -1,7 +1,6 @@
from config.django.base import *  # noqa
from config.env import env


DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
88
api/src/backend/config/settings/sentry.py
Normal file
@@ -0,0 +1,88 @@
import sentry_sdk
from config.env import env

IGNORED_EXCEPTIONS = [
    # Provider is not connected due to credentials errors
    "is not connected",
    # Authentication Errors from AWS
    "InvalidToken",
    "AccessDeniedException",
    "AuthorizationErrorException",
    "UnrecognizedClientException",
    "UnauthorizedOperation",
    "AuthFailure",
    "InvalidClientTokenId",
    "AccessDenied",
    "No Shodan API Key",  # Shodan Check
    "RequestLimitExceeded",  # For now we don't want to log the RequestLimitExceeded errors
    "ThrottlingException",
    "Rate exceeded",
    "SubscriptionRequiredException",
    "UnknownOperationException",
    "OptInRequired",
    "ReadTimeout",
    "LimitExceeded",
    "ConnectTimeoutError",
    "ExpiredToken",
    "IncompleteSignature",
    "RegionDisabledException",
    "TooManyRequestsException",
    "SignatureDoesNotMatch",
    "InvalidParameterValueException",
    "InvalidInputException",
    "ValidationException",
    "AWSSecretAccessKeyInvalidError",
    "InvalidAction",
    "Pool is closed",  # The following comes from urllib3: eu-west-1 -- HTTPClientError[126]: An HTTP Client raised an unhandled exception: AWSHTTPSConnectionPool(host='hostname.s3.eu-west-1.amazonaws.com', port=443): Pool is closed.
    # Authentication Errors from GCP
    "ClientAuthenticationError",
    "AuthorizationFailed",
    "Reauthentication is needed",
    "Permission denied to get service",
    "API has not been used in project",
    "HttpError 404 when requesting",
    "GCPNoAccesibleProjectsError",
    # Authentication Errors from Azure
    "ClientAuthenticationError",
    "AuthorizationFailed",
    "Subscription Not Registered",
    "AzureNotValidClientIdError",
    "AzureNotValidClientSecretError",
    "AzureNotValidTenantIdError",
    "AzureTenantIdAndClientSecretNotBelongingToClientIdError",
    "AzureTenantIdAndClientIdNotBelongingToClientSecretError",
    "AzureClientIdAndClientSecretNotBelongingToTenantIdError",
    "AzureHTTPResponseError",
    "Error with credentials provided",
]


def before_send(event, hint):
    """
    before_send decides whether each Sentry event is sent or dropped.
    """
    # Ignore log events whose message matches any of the ignored exceptions
    # https://docs.python.org/3/library/logging.html#logrecord-objects
    if "log_record" in hint:
        log_msg = hint["log_record"].msg
        log_lvl = hint["log_record"].levelno

        # Drop ERROR events that match the ignore list; everything else passes through
        if log_lvl == 40 and any(ignored in log_msg for ignored in IGNORED_EXCEPTIONS):
            return
    return event
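# --- Illustrative check (a sketch, not part of the file above): feeding a
# --- synthetic ERROR-level LogRecord through before_send to show the filtering
# --- behavior. The record contents are made up for the example.
import logging

record = logging.LogRecord(
    name="api", level=logging.ERROR, pathname="example.py", lineno=1,
    msg="Provider 1234 is not connected", args=(), exc_info=None,
)
event = {"message": record.msg}

assert before_send(event, {"log_record": record}) is None  # matched: dropped
assert before_send(event, {}) is event  # no log_record in hint: passed through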

sentry_sdk.init(
    dsn=env.str("DJANGO_SENTRY_DSN", ""),
    # Add data like request headers and IP for users,
    # see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info
    before_send=before_send,
    send_default_pii=True,
    _experiments={
        # Set continuous_profiling_auto_start to True
        # to automatically start the profiler whenever possible.
        "continuous_profiling_auto_start": True,
    },
)
53
api/src/backend/config/settings/social_login.py
Normal file
@@ -0,0 +1,53 @@
from config.env import env

# Provider OAuth settings
GOOGLE_OAUTH_CLIENT_ID = env("SOCIAL_GOOGLE_OAUTH_CLIENT_ID", default="")
GOOGLE_OAUTH_CLIENT_SECRET = env("SOCIAL_GOOGLE_OAUTH_CLIENT_SECRET", default="")
GOOGLE_OAUTH_CALLBACK_URL = env("SOCIAL_GOOGLE_OAUTH_CALLBACK_URL", default="")

GITHUB_OAUTH_CLIENT_ID = env("SOCIAL_GITHUB_OAUTH_CLIENT_ID", default="")
GITHUB_OAUTH_CLIENT_SECRET = env("SOCIAL_GITHUB_OAUTH_CLIENT_SECRET", default="")
GITHUB_OAUTH_CALLBACK_URL = env("SOCIAL_GITHUB_OAUTH_CALLBACK_URL", default="")

# Allauth settings
ACCOUNT_LOGIN_METHODS = {"email"}  # Use email/password authentication
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"  # Do not require email confirmation
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
REST_AUTH = {
    "TOKEN_MODEL": None,
    "REST_USE_JWT": True,
}
# django-allauth (social)
# Authenticate if a local account with this email address already exists
SOCIALACCOUNT_EMAIL_AUTHENTICATION = True
# Connect the social account to the local account if one with that email address already exists
SOCIALACCOUNT_EMAIL_AUTHENTICATION_AUTO_CONNECT = True
SOCIALACCOUNT_ADAPTER = "api.adapters.ProwlerSocialAccountAdapter"
SOCIALACCOUNT_PROVIDERS = {
    "google": {
        "APP": {
            "client_id": GOOGLE_OAUTH_CLIENT_ID,
            "secret": GOOGLE_OAUTH_CLIENT_SECRET,
            "key": "",
        },
        "SCOPE": [
            "email",
            "profile",
        ],
        "AUTH_PARAMS": {
            "access_type": "online",
        },
    },
    "github": {
        "APP": {
            "client_id": GITHUB_OAUTH_CLIENT_ID,
            "secret": GITHUB_OAUTH_CLIENT_SECRET,
        },
        "SCOPE": [
            "user",
            "read:org",
        ],
    },
}
@@ -15,6 +15,8 @@ from api.db_utils import rls_transaction
from api.models import (
    ComplianceOverview,
    Finding,
    Integration,
    IntegrationProviderRelationship,
    Invitation,
    Membership,
    Provider,
@@ -88,16 +90,14 @@ def create_test_user(django_db_setup, django_db_blocker):


@pytest.fixture(scope="function")
def create_test_user_rbac(django_db_setup, django_db_blocker):
def create_test_user_rbac(django_db_setup, django_db_blocker, tenants_fixture):
    with django_db_blocker.unblock():
        user = User.objects.create_user(
            name="testing",
            email="rbac@rbac.com",
            password=TEST_PASSWORD,
        )
        tenant = Tenant.objects.create(
            name="Tenant Test",
        )
        tenant = tenants_fixture[0]
        Membership.objects.create(
            user=user,
            tenant=tenant,
@@ -123,16 +123,14 @@ def create_test_user_rbac(django_db_setup, django_db_blocker):


@pytest.fixture(scope="function")
def create_test_user_rbac_no_roles(django_db_setup, django_db_blocker):
def create_test_user_rbac_no_roles(django_db_setup, django_db_blocker, tenants_fixture):
    with django_db_blocker.unblock():
        user = User.objects.create_user(
            name="testing",
            email="rbac_noroles@rbac.com",
            password=TEST_PASSWORD,
        )
        tenant = Tenant.objects.create(
            name="Tenant Test",
        )
        tenant = tenants_fixture[0]
        Membership.objects.create(
            user=user,
            tenant=tenant,
@@ -180,10 +178,16 @@ def create_test_user_rbac_limited(django_db_setup, django_db_blocker):
@pytest.fixture
def authenticated_client_rbac(create_test_user_rbac, tenants_fixture, client):
    client.user = create_test_user_rbac
    tenant_id = tenants_fixture[0].id
    serializer = TokenSerializer(
        data={"type": "tokens", "email": "rbac@rbac.com", "password": TEST_PASSWORD}
        data={
            "type": "tokens",
            "email": "rbac@rbac.com",
            "password": TEST_PASSWORD,
            "tenant_id": tenant_id,
        }
    )
    serializer.is_valid()
    serializer.is_valid(raise_exception=True)
    access_token = serializer.validated_data["access"]
    client.defaults["HTTP_AUTHORIZATION"] = f"Bearer {access_token}"
    return client
@@ -303,7 +307,7 @@ def set_user_admin_roles_fixture(create_test_user, tenants_fixture):
@pytest.fixture
def invitations_fixture(create_test_user, tenants_fixture):
    user = create_test_user
    *_, tenant = tenants_fixture
    tenant = tenants_fixture[0]
    valid_invitation = Invitation.objects.create(
        email="testing@prowler.com",
        state=Invitation.State.PENDING,
@@ -393,6 +397,23 @@ def provider_groups_fixture(tenants_fixture):
    return pgroup1, pgroup2, pgroup3


@pytest.fixture
def admin_role_fixture(tenants_fixture):
    tenant, *_ = tenants_fixture

    return Role.objects.get_or_create(
        name="admin",
        tenant_id=tenant.id,
        manage_users=True,
        manage_account=True,
        manage_billing=True,
        manage_providers=True,
        manage_integrations=True,
        manage_scans=True,
        unlimited_visibility=True,
    )[0]


@pytest.fixture
def roles_fixture(tenants_fixture):
    tenant, *_ = tenants_fixture
@@ -467,7 +488,7 @@ def scans_fixture(tenants_fixture, providers_fixture):
        name="Scan 1",
        provider=provider,
        trigger=Scan.TriggerChoices.MANUAL,
        state=StateChoices.AVAILABLE,
        state=StateChoices.COMPLETED,
        tenant_id=tenant.id,
        started_at="2024-01-02T00:00:00Z",
    )
@@ -607,6 +628,7 @@ def findings_fixture(scans_fixture, resources_fixture):
            "CheckId": "test_check_id",
            "Description": "test description apple sauce",
        },
        first_seen_at="2024-01-02T00:00:00Z",
    )

    finding1.add_resources([resource1])
@@ -632,6 +654,7 @@ def findings_fixture(scans_fixture, resources_fixture):
            "CheckId": "test_check_id",
            "Description": "test description orange juice",
        },
        first_seen_at="2024-01-02T00:00:00Z",
    )

    finding2.add_resources([resource2])
@@ -856,6 +879,46 @@ def scan_summaries_fixture(tenants_fixture, providers_fixture):
    )


@pytest.fixture
def integrations_fixture(providers_fixture):
    provider1, provider2, *_ = providers_fixture
    tenant_id = provider1.tenant_id
    integration1 = Integration.objects.create(
        tenant_id=tenant_id,
        enabled=True,
        connected=True,
        integration_type="amazon_s3",
        configuration={"key": "value"},
        credentials={"psswd": "1234"},
    )
    IntegrationProviderRelationship.objects.create(
        tenant_id=tenant_id,
        integration=integration1,
        provider=provider1,
    )

    integration2 = Integration.objects.create(
        tenant_id=tenant_id,
        enabled=True,
        connected=True,
        integration_type="amazon_s3",
        configuration={"key": "value"},
        credentials={"psswd": "1234"},
    )
    IntegrationProviderRelationship.objects.create(
        tenant_id=tenant_id,
        integration=integration2,
        provider=provider1,
    )
    IntegrationProviderRelationship.objects.create(
        tenant_id=tenant_id,
        integration=integration2,
        provider=provider2,
    )

    return integration1, integration2


def get_authorization_header(access_token: str) -> dict:
    return {"Authorization": f"Bearer {access_token}"}
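# --- Illustrative usage (a sketch, not part of the fixtures above): passing
# --- the header dict to an API test client; the URL and token are placeholders.
response = client.get(
    "/api/v1/integrations", headers=get_authorization_header(access_token)
)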
@@ -5,10 +5,14 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
from rest_framework_json_api.serializers import ValidationError
from tasks.tasks import perform_scheduled_scan_task

from api.models import Provider
from api.db_utils import rls_transaction
from api.models import Provider, Scan, StateChoices


def schedule_provider_scan(provider_instance: Provider):
    tenant_id = str(provider_instance.tenant_id)
    provider_id = str(provider_instance.id)

    schedule, _ = IntervalSchedule.objects.get_or_create(
        every=24,
        period=IntervalSchedule.HOURS,
@@ -17,23 +21,9 @@ def schedule_provider_scan(provider_instance: Provider):
    # Create a unique name for the periodic task
    task_name = f"scan-perform-scheduled-{provider_instance.id}"

    # Schedule the task
    _, created = PeriodicTask.objects.get_or_create(
        interval=schedule,
        name=task_name,
        task="scan-perform-scheduled",
        kwargs=json.dumps(
            {
                "tenant_id": str(provider_instance.tenant_id),
                "provider_id": str(provider_instance.id),
            }
        ),
        one_off=False,
        defaults={
            "start_time": datetime.now(timezone.utc) + timedelta(hours=24),
        },
    )
    if not created:
    if PeriodicTask.objects.filter(
        interval=schedule, name=task_name, task="scan-perform-scheduled"
    ).exists():
        raise ValidationError(
            [
                {
@@ -45,9 +35,36 @@ def schedule_provider_scan(provider_instance: Provider):
            ]
        )

    with rls_transaction(tenant_id):
        scheduled_scan = Scan.objects.create(
            tenant_id=tenant_id,
            name="Daily scheduled scan",
            provider_id=provider_id,
            trigger=Scan.TriggerChoices.SCHEDULED,
            state=StateChoices.AVAILABLE,
            scheduled_at=datetime.now(timezone.utc),
        )

        # Schedule the task
        periodic_task_instance = PeriodicTask.objects.create(
            interval=schedule,
            name=task_name,
            task="scan-perform-scheduled",
            kwargs=json.dumps(
                {
                    "tenant_id": tenant_id,
                    "provider_id": provider_id,
                }
            ),
            one_off=False,
            start_time=datetime.now(timezone.utc) + timedelta(hours=24),
        )
        scheduled_scan.scheduler_task_id = periodic_task_instance.id
        scheduled_scan.save()

    return perform_scheduled_scan_task.apply_async(
        kwargs={
            "tenant_id": str(provider_instance.tenant_id),
            "provider_id": str(provider_instance.id),
            "provider_id": provider_id,
        },
    )
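# --- Illustrative usage (a sketch, not part of the diff above): scheduling the
# --- daily scan right after a provider is created; "provider" is assumed to be
# --- a saved api.models.Provider instance.
async_result = schedule_provider_scan(provider)
print(async_result.id)  # Celery task id of the first scheduled run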
156
api/src/backend/tasks/jobs/export.py
Normal file
@@ -0,0 +1,156 @@
import os
import zipfile

import boto3
import config.django.base as base
from botocore.exceptions import ClientError, NoCredentialsError, ParamValidationError
from celery.utils.log import get_task_logger
from django.conf import settings

from prowler.config.config import (
    csv_file_suffix,
    html_file_suffix,
    json_ocsf_file_suffix,
    output_file_timestamp,
)
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.html.html import HTML
from prowler.lib.outputs.ocsf.ocsf import OCSF

logger = get_task_logger(__name__)


# Predefined mapping for output formats and their configurations
OUTPUT_FORMATS_MAPPING = {
    "csv": {
        "class": CSV,
        "suffix": csv_file_suffix,
        "kwargs": {},
    },
    "json-ocsf": {"class": OCSF, "suffix": json_ocsf_file_suffix, "kwargs": {}},
    "html": {"class": HTML, "suffix": html_file_suffix, "kwargs": {"stats": {}}},
}


def _compress_output_files(output_directory: str) -> str:
    """
    Compress output files from all configured output formats into a ZIP archive.

    Args:
        output_directory (str): The directory where the output files are located.
            The function looks up all known suffixes in OUTPUT_FORMATS_MAPPING
            and compresses those files into a single ZIP.
    Returns:
        str: The full path to the newly created ZIP archive.
    """
    zip_path = f"{output_directory}.zip"

    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        for suffix in [config["suffix"] for config in OUTPUT_FORMATS_MAPPING.values()]:
            zipf.write(
                f"{output_directory}{suffix}",
                f"output/{output_directory.split('/')[-1]}{suffix}",
            )

    return zip_path
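# --- Illustrative layout (a sketch, not part of the file above): for a base
# --- path like "/tmp/out/prowler-output-aws-123" and the usual prowler
# --- suffixes, the function expects sibling files such as
# ---   prowler-output-aws-123.csv
# ---   prowler-output-aws-123.ocsf.json
# ---   prowler-output-aws-123.html
# --- and returns "/tmp/out/prowler-output-aws-123.zip" with entries under "output/".
zip_path = _compress_output_files("/tmp/out/prowler-output-aws-123")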

def get_s3_client():
    """
    Create and return a boto3 S3 client using AWS credentials from environment variables.

    This function attempts to initialize an S3 client by reading the AWS access key, secret key,
    session token, and region from environment variables. It then validates the client by listing
    available S3 buckets. If an error occurs during this process (for example, due to missing or
    invalid credentials), it falls back to creating an S3 client without explicitly provided credentials,
    which may rely on other configuration sources (e.g., IAM roles).

    Returns:
        boto3.client: A configured S3 client instance.

    Raises:
        ClientError, NoCredentialsError, or ParamValidationError if both attempts to create a client fail.
    """
    s3_client = None
    try:
        s3_client = boto3.client(
            "s3",
            aws_access_key_id=settings.DJANGO_OUTPUT_S3_AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.DJANGO_OUTPUT_S3_AWS_SECRET_ACCESS_KEY,
            aws_session_token=settings.DJANGO_OUTPUT_S3_AWS_SESSION_TOKEN,
            region_name=settings.DJANGO_OUTPUT_S3_AWS_DEFAULT_REGION,
        )
        s3_client.list_buckets()
    except (ClientError, NoCredentialsError, ParamValidationError, ValueError):
        s3_client = boto3.client("s3")
        s3_client.list_buckets()

    return s3_client


def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
    """
    Upload the specified ZIP file to an S3 bucket.

    If the S3 bucket environment variables are not configured,
    the function returns None without performing an upload.

    Args:
        tenant_id (str): The tenant identifier, used as part of the S3 key prefix.
        zip_path (str): The local file system path to the ZIP file to be uploaded.
        scan_id (str): The scan identifier, used as part of the S3 key prefix.
    Returns:
        str: The S3 URI of the uploaded file (e.g., "s3://<bucket>/<key>") if successful.
        None: If the required environment variables for the S3 bucket are not set.
    Raises:
        botocore.exceptions.ClientError: If the upload attempt to S3 fails for any reason.
    """
    if not base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET:
        return

    try:
        s3 = get_s3_client()
        s3_key = f"{tenant_id}/{scan_id}/{os.path.basename(zip_path)}"
        s3.upload_file(
            Filename=zip_path,
            Bucket=base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET,
            Key=s3_key,
        )
        return f"s3://{base.DJANGO_OUTPUT_S3_AWS_OUTPUT_BUCKET}/{s3_key}"
    except (ClientError, NoCredentialsError, ParamValidationError, ValueError) as e:
        logger.error(f"S3 upload failed: {str(e)}")


def _generate_output_directory(
    output_directory, prowler_provider: object, tenant_id: str, scan_id: str
) -> str:
    """
    Generate a file system path for the output directory of a prowler scan.

    This function constructs the output directory path by combining a base
    temporary output directory, the tenant ID, the scan ID, and details about
    the prowler provider along with a timestamp. The resulting path is used to
    store the output files of a prowler scan.

    Note:
        This function depends on one external variable:
        - `output_file_timestamp`: A timestamp (as a string) used to uniquely identify the output.

    Args:
        output_directory (str): The base output directory.
        prowler_provider (object): An identifier or descriptor for the prowler provider.
            Typically, this is a string indicating the provider (e.g., "aws").
        tenant_id (str): The unique identifier for the tenant.
        scan_id (str): The unique identifier for the scan.

    Returns:
        str: The constructed file system path for the prowler scan output directory.

    Example:
        >>> _generate_output_directory("/tmp", "aws", "tenant-1234", "scan-5678")
        '/tmp/tenant-1234/scan-5678/prowler-output-aws-2023-02-15T12:34:56'
    """
    path = (
        f"{output_directory}/{tenant_id}/{scan_id}/prowler-output-"
        f"{prowler_provider}-{output_file_timestamp}"
    )
    os.makedirs("/".join(path.split("/")[:-1]), exist_ok=True)

    return path
@@ -116,7 +116,6 @@ def perform_prowler_scan(
        ValueError: If the provider cannot be connected.

    """
    generate_compliance = False
    check_status_by_region = {}
    exception = None
    unique_resources = set()
@@ -145,7 +144,6 @@ def perform_prowler_scan(
        )
        provider_instance.save()

        generate_compliance = provider_instance.provider != Provider.ProviderChoices.GCP
        prowler_scan = ProwlerScan(provider=prowler_provider, checks=checks_to_execute)

        resource_cache = {}
@@ -154,6 +152,9 @@ def perform_prowler_scan(

        for progress, findings in prowler_scan.scan():
            for finding in findings:
                if finding is None:
                    logger.error(f"None finding detected on scan {scan_id}.")
                    continue
                for attempt in range(CELERY_DEADLOCK_ATTEMPTS):
                    try:
                        with rls_transaction(tenant_id):
@@ -178,7 +179,10 @@ def perform_prowler_scan(

                # Update resource fields if necessary
                updated_fields = []
                if resource_instance.region != finding.region:
                if (
                    finding.region
                    and resource_instance.region != finding.region
                ):
                    resource_instance.region = finding.region
                    updated_fields.append("region")
                if resource_instance.service != finding.service_name:
@@ -221,24 +225,33 @@ def perform_prowler_scan(
                # Process finding
                with rls_transaction(tenant_id):
                    finding_uid = finding.uid
                    last_first_seen_at = None
                    if finding_uid not in last_status_cache:
                        most_recent_finding = (
                            Finding.objects.filter(uid=finding_uid)
                            .order_by("-id")
                            .values("status")
                            Finding.all_objects.filter(
                                tenant_id=tenant_id, uid=finding_uid
                            )
                            .order_by("-inserted_at")
                            .values("status", "first_seen_at")
                            .first()
                        )
                        last_status = (
                            most_recent_finding["status"]
                            if most_recent_finding
                            else None
                        )
                        last_status_cache[finding_uid] = last_status
                        last_status = None
                        if most_recent_finding:
                            last_status = most_recent_finding["status"]
                            last_first_seen_at = most_recent_finding["first_seen_at"]
                        last_status_cache[finding_uid] = last_status, last_first_seen_at
                    else:
                        last_status = last_status_cache[finding_uid]
                        last_status, last_first_seen_at = last_status_cache[finding_uid]

                    status = FindingStatus[finding.status]
                    delta = _create_finding_delta(last_status, status)
                    # For findings that predate this change, the first finding seen with
                    # delta != "new" is assigned the current date as first_seen_at, and
                    # successive findings with the same UID inherit the previous finding's date.
                    # For new findings (delta == "new"), first_seen_at is set to the current
                    # date the first time the finding is seen, and later findings inherit it.
                    if not last_first_seen_at:
                        last_first_seen_at = datetime.now(tz=timezone.utc)

                    # Create the finding
                    finding_instance = Finding.objects.create(
@@ -253,11 +266,12 @@ def perform_prowler_scan(
                        raw_result=finding.raw,
                        check_id=finding.check_id,
                        scan=scan_instance,
                        first_seen_at=last_first_seen_at,
                    )
                    finding_instance.add_resources([resource_instance])

                # Update compliance data if applicable
                if not generate_compliance or finding.status.value == "MUTED":
                if finding.status.value == "MUTED":
                    continue

                region_dict = check_status_by_region.setdefault(finding.region, {})
@@ -285,7 +299,7 @@ def perform_prowler_scan(
    scan_instance.unique_resource_count = len(unique_resources)
    scan_instance.save()

    if exception is None and generate_compliance:
    if exception is None:
        try:
            regions = prowler_provider.get_regions()
        except AttributeError:
@@ -330,9 +344,18 @@ def perform_prowler_scan(
                        total_requirements=compliance["total_requirements"],
                    )
                )
        with rls_transaction(tenant_id):
            ComplianceOverview.objects.bulk_create(compliance_overview_objects)
        try:
            with rls_transaction(tenant_id):
                ComplianceOverview.objects.bulk_create(
                    compliance_overview_objects, batch_size=100
                )
        except Exception as overview_exception:
            import sentry_sdk

            sentry_sdk.capture_exception(overview_exception)
            logger.error(
                f"Error storing compliance overview for scan {scan_id}: {overview_exception}"
            )
    if exception is not None:
        raise exception

@@ -369,7 +392,7 @@ def aggregate_findings(tenant_id: str, scan_id: str):
        - muted_changed: Muted findings with a delta of 'changed'.
    """
    with rls_transaction(tenant_id):
        findings = Finding.objects.filter(scan_id=scan_id)
        findings = Finding.objects.filter(tenant_id=tenant_id, scan_id=scan_id)

        aggregation = findings.values(
            "check_id",
@@ -1,15 +1,29 @@
from datetime import datetime, timedelta, timezone
from pathlib import Path
from shutil import rmtree

from celery import shared_task
from celery import chain, shared_task
from celery.utils.log import get_task_logger
from config.celery import RLSTask
from config.django.base import DJANGO_FINDINGS_BATCH_SIZE, DJANGO_TMP_OUTPUT_DIRECTORY
from django_celery_beat.models import PeriodicTask
from tasks.jobs.connection import check_provider_connection
from tasks.jobs.deletion import delete_provider, delete_tenant
from tasks.jobs.export import (
    OUTPUT_FORMATS_MAPPING,
    _compress_output_files,
    _generate_output_directory,
    _upload_to_s3,
)
from tasks.jobs.scan import aggregate_findings, perform_prowler_scan
from tasks.utils import batched, get_next_execution_datetime

from api.db_utils import rls_transaction
from api.decorators import set_tenant
from api.models import Provider, Scan
from api.models import Finding, Provider, Scan, ScanSummary, StateChoices
from api.utils import initialize_prowler_provider
from prowler.lib.outputs.finding import Finding as FindingOutput

logger = get_task_logger(__name__)


@shared_task(base=RLSTask, name="provider-connection-check")
@@ -29,7 +43,7 @@ def check_provider_connection_task(provider_id: str):
    return check_provider_connection(provider_id=provider_id)


@shared_task(base=RLSTask, name="provider-deletion")
@shared_task(base=RLSTask, name="provider-deletion", queue="deletion")
@set_tenant
def delete_provider_task(provider_id: str):
    """
@@ -69,13 +83,22 @@ def perform_scan_task(
    Returns:
        dict: The result of the scan execution, typically including the status and results of the performed checks.
    """
    return perform_prowler_scan(
    result = perform_prowler_scan(
        tenant_id=tenant_id,
        scan_id=scan_id,
        provider_id=provider_id,
        checks_to_execute=checks_to_execute,
    )

    chain(
        perform_scan_summary_task.si(tenant_id, scan_id),
        generate_outputs.si(
            scan_id=scan_id, provider_id=provider_id, tenant_id=tenant_id
        ),
    ).apply_async()

    return result


@shared_task(base=RLSTask, bind=True, name="scan-perform-scheduled", queue="scans")
def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
@@ -100,34 +123,49 @@ def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
    task_id = self.request.id

    with rls_transaction(tenant_id):
        provider_instance = Provider.objects.get(pk=provider_id)
        periodic_task_instance = PeriodicTask.objects.get(
            name=f"scan-perform-scheduled-{provider_id}"
        )
        next_scan_date = datetime.combine(
            datetime.now(timezone.utc), periodic_task_instance.start_time.time()
        ) + timedelta(hours=24)

        scan_instance = Scan.objects.create(
        next_scan_datetime = get_next_execution_datetime(task_id, provider_id)
        scan_instance, _ = Scan.objects.get_or_create(
            tenant_id=tenant_id,
            name="Daily scheduled scan",
            provider=provider_instance,
            provider_id=provider_id,
            trigger=Scan.TriggerChoices.SCHEDULED,
            next_scan_at=next_scan_date,
            task_id=task_id,
            state__in=(StateChoices.SCHEDULED, StateChoices.AVAILABLE),
            scheduler_task_id=periodic_task_instance.id,
            defaults={"state": StateChoices.SCHEDULED},
        )

    result = perform_prowler_scan(
        tenant_id=tenant_id,
        scan_id=str(scan_instance.id),
        provider_id=provider_id,
    )
    perform_scan_summary_task.apply_async(
        kwargs={
            "tenant_id": tenant_id,
            "scan_id": str(scan_instance.id),
        }
    )
        scan_instance.task_id = task_id
        scan_instance.save()

    try:
        result = perform_prowler_scan(
            tenant_id=tenant_id,
            scan_id=str(scan_instance.id),
            provider_id=provider_id,
        )
    except Exception as e:
        raise e
    finally:
        with rls_transaction(tenant_id):
            Scan.objects.get_or_create(
                tenant_id=tenant_id,
                name="Daily scheduled scan",
                provider_id=provider_id,
                trigger=Scan.TriggerChoices.SCHEDULED,
                state=StateChoices.SCHEDULED,
                scheduled_at=next_scan_datetime,
                scheduler_task_id=periodic_task_instance.id,
            )

    chain(
        perform_scan_summary_task.si(tenant_id, scan_instance.id),
        generate_outputs.si(
            scan_id=str(scan_instance.id), provider_id=provider_id, tenant_id=tenant_id
        ),
    ).apply_async()

    return result


@@ -136,6 +174,111 @@ def perform_scan_summary_task(tenant_id: str, scan_id: str):
    return aggregate_findings(tenant_id=tenant_id, scan_id=scan_id)


@shared_task(name="tenant-deletion")
@shared_task(name="tenant-deletion", queue="deletion")
def delete_tenant_task(tenant_id: str):
    return delete_tenant(pk=tenant_id)


@shared_task(
    base=RLSTask,
    name="scan-report",
    queue="scan-reports",
)
@set_tenant(keep_tenant=True)
def generate_outputs(scan_id: str, provider_id: str, tenant_id: str):
    """
    Process findings in batches and generate output files in multiple formats.

    This function retrieves findings associated with a scan, processes them
    in batches of DJANGO_FINDINGS_BATCH_SIZE, and writes each batch to the
    corresponding output files. It reuses output writer instances across
    batches, updates them with each batch of transformed findings, and uses
    a flag to indicate when the final batch is being processed. Finally, the
    output files are compressed and uploaded to S3.

    Args:
        tenant_id (str): The tenant identifier.
        scan_id (str): The scan identifier.
        provider_id (str): The provider id to be used in generating outputs.
    """
    # Initialize the prowler provider
    prowler_provider = initialize_prowler_provider(Provider.objects.get(id=provider_id))

    # Get the provider UID
    provider_uid = Provider.objects.get(id=provider_id).uid

    # Generate and ensure the output directory exists
    output_directory = _generate_output_directory(
        DJANGO_TMP_OUTPUT_DIRECTORY, provider_uid, tenant_id, scan_id
    )

    # Define auxiliary variables
    output_writers = {}
    scan_summary = FindingOutput._transform_findings_stats(
        ScanSummary.objects.filter(scan_id=scan_id)
    )

    # Retrieve findings queryset
    findings_qs = Finding.all_objects.filter(scan_id=scan_id).order_by("uid")

    # Process findings in batches
    for batch, is_last_batch in batched(
        findings_qs.iterator(), DJANGO_FINDINGS_BATCH_SIZE
    ):
        finding_outputs = [
            FindingOutput.transform_api_finding(finding, prowler_provider)
            for finding in batch
        ]

        # Generate output files
        for mode, config in OUTPUT_FORMATS_MAPPING.items():
            kwargs = dict(config.get("kwargs", {}))
            if mode == "html":
                kwargs["provider"] = prowler_provider
                kwargs["stats"] = scan_summary

            writer_class = config["class"]
            if writer_class in output_writers:
                writer = output_writers[writer_class]
                writer.transform(finding_outputs)
                writer.close_file = is_last_batch
            else:
                writer = writer_class(
                    findings=finding_outputs,
                    file_path=output_directory,
                    file_extension=config["suffix"],
                    from_cli=False,
                )
                writer.close_file = is_last_batch
                output_writers[writer_class] = writer

            # Write the current batch using the writer
            writer.batch_write_data_to_file(**kwargs)

            # TODO: Refactor the output classes to avoid this manual reset
            writer._data = []

    # Compress output files
    output_directory = _compress_output_files(output_directory)

    # Save to configured storage
    uploaded = _upload_to_s3(tenant_id, output_directory, scan_id)

    if uploaded:
        # Remove the local files after upload
        try:
            rmtree(Path(output_directory).parent, ignore_errors=True)
        except FileNotFoundError as e:
            logger.error(f"Error deleting output files: {e}")

        output_directory = uploaded
        uploaded = True
    else:
        uploaded = False

    # Update the scan instance with the output path
    Scan.all_objects.filter(id=scan_id).update(output_location=output_directory)

    logger.info(f"Scan output files generated, output location: {output_directory}")

    return {"upload": uploaded}
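# --- Illustrative dispatch (a sketch, not part of the diff above): queuing the
# --- report task by hand; the IDs below are placeholders.
generate_outputs.apply_async(
    kwargs={"scan_id": "scan-123", "provider_id": "provider-456", "tenant_id": "tenant-789"}
)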
@@ -6,6 +6,8 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
from rest_framework_json_api.serializers import ValidationError
from tasks.beat import schedule_provider_scan

from api.models import Scan


@pytest.mark.django_db
class TestScheduleProviderScan:
@@ -15,9 +17,11 @@ class TestScheduleProviderScan:
        with patch(
            "tasks.tasks.perform_scheduled_scan_task.apply_async"
        ) as mock_apply_async:
            assert Scan.all_objects.count() == 0
            result = schedule_provider_scan(provider_instance)

            assert result is not None
            assert Scan.all_objects.count() == 1

            mock_apply_async.assert_called_once_with(
                kwargs={
102
api/src/backend/tasks/tests/test_utils.py
Normal file
@@ -0,0 +1,102 @@
from datetime import datetime, timedelta, timezone
from unittest.mock import patch

import pytest
from django_celery_beat.models import IntervalSchedule, PeriodicTask
from django_celery_results.models import TaskResult
from tasks.utils import batched, get_next_execution_datetime


@pytest.mark.django_db
class TestGetNextExecutionDatetime:
    @pytest.fixture
    def setup_periodic_task(self, db):
        # Create a periodic task with an hourly interval
        interval = IntervalSchedule.objects.create(
            every=1, period=IntervalSchedule.HOURS
        )
        periodic_task = PeriodicTask.objects.create(
            name="scan-perform-scheduled-123",
            task="scan-perform-scheduled",
            interval=interval,
        )
        return periodic_task

    @pytest.fixture
    def setup_task_result(self, db):
        # Create a task result record
        task_result = TaskResult.objects.create(
            task_id="abc123",
            task_name="scan-perform-scheduled",
            status="SUCCESS",
            date_created=datetime.now(timezone.utc) - timedelta(hours=1),
            result="Success",
        )
        return task_result

    def test_get_next_execution_datetime_success(
        self, setup_task_result, setup_periodic_task
    ):
        task_result = setup_task_result
        periodic_task = setup_periodic_task

        # Mock periodic_task_name on TaskResult
        with patch.object(
            TaskResult, "periodic_task_name", return_value=periodic_task.name
        ):
            next_execution = get_next_execution_datetime(
                task_id=task_result.task_id, provider_id="123"
            )

        expected_time = task_result.date_created + timedelta(hours=1)
        assert next_execution == expected_time

    def test_get_next_execution_datetime_fallback_to_provider_id(
        self, setup_task_result, setup_periodic_task
    ):
        task_result = setup_task_result

        # Simulate the case where `periodic_task_name` is missing
        with patch.object(TaskResult, "periodic_task_name", return_value=None):
            next_execution = get_next_execution_datetime(
                task_id=task_result.task_id, provider_id="123"
            )

        expected_time = task_result.date_created + timedelta(hours=1)
        assert next_execution == expected_time

    def test_get_next_execution_datetime_periodic_task_does_not_exist(
        self, setup_task_result
    ):
        task_result = setup_task_result

        with pytest.raises(PeriodicTask.DoesNotExist):
            get_next_execution_datetime(
                task_id=task_result.task_id, provider_id="nonexistent"
            )


class TestBatchedFunction:
    def test_empty_iterable(self):
        result = list(batched([], 3))
        assert result == [([], True)]

    def test_exact_batches(self):
        result = list(batched([1, 2, 3, 4], 2))
        expected = [([1, 2], False), ([3, 4], False), ([], True)]
        assert result == expected

    def test_inexact_batches(self):
        result = list(batched([1, 2, 3, 4, 5], 2))
        expected = [([1, 2], False), ([3, 4], False), ([5], True)]
        assert result == expected

    def test_batch_size_one(self):
        result = list(batched([1, 2, 3], 1))
        expected = [([1], False), ([2], False), ([3], False), ([], True)]
        assert result == expected

    def test_batch_size_greater_than_length(self):
        result = list(batched([1, 2, 3], 5))
        expected = [([1, 2, 3], True)]
        assert result == expected
50
api/src/backend/tasks/utils.py
Normal file
@@ -0,0 +1,50 @@
from datetime import datetime, timedelta, timezone

from django_celery_beat.models import PeriodicTask
from django_celery_results.models import TaskResult


def get_next_execution_datetime(task_id: int, provider_id: str) -> datetime:
    task_instance = TaskResult.objects.get(task_id=task_id)
    try:
        periodic_task_instance = PeriodicTask.objects.get(
            name=task_instance.periodic_task_name
        )
    except PeriodicTask.DoesNotExist:
        periodic_task_instance = PeriodicTask.objects.get(
            name=f"scan-perform-scheduled-{provider_id}"
        )

    interval = periodic_task_instance.interval

    current_scheduled_time = datetime.combine(
        datetime.now(timezone.utc).date(),
        task_instance.date_created.time(),
        tzinfo=timezone.utc,
    )

    return current_scheduled_time + timedelta(**{interval.period: interval.every})
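# --- Illustrative detail (a sketch, not part of the file above): IntervalSchedule
# --- stores "period" as a plural unit name ("hours", "days", ...) and "every" as
# --- an int, so an interval unpacks directly into timedelta keyword arguments.
from datetime import timedelta

print(timedelta(**{"hours": 24}))  # 1 day, 0:00:00 (one daily interval)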

def batched(iterable, batch_size):
    """
    Yield successive batches from an iterable.

    Args:
        iterable: An iterable source of items.
        batch_size (int): The number of items per batch.

    Yields:
        tuple: A pair (batch, is_last_batch) where:
            - batch (list): A list of items (with length equal to batch_size,
              except possibly for the last batch).
            - is_last_batch (bool): True if this is the final batch, False otherwise.
    """
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch, False
            batch = []

    yield batch, True
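# --- Expected behavior (mirrors the unit tests above):
assert list(batched([1, 2, 3, 4, 5], 2)) == [([1, 2], False), ([3, 4], False), ([5], True)]
assert list(batched([1, 2, 3, 4], 2)) == [([1, 2], False), ([3, 4], False), ([], True)]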
301
contrib/aws/aws-sso-docker/readme.md
Normal file
@@ -0,0 +1,301 @@
# AWS SSO to Prowler Automation Script

## Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Setup](#setup)
- [Script Overview](#script-overview)
- [Usage](#usage)
- [Troubleshooting](#troubleshooting)
- [Customization](#customization)
- [Security Considerations](#security-considerations)
- [License](#license)

## Introduction

This repository provides a Bash script that automates the process of logging into AWS Single Sign-On (SSO), extracting temporary AWS credentials, and running **Prowler**, a security tool that performs AWS security best practices assessments, inside a Docker container using those credentials.

By following this guide, you can streamline your AWS security assessments, ensuring that you consistently apply best practices across your AWS accounts.

## Prerequisites

Before you begin, ensure that you have the following tools installed and properly configured on your system:

1. **AWS CLI v2**
   - AWS SSO support is available from AWS CLI version 2 onwards.
   - [Installation Guide](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)

2. **jq**
   - A lightweight and flexible command-line JSON processor.
   - **macOS (Homebrew):**
     ```bash
     brew install jq
     ```
   - **Ubuntu/Debian:**
     ```bash
     sudo apt-get update
     sudo apt-get install -y jq
     ```
   - **Windows:**
     - [Download jq](https://stedolan.github.io/jq/download/)

3. **Docker**
   - Ensure Docker is installed and running on your system.
   - [Docker Installation Guide](https://docs.docker.com/get-docker/)

4. **AWS SSO Profile Configuration**
   - Ensure that you have configured an AWS CLI profile with SSO.
   - [Configuring AWS CLI with SSO](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html)

## Setup

1. **Clone the Repository**
   ```bash
   git clone https://github.com/your-username/aws-sso-prowler-automation.git
   cd aws-sso-prowler-automation
   ```

2. **Create the Automation Script**
   Create a new Bash script named `run_prowler_sso.sh` and make it executable.

   ```bash
   nano run_prowler_sso.sh
   chmod +x run_prowler_sso.sh
   ```

3. **Add the Script Content**
   Paste the script content into `run_prowler_sso.sh` (the full script ships alongside this readme as `run_prowler_sso.sh`).

4. **Configure AWS SSO Profile**
   Ensure that your AWS CLI profile (`twodragon` in this case) is correctly configured for SSO.

   ```bash
   aws configure sso --profile twodragon
   ```

   **Example Configuration Prompts:**
   ```
   SSO session name (Recommended): [twodragon]
   SSO start URL [None]: https://twodragon.awsapps.com/start
   SSO region [None]: ap-northeast-2
   SSO account ID [None]: 123456789012
   SSO role name [None]: ReadOnlyAccess
   CLI default client region [None]: ap-northeast-2
   CLI default output format [None]: json
   CLI profile name [twodragon]: twodragon
   ```

## Script Overview

The `run_prowler_sso.sh` script performs the following actions:

1. **AWS SSO Login:**
   - Initiates AWS SSO login for the specified profile.
   - Opens the SSO authorization page in the default browser for user authentication.

2. **Extract Temporary Credentials:**
   - Locates the most recent SSO cache file containing the `accessToken`.
   - Uses `jq` to parse and extract the `accessToken` from the cache file.
   - Retrieves the `sso_role_name` and `sso_account_id` from the AWS CLI configuration.
   - Obtains temporary AWS credentials (`AccessKeyId`, `SecretAccessKey`, `SessionToken`) using the extracted `accessToken`.

3. **Set Environment Variables:**
   - Exports the extracted AWS credentials as environment variables to be used by the Docker container.

4. **Run Prowler:**
   - Executes the **Prowler** Docker container, passing the AWS credentials as environment variables for security assessments.

## Usage

1. **Make the Script Executable**
   Ensure the script has execute permissions.

   ```bash
   chmod +x run_prowler_sso.sh
   ```

2. **Run the Script**
   Execute the script to start the AWS SSO login process and run Prowler.

   ```bash
   ./run_prowler_sso.sh
   ```

3. **Follow the Prompts**
   - A browser window will open prompting you to authenticate via AWS SSO.
   - Complete the authentication process in the browser.
   - Upon successful login, the script will extract temporary credentials and run Prowler.

4. **Review Prowler Output**
   - Prowler will analyze your AWS environment based on the specified checks and output the results directly in the terminal.

## Troubleshooting

If you encounter issues during the script execution, follow these steps to diagnose and resolve them.

### 1. Verify AWS CLI Version

Ensure you are using AWS CLI version 2 or later.

```bash
aws --version
```

**Expected Output:**
```
aws-cli/2.11.10 Python/3.9.12 Darwin/20.3.0 exe/x86_64 prompt/off
```

If you are not using version 2, [install or update AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html).

### 2. Confirm AWS SSO Profile Configuration

Check that the `twodragon` profile is correctly configured.

```bash
aws configure list-profiles
```

**Expected Output:**
```
default
twodragon
```

Review the profile details:

```bash
aws configure get sso_start_url --profile twodragon
aws configure get sso_region --profile twodragon
aws configure get sso_account_id --profile twodragon
aws configure get sso_role_name --profile twodragon
```

Ensure all fields return the correct values.

### 3. Check SSO Cache File

Ensure that the SSO cache file contains a valid `accessToken`.

```bash
cat ~/.aws/sso/cache/*.json
```

**Example Content:**
```json
{
  "accessToken": "eyJz93a...k4laUWw",
  "expiresAt": "2024-12-22T14:07:55Z",
  "clientId": "example-client-id",
  "clientSecret": "example-client-secret",
  "startUrl": "https://twodragon.awsapps.com/start#"
}
```

If `accessToken` is `null` or missing, retry the AWS SSO login:

```bash
aws sso login --profile twodragon
```
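If `jq` is unavailable, the same check can be done with a short Python snippet (a sketch; the path follows the default AWS CLI layout and assumes at least one cache file exists):

```python
import glob
import json
import os

# Read the most recently modified SSO cache file and report whether it
# holds a non-empty accessToken.
cache_files = glob.glob(os.path.expanduser("~/.aws/sso/cache/*.json"))
latest = max(cache_files, key=os.path.getmtime)
with open(latest) as f:
    token = json.load(f).get("accessToken")
print("accessToken present:", bool(token))
```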
|
||||
### 4. Validate `jq` Installation
|
||||
|
||||
Ensure that `jq` is installed and functioning correctly.
|
||||
|
||||
```bash
|
||||
jq --version
|
||||
```
|
||||
|
||||
**Expected Output:**
|
||||
```
|
||||
jq-1.6
|
||||
```
|
||||
|
||||
If `jq` is not installed, install it using the instructions in the [Prerequisites](#prerequisites) section.
|
||||
|
||||
### 5. Test Docker Environment Variables
|
||||
|
||||
Verify that the Docker container receives the AWS credentials correctly.
|
||||
|
||||
```bash
|
||||
docker run --platform linux/amd64 \
|
||||
-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
|
||||
-e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \
|
||||
toniblyx/prowler /bin/bash -c 'echo $AWS_ACCESS_KEY_ID; echo $AWS_SECRET_ACCESS_KEY; echo $AWS_SESSION_TOKEN'
|
||||
```
|
||||
|
||||
**Expected Output:**
|
||||
```
|
||||
ASIA...
|
||||
wJalrFEMI/K7MDENG/bPxRfiCY...
|
||||
IQoJb3JpZ2luX2VjEHwaCXVz...
|
||||
```
|
||||
|
||||
Ensure that none of the environment variables are empty.
|
||||
|
||||
### 6. Review Script Output

Run the script with debugging enabled to get detailed output.

1. **Enable Debugging in the Script**

   Add `set -x` for verbose output:

   ```bash
   #!/bin/bash
   set -e
   set -x
   # ... rest of the script ...
   ```

2. **Run the Script**

   ```bash
   ./run_prowler_sso.sh
   ```

3. **Analyze the Output**

   Look for any errors or unexpected values in the output to identify where the script is failing.
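If you would rather have the failing line reported explicitly than scan the full trace, an `ERR` trap can be added alongside `set -x` (a sketch; the `set -Eeuo pipefail` hardening is optional and not part of the original script):

```bash
#!/bin/bash
set -Eeuo pipefail
set -x
# Print the line number and exit code of the first failing command.
trap 'echo "Error at line $LINENO (exit code $?)" >&2' ERR
# ... rest of the script ...
```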
## Customization

You can modify the script to suit your specific needs, such as:

- **Changing the AWS Profile Name:**

  Update the `PROFILE` variable at the top of the script.

  ```bash
  PROFILE="your-profile-name"
  ```

- **Adding Prowler Options:**

  Pass additional options to Prowler for customized checks or output formats.

  ```bash
  docker run --platform linux/amd64 \
    -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
    -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
    -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \
    toniblyx/prowler -c check123 -M json
  ```
## Security Considerations

- **Handle Credentials Securely:**

  - Avoid sharing or exposing your AWS credentials.
  - Do not include sensitive information in logs or version control.

- **Script Permissions:**

  - Ensure the script file has appropriate permissions to prevent unauthorized access.

  ```bash
  chmod 700 run_prowler_sso.sh
  ```

- **Environment Variables:**

  - Be cautious when exporting credentials as environment variables; they remain visible to the rest of the shell session. Clear them when you are done, as shown below.
  - Consider using more secure methods for credential management if necessary.
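A minimal cleanup once the scan has finished:

```bash
# Remove the temporary credentials from the current shell session.
unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
```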
## License

This project is licensed under the [MIT License](LICENSE).
contrib/aws/aws-sso-docker/run_prowler_sso.sh (new executable file, 136 lines)
@@ -0,0 +1,136 @@
#!/bin/bash
set -e

# Set the profile name
PROFILE="twodragon"

# Set the Prowler output directory
OUTPUT_DIR=~/prowler-output
mkdir -p "$OUTPUT_DIR"

# Set the port for the local web server
WEB_SERVER_PORT=8000

# ----------------------------------------------
# Functions
# ----------------------------------------------

# Function to open the HTML report in the default browser
open_report() {
    local report_path="$1"

    if [[ "$OSTYPE" == "darwin"* ]]; then
        open "$report_path"
    elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
        xdg-open "$report_path"
    elif [[ "$OSTYPE" == "msys" ]]; then
        start "" "$report_path"
    else
        echo "Automatic method to open Prowler HTML report is not supported on this OS."
        echo "Please open the report manually at: $report_path"
    fi
}

# Function to start a simple HTTP server to host the Prowler reports
start_web_server() {
    local directory="$1"
    local port="$2"

    echo "Starting local web server to host Prowler reports at http://localhost:$port"
    echo "Press Ctrl+C to stop the web server."

    # Change to the output directory
    cd "$directory"

    # Start the HTTP server in the foreground
    # Python 3 is required
    python3 -m http.server "$port"
}

# ----------------------------------------------
# Main Script
# ----------------------------------------------

# AWS SSO Login
echo "Logging into AWS SSO..."
aws sso login --profile "$PROFILE"

# Extract temporary credentials
echo "Extracting temporary credentials..."

# Find the most recently modified SSO cache file
CACHE_FILE=$(ls -t ~/.aws/sso/cache/*.json 2>/dev/null | head -n 1)
echo "Cache File: $CACHE_FILE"

if [ -z "$CACHE_FILE" ]; then
    echo "SSO cache file not found. Please ensure AWS SSO login was successful."
    exit 1
fi

# Extract accessToken using jq
ACCESS_TOKEN=$(jq -r '.accessToken' "$CACHE_FILE")
echo "Access Token: $ACCESS_TOKEN"

if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" == "null" ]; then
    echo "Unable to extract accessToken. Please check your SSO login and cache file."
    exit 1
fi

# Extract role name and account ID from AWS CLI configuration
ROLE_NAME=$(aws configure get sso_role_name --profile "$PROFILE")
ACCOUNT_ID=$(aws configure get sso_account_id --profile "$PROFILE")
echo "Role Name: $ROLE_NAME"
echo "Account ID: $ACCOUNT_ID"

if [ -z "$ROLE_NAME" ] || [ -z "$ACCOUNT_ID" ]; then
    echo "Unable to extract sso_role_name or sso_account_id. Please check your profile configuration."
    exit 1
fi

# Obtain temporary credentials using AWS SSO
TEMP_CREDS=$(aws sso get-role-credentials \
    --role-name "$ROLE_NAME" \
    --account-id "$ACCOUNT_ID" \
    --access-token "$ACCESS_TOKEN" \
    --profile "$PROFILE")

echo "TEMP_CREDS: $TEMP_CREDS"

# Extract credentials from the JSON response
AWS_ACCESS_KEY_ID=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.accessKeyId')
AWS_SECRET_ACCESS_KEY=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.secretAccessKey')
AWS_SESSION_TOKEN=$(echo "$TEMP_CREDS" | jq -r '.roleCredentials.sessionToken')

# Verify that all credentials were extracted successfully
if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ] || [ -z "$AWS_SESSION_TOKEN" ]; then
    echo "Unable to extract temporary credentials."
    exit 1
fi

# Export AWS credentials as environment variables
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY
export AWS_SESSION_TOKEN

echo "AWS credentials have been set."

# Run Prowler in Docker container
echo "Running Prowler Docker container..."

docker run --platform linux/amd64 \
    -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
    -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
    -e AWS_SESSION_TOKEN="$AWS_SESSION_TOKEN" \
    -v "$OUTPUT_DIR":/home/prowler/output \
    toniblyx/prowler -M html -M csv -M json-ocsf --output-directory /home/prowler/output --output-filename prowler-output

echo "Prowler has finished running. Reports are saved in $OUTPUT_DIR."

# Open the HTML report in the default browser
REPORT_PATH="$OUTPUT_DIR/prowler-output.html"
echo "Opening Prowler HTML report..."
open_report "$REPORT_PATH" &

# Start the local web server to host the Prowler dashboard
# This runs in the foreground. To run it in the background, append an ampersand (&) to the command.
start_web_server "$OUTPUT_DIR" "$WEB_SERVER_PORT"
contrib/k8s/helm/prowler-api/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v2
name: prowler-api
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "5.1.1"
contrib/k8s/helm/prowler-api/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-api.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-api.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-api.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
contrib/k8s/helm/prowler-api/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-api.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-api.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-api.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "prowler-api.labels" -}}
helm.sh/chart: {{ include "prowler-api.chart" . }}
{{ include "prowler-api.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "prowler-api.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-api.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-api.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-api.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
contrib/k8s/helm/prowler-api/templates/configmap.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "prowler-api.fullname" . }}-config
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
data:
  config.yaml: |-
    {{- toYaml .Values.mainConfig | nindent 4 }}
contrib/k8s/helm/prowler-api/templates/deployment.yaml (new file, 85 lines)
@@ -0,0 +1,85 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "prowler-api.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "prowler-api.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "prowler-api.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        {{- range $name, $config := .Values.containers }}
        {{- if $config.enabled }}
        - name: {{ $name }}
          securityContext:
            {{- toYaml $config.securityContext | nindent 12 }}
          image: "{{ $config.image.repository }}:{{ $config.image.tag | default $.Chart.AppVersion }}"
          imagePullPolicy: {{ $config.image.pullPolicy }}
          envFrom:
            - secretRef:
                name: {{ include "prowler-api.fullname" $ }}
          command:
            {{- toYaml $config.command | nindent 12 }}
          {{- if $config.ports }}
          ports:
            {{- toYaml $config.ports | nindent 12 }}
          {{- end }}
          livenessProbe:
            {{- toYaml $config.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $config.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml $config.resources | nindent 12 }}
          volumeMounts:
            - name: {{ include "prowler-api.fullname" $ }}-config
              mountPath: {{ $.Values.releaseConfigRoot }}{{ $.Values.releaseConfigPath }}
              subPath: config.yaml
            {{- with .volumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
        {{- end }}
        {{- end }}
      volumes:
        - name: {{ include "prowler-api.fullname" . }}-config
          configMap:
            name: {{ include "prowler-api.fullname" . }}-config
        {{- with .Values.volumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
contrib/k8s/helm/prowler-api/templates/ingress.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.className }}
  ingressClassName: {{ . }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- with .pathType }}
            pathType: {{ . }}
            {{- end }}
            backend:
              service:
                name: {{ include "prowler-api.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
{{- end }}
contrib/k8s/helm/prowler-api/templates/secrets.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
type: Opaque
data:
  {{- range $k, $v := .Values.secrets }}
  {{ $k }}: {{ $v | toString | b64enc | quote }}
  {{- end }}
contrib/k8s/helm/prowler-api/templates/service.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    {{- range $name, $config := .Values.containers }}
    {{- if $config.ports }}
    {{- range $p := $config.ports }}
    # The port name comes from the port entry itself; the container entries define no .name key.
    - port: {{ $p.containerPort }}
      targetPort: {{ $p.containerPort }}
      protocol: TCP
      name: {{ $p.name }}
    {{- end }}
    {{- end }}
    {{- end }}
  selector:
    {{- include "prowler-api.selectorLabels" . | nindent 4 }}
contrib/k8s/helm/prowler-api/templates/serviceaccount.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prowler-api.serviceAccountName" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
contrib/k8s/helm/prowler-api/values.yaml (new file, 625 lines)
@@ -0,0 +1,625 @@
# Default values for prowler-api.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# This sets the replica count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1

# This sets the container images; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
containers:
  prowler-api:
    enabled: true
    image:
      repository: prowlercloud/prowler-api
      pullPolicy: IfNotPresent
    ports:
      - name: http
        containerPort: 8080
        protocol: TCP
    command: ["/home/prowler/docker-entrypoint.sh", "prod"]
  worker:
    enabled: true
    image:
      repository: prowlercloud/prowler-api
      pullPolicy: IfNotPresent
    command: ["/home/prowler/docker-entrypoint.sh", "worker"]
  worker-beat:
    enabled: true
    image:
      repository: prowlercloud/prowler-api
      pullPolicy: IfNotPresent
    command: ["../docker-entrypoint.sh", "beat"]

secrets:
  POSTGRES_HOST:
  POSTGRES_PORT: 5432
  POSTGRES_ADMIN_USER:
  POSTGRES_ADMIN_PASSWORD:
  POSTGRES_USER:
  POSTGRES_PASSWORD:
  POSTGRES_DB:
  # Valkey settings
  VALKEY_HOST: valkey-headless
  VALKEY_PORT: "6379"
  VALKEY_DB: "0"
  # Django settings
  DJANGO_ALLOWED_HOSTS: localhost,127.0.0.1,prowler-api
  DJANGO_BIND_ADDRESS: 0.0.0.0
  DJANGO_PORT: "8080"
  DJANGO_DEBUG: False
  DJANGO_SETTINGS_MODULE: config.django.production
  # Select one of [ndjson|human_readable]
  DJANGO_LOGGING_FORMATTER: human_readable
  # Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
  # Applies to both Django and Celery Workers
  DJANGO_LOGGING_LEVEL: INFO
  # Defaults to the maximum available based on CPU cores if not set.
  DJANGO_WORKERS: 2
  # Token lifetime is in minutes
  DJANGO_ACCESS_TOKEN_LIFETIME: "30"
  # Token lifetime is in minutes
  DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
  DJANGO_CACHE_MAX_AGE: "3600"
  DJANGO_STALE_WHILE_REVALIDATE: "60"
  DJANGO_MANAGE_DB_PARTITIONS: "False"
  # openssl genrsa -out private.pem 2048
  DJANGO_TOKEN_SIGNING_KEY:
  # openssl rsa -in private.pem -pubout -out public.pem
  DJANGO_TOKEN_VERIFYING_KEY:
  # openssl rand -base64 32
  DJANGO_SECRETS_ENCRYPTION_KEY:
  DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400

releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
releaseConfigPath: prowler/config/config.yaml
mainConfig:
  # AWS Configuration
  aws:
    # AWS Global Configuration
    # aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
    mute_non_default_regions: False
    # If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
    # Mutelist:
    #   Accounts:
    #     "*":
    #       Checks:
    #         "*":
    #           Regions:
    #             - "ap-southeast-1"
    #             - "ap-southeast-2"
    #           Resources:
    #             - "*"

    # AWS IAM Configuration
    # aws.iam_user_accesskey_unused --> CIS recommends 45 days
    max_unused_access_keys_days: 45
    # aws.iam_user_console_access_unused --> CIS recommends 45 days
    max_console_access_days: 45

    # AWS EC2 Configuration
    # aws.ec2_elastic_ip_shodan
    # TODO: create common config
    shodan_api_key: null
    # aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
    max_security_group_rules: 50
    # aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
    max_ec2_instance_age_in_days: 180
    # aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
    # Allowed network interface types for security groups open to the Internet
    ec2_allowed_interface_types:
      [
        "api_gateway_managed",
        "vpc_endpoint",
      ]
    # Allowed network interface owners for security groups open to the Internet
    ec2_allowed_instance_owners:
      [
        "amazon-elb",
      ]
    # aws.ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports
    ec2_high_risk_ports:
      [
        25,
        110,
        135,
        143,
        445,
        3000,
        4333,
        5000,
        5500,
        8080,
        8088,
      ]

    # AWS ECS Configuration
    # aws.ecs_service_fargate_latest_platform_version
    fargate_linux_latest_version: "1.4.0"
    fargate_windows_latest_version: "1.0.0"

    # AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
    # AWS SSM Configuration (aws.ssm_documents_set_as_public)
    # Single account environment: No action required. The AWS account number will be automatically added by the checks.
    # Multi account environment: Add any additional trusted account numbers to the list, e.g.
    # trusted_account_ids: ["123456789012", "098765432109", "678901234567"]
    trusted_account_ids: []

    # AWS CloudWatch Configuration
    # aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
    log_group_retention_days: 365

    # AWS CloudFormation Configuration
    # cloudformation_stack_cdktoolkit_bootstrap_version --> by default is 21
    recommended_cdk_bootstrap_version: 21

    # AWS AppStream Session Configuration
    # aws.appstream_fleet_session_idle_disconnect_timeout
    max_idle_disconnect_timeout_in_seconds: 600 # 10 Minutes
    # aws.appstream_fleet_session_disconnect_timeout
    max_disconnect_timeout_in_seconds: 300 # 5 Minutes
    # aws.appstream_fleet_maximum_session_duration
    max_session_duration_seconds: 36000 # 10 Hours

    # AWS Lambda Configuration
    # aws.awslambda_function_using_supported_runtimes
    obsolete_lambda_runtimes:
      [
        "java8",
        "go1.x",
        "provided",
        "python3.6",
        "python2.7",
        "python3.7",
        "nodejs4.3",
        "nodejs4.3-edge",
        "nodejs6.10",
        "nodejs",
        "nodejs8.10",
        "nodejs10.x",
        "nodejs12.x",
        "nodejs14.x",
        "nodejs16.x",
        "dotnet5.0",
        "dotnet7",
        "dotnetcore1.0",
        "dotnetcore2.0",
        "dotnetcore2.1",
        "dotnetcore3.1",
        "ruby2.5",
        "ruby2.7",
      ]
    # aws.awslambda_function_vpc_is_in_multi_azs
    lambda_min_azs: 2

    # AWS Organizations
    # aws.organizations_scp_check_deny_regions
    # aws.organizations_enabled_regions: [
    #   "eu-central-1",
    #   "eu-west-1",
    #   "us-east-1"
    # ]
    organizations_enabled_regions: []
    organizations_trusted_delegated_administrators: []

    # AWS ECR
    # aws.ecr_repositories_scan_vulnerabilities_in_latest_image
    # CRITICAL
    # HIGH
    # MEDIUM
    ecr_repository_vulnerability_minimum_severity: "MEDIUM"

    # AWS Trusted Advisor
    # aws.trustedadvisor_premium_support_plan_subscribed
    verify_premium_support_plans: True

    # AWS CloudTrail Configuration
    # aws.cloudtrail_threat_detection_privilege_escalation
    threat_detection_privilege_escalation_threshold: 0.2 # Percentage of actions found to decide if it is a privilege escalation attack event, by default is 0.2 (20%)
    threat_detection_privilege_escalation_minutes: 1440 # Past minutes to search from now for privilege escalation attacks, by default is 1440 minutes (24 hours)
    threat_detection_privilege_escalation_actions:
      [
        "AddPermission",
        "AddRoleToInstanceProfile",
        "AddUserToGroup",
        "AssociateAccessPolicy",
        "AssumeRole",
        "AttachGroupPolicy",
        "AttachRolePolicy",
        "AttachUserPolicy",
        "ChangePassword",
        "CreateAccessEntry",
        "CreateAccessKey",
        "CreateDevEndpoint",
        "CreateEventSourceMapping",
        "CreateFunction",
        "CreateGroup",
        "CreateJob",
        "CreateKeyPair",
        "CreateLoginProfile",
        "CreatePipeline",
        "CreatePolicyVersion",
        "CreateRole",
        "CreateStack",
        "DeleteRolePermissionsBoundary",
        "DeleteRolePolicy",
        "DeleteUserPermissionsBoundary",
        "DeleteUserPolicy",
        "DetachRolePolicy",
        "DetachUserPolicy",
        "GetCredentialsForIdentity",
        "GetId",
        "GetPolicyVersion",
        "GetUserPolicy",
        "Invoke",
        "ModifyInstanceAttribute",
        "PassRole",
        "PutGroupPolicy",
        "PutPipelineDefinition",
        "PutRolePermissionsBoundary",
        "PutRolePolicy",
        "PutUserPermissionsBoundary",
        "PutUserPolicy",
        "ReplaceIamInstanceProfileAssociation",
        "RunInstances",
        "SetDefaultPolicyVersion",
        "UpdateAccessKey",
        "UpdateAssumeRolePolicy",
        "UpdateDevEndpoint",
        "UpdateEventSourceMapping",
        "UpdateFunctionCode",
        "UpdateJob",
        "UpdateLoginProfile",
      ]
    # aws.cloudtrail_threat_detection_enumeration
    threat_detection_enumeration_threshold: 0.3 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
    threat_detection_enumeration_minutes: 1440 # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
    threat_detection_enumeration_actions:
      [
        "DescribeAccessEntry",
        "DescribeAccountAttributes",
        "DescribeAvailabilityZones",
        "DescribeBundleTasks",
        "DescribeCarrierGateways",
        "DescribeClientVpnRoutes",
        "DescribeCluster",
        "DescribeDhcpOptions",
        "DescribeFlowLogs",
        "DescribeImages",
        "DescribeInstanceAttribute",
        "DescribeInstanceInformation",
        "DescribeInstanceTypes",
        "DescribeInstances",
        "DescribeKeyPairs",
        "DescribeLogGroups",
        "DescribeLogStreams",
        "DescribeOrganization",
        "DescribeRegions",
        "DescribeSecurityGroups",
        "DescribeSnapshotAttribute",
        "DescribeSnapshotTierStatus",
        "DescribeSubscriptionFilters",
        "DescribeTransitGatewayMulticastDomains",
        "DescribeVolumes",
        "DescribeVolumesModifications",
        "DescribeVpcEndpointConnectionNotifications",
        "DescribeVpcs",
        "GetAccount",
        "GetAccountAuthorizationDetails",
        "GetAccountSendingEnabled",
        "GetBucketAcl",
        "GetBucketLogging",
        "GetBucketPolicy",
        "GetBucketReplication",
        "GetBucketVersioning",
        "GetCallerIdentity",
        "GetCertificate",
        "GetConsoleScreenshot",
        "GetCostAndUsage",
        "GetDetector",
        "GetEbsDefaultKmsKeyId",
        "GetEbsEncryptionByDefault",
        "GetFindings",
        "GetFlowLogsIntegrationTemplate",
        "GetIdentityVerificationAttributes",
        "GetInstances",
        "GetIntrospectionSchema",
        "GetLaunchTemplateData",
        "GetLogRecord",
        "GetParameters",
        "GetPolicyVersion",
        "GetPublicAccessBlock",
        "GetQueryResults",
        "GetRegions",
        "GetSMSAttributes",
        "GetSMSSandboxAccountStatus",
        "GetSendQuota",
        "GetTransitGatewayRouteTableAssociations",
        "GetUserPolicy",
        "HeadObject",
        "ListAccessKeys",
        "ListAccounts",
        "ListAllMyBuckets",
        "ListAssociatedAccessPolicies",
        "ListAttachedUserPolicies",
        "ListClusters",
        "ListDetectors",
        "ListDomains",
        "ListFindings",
        "ListHostedZones",
        "ListIPSets",
        "ListIdentities",
        "ListInstanceProfiles",
        "ListObjects",
        "ListOrganizationalUnitsForParent",
        "ListOriginationNumbers",
        "ListPolicyVersions",
        "ListRoles",
        "ListRules",
        "ListServiceQuotas",
        "ListSubscriptions",
        "ListTargetsByRule",
        "ListTopics",
        "ListUsers",
        "LookupEvents",
        "Search",
      ]
    # aws.cloudtrail_threat_detection_llm_jacking
    threat_detection_llm_jacking_threshold: 0.4 # Percentage of actions found to decide if it is an LLM Jacking attack event, by default is 0.4 (40%)
    threat_detection_llm_jacking_minutes: 1440 # Past minutes to search from now for LLM Jacking attacks, by default is 1440 minutes (24 hours)
    threat_detection_llm_jacking_actions:
      [
        "PutUseCaseForModelAccess", # Submits a use case for model access, providing justification (Write).
        "PutFoundationModelEntitlement", # Grants entitlement for accessing a foundation model (Write).
        "PutModelInvocationLoggingConfiguration", # Configures logging for model invocations (Write).
        "CreateFoundationModelAgreement", # Creates a new agreement to use a foundation model (Write).
        "InvokeModel", # Invokes a specified Bedrock model for inference using provided prompt and parameters (Read).
        "InvokeModelWithResponseStream", # Invokes a Bedrock model for inference with real-time token streaming (Read).
        "GetUseCaseForModelAccess", # Retrieves an existing use case for model access (Read).
        "GetModelInvocationLoggingConfiguration", # Fetches the logging configuration for model invocations (Read).
        "GetFoundationModelAvailability", # Checks the availability of a foundation model for use (Read).
        "ListFoundationModelAgreementOffers", # Lists available agreement offers for accessing foundation models (List).
        "ListFoundationModels", # Lists the available foundation models in Bedrock (List).
        "ListProvisionedModelThroughputs", # Lists the provisioned throughput for previously created models (List).
      ]

    # AWS RDS Configuration
    # aws.rds_instance_backup_enabled
    # Whether to check RDS instance replicas or not
    check_rds_instance_replicas: False

    # AWS ACM Configuration
    # aws.acm_certificates_expiration_check
    days_to_expire_threshold: 7
    # aws.acm_certificates_with_secure_key_algorithms
    insecure_key_algorithms:
      [
        "RSA-1024",
        "P-192",
        "SHA-1",
      ]

    # AWS EKS Configuration
    # aws.eks_control_plane_logging_all_types_enabled
    # EKS control plane logging types that must be enabled
    eks_required_log_types:
      [
        "api",
        "audit",
        "authenticator",
        "controllerManager",
        "scheduler",
      ]

    # aws.eks_cluster_uses_a_supported_version
    # EKS clusters must be version 1.28 or higher
    eks_cluster_oldest_version_supported: "1.28"

    # AWS CodeBuild Configuration
    # aws.codebuild_project_no_secrets_in_variables
    # CodeBuild sensitive variables that are excluded from the check
    excluded_sensitive_environment_variables:
      [
      ]

    # AWS ELB Configuration
    # aws.elb_is_in_multiple_az
    # Minimum number of Availability Zones that a CLB must be in
    elb_min_azs: 2

    # AWS ELBv2 Configuration
    # aws.elbv2_is_in_multiple_az
    # Minimum number of Availability Zones that an ELBv2 must be in
    elbv2_min_azs: 2

    # AWS Secrets Configuration
    # Patterns to ignore in the secrets checks
    secrets_ignore_patterns: []

    # AWS Secrets Manager Configuration
    # aws.secretsmanager_secret_unused
    # Maximum number of days a secret can be unused
    max_days_secret_unused: 90

    # aws.secretsmanager_secret_rotated_periodically
    # Maximum number of days between secret rotations
    max_days_secret_unrotated: 90

    # AWS Kinesis Configuration
    # Minimum retention period in hours for Kinesis streams
    min_kinesis_stream_retention_hours: 168 # 7 days
  # Azure Configuration
  azure:
    # Azure Network Configuration
    # azure.network_public_ip_shodan
    # TODO: create common config
    shodan_api_key: null

    # Azure App Service
    # azure.app_ensure_php_version_is_latest
    php_latest_version: "8.2"
    # azure.app_ensure_python_version_is_latest
    python_latest_version: "3.12"
    # azure.app_ensure_java_version_is_latest
    java_latest_version: "17"

    # Azure SQL Server
    # azure.sqlserver_minimal_tls_version
    recommended_minimal_tls_versions:
      [
        "1.2",
        "1.3",
      ]

  # GCP Configuration
  gcp:
    # GCP Compute Configuration
    # gcp.compute_public_address_shodan
    shodan_api_key: null

  # Kubernetes Configuration
  kubernetes:
    # Kubernetes API Server
    # kubernetes.apiserver_audit_log_maxbackup_set
    audit_log_maxbackup: 10
    # kubernetes.apiserver_audit_log_maxsize_set
    audit_log_maxsize: 100
    # kubernetes.apiserver_audit_log_maxage_set
    audit_log_maxage: 30
    # kubernetes.apiserver_strong_ciphers_only
    apiserver_strong_ciphers:
      [
        "TLS_AES_128_GCM_SHA256",
        "TLS_AES_256_GCM_SHA384",
        "TLS_CHACHA20_POLY1305_SHA256",
      ]
    # Kubelet
    # kubernetes.kubelet_strong_ciphers_only
    kubelet_strong_ciphers:
      [
        "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
        "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
        "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
        "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
        "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
        "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
        "TLS_RSA_WITH_AES_256_GCM_SHA384",
        "TLS_RSA_WITH_AES_128_GCM_SHA256",
      ]
# This is for the secrets used to pull an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  # This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
  type: ClusterIP
  # This sets the ports; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
  port: 80

# This block is for setting up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

# This is to set up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
#   secret:
#     secretName: mysecret
#     optional: false

# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true

nodeSelector: {}

tolerations: []

affinity: {}
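With these defaults in place, the chart can be installed from a repository checkout. A minimal sketch: the release name, namespace, and secret values below are illustrative, not prescribed by the chart.

```bash
# Hypothetical install; override at least the database settings for your environment.
helm install prowler-api ./contrib/k8s/helm/prowler-api \
  --namespace prowler --create-namespace \
  --set secrets.POSTGRES_HOST=postgres.prowler.svc.cluster.local \
  --set secrets.POSTGRES_USER=prowler \
  --set secrets.POSTGRES_PASSWORD=changeme \
  --set secrets.POSTGRES_DB=prowler
```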
contrib/k8s/helm/prowler-cli/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -39,4 +39,3 @@ spec:
      path: {{ $value }}
    {{- end }}
{{- end }}
Some files were not shown because too many files have changed in this diff.