Compare commits

...

98 Commits

Author SHA1 Message Date
Pepe Fagoaga 2ab5a702c9 chore(api): Bump to v1.3.0 (#6670) 2025-01-23 16:40:59 +01:00
Sergio Garcia 11d9cdf24e feat(resource metadata): add resource metadata to JSON OCSF (#6592)
Co-authored-by: Rubén De la Torre Vico <ruben@prowler.com>
2025-01-23 16:08:13 +01:00
Rubén De la Torre Vico b1de41619b fix: add missing Check_Report_Azure parameters (#6583) 2025-01-23 16:07:23 +01:00
Rubén De la Torre Vico 18a4881a51 feat(network): extract Network resource metadata automated (#6555)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-01-23 16:06:44 +01:00
Rubén De la Torre Vico de76a168c0 feat(storage): extract Storage resource metadata automated (#6563)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-01-23 16:06:40 +01:00
Rubén De la Torre Vico bcc13a742d feat(vm): extract VM resource metadata automated (#6564) 2025-01-23 16:06:32 +01:00
Rubén De la Torre Vico 23b584f4bf feat(sqlserver): extract SQL Server resource metadata automated (#6562) 2025-01-23 16:06:25 +01:00
Daniel Barranquero 074b7c1ff5 feat(aws): include resource metadata to remaining checks (#6551)
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 16:06:18 +01:00
Rubén De la Torre Vico c04e9ed914 feat(postgresql): extract PostgreSQL resource metadata automated (#6560) 2025-01-23 16:06:11 +01:00
Rubén De la Torre Vico 1f1b126e79 feat(policy): extract Policy resource metadata automated (#6558) 2025-01-23 16:06:06 +01:00
Rubén De la Torre Vico 9b0dd80f13 feat(entra): extract Entra resource metadata automated (#6542)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-01-23 16:06:01 +01:00
Rubén De la Torre Vico b0807478f2 feat(monitor): extract monitor resource metadata automated (#6554)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-01-23 16:05:54 +01:00
Rubén De la Torre Vico 11dfa58ecd feat(mysql): extract MySQL resource metadata automated (#6556) 2025-01-23 16:05:48 +01:00
Rubén De la Torre Vico 412f396e0a feat(keyvault): extract KeyVault resource metadata automated (#6553) 2025-01-23 16:05:44 +01:00
Rubén De la Torre Vico 8100d43ff2 feat(iam): extract IAM resource metadata automated (#6552) 2025-01-23 16:05:38 +01:00
Sergio Garcia bc96acef48 fix(gcp): iterate through service projects (#6549)
Co-authored-by: pedrooot <pedromarting3@gmail.com>
2025-01-23 16:05:24 +01:00
Hugo Pereira Brito 6e9876b61a feat(aws): include resource metadata in services from r* to s* (#6536)
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 16:05:17 +01:00
Pedro Martín 32253ca4f7 feat(gcp): add resource metadata to report (#6500)
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 16:05:08 +01:00
Hugo Pereira Brito 4c6be5e283 feat(aws): include resource metadata in services from a* to b* (#6504)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-01-23 16:05:03 +01:00
Daniel Barranquero 69178fd7bd chore(aws): add resource metadata to services from t to w (#6546) 2025-01-23 16:04:49 +01:00
Daniel Barranquero cd4432c14b chore(aws): add resource metadata to services from f to o (#6545) 2025-01-23 16:04:40 +01:00
Rubén De la Torre Vico 0e47172aec feat(defender): extract Defender resource metadata in automated way (#6538) 2025-01-23 16:04:30 +01:00
Rubén De la Torre Vico efe18ae8b2 feat(appinsights): extract App Insights resource metadata in automated way (#6540) 2025-01-23 16:04:18 +01:00
Hugo Pereira Brito d5e13df4fe feat: add resource metadata to emr_cluster_account_public_block_enabled (#6539) 2025-01-23 16:04:13 +01:00
Sergio Garcia b0e84d74f2 feat(kubernetes): add resource metadata to report (#6479) 2025-01-23 16:04:05 +01:00
Hugo Pereira Brito 28526b591c feat(aws): include resource metadata in services from d* to e* (#6532)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
2025-01-23 16:03:59 +01:00
Daniel Barranquero 51e6a6edb1 feat(aws): add resource metadata to all services starting with c (#6493) 2025-01-23 16:03:50 +01:00
Rubén De la Torre Vico c1b132a29e feat(cosmosdb): extract CosmosDB resource metadata in automated way (#6533) 2025-01-23 16:03:43 +01:00
Rubén De la Torre Vico e2530e7d57 feat(containerregistry): extract Container Registry resource metadata in automated way (#6530) 2025-01-23 16:03:37 +01:00
Rubén De la Torre Vico f2fcb1599b feat(azure-app): extract Web App resource metadata in automated way (#6529) 2025-01-23 16:03:32 +01:00
Rubén De la Torre Vico 160eafa0c9 feat(aks): use Check_Report_Azure constructor properly in AKS checks (#6509) 2025-01-23 16:03:17 +01:00
Rubén De la Torre Vico c2bc0f1368 feat(aisearch): use Check_Report_Azure constructor properly in AISearch checks (#6506) 2025-01-23 16:03:10 +01:00
Rubén De la Torre Vico 27d27fff81 feat(azure): include resource metadata in Check_Report_Azure (#6505) 2025-01-23 16:02:00 +01:00
Pepe Fagoaga 66e8f0ce18 chore(scan): Remove ._findings (#6667) 2025-01-23 15:58:37 +01:00
Pedro Martín 0ceae8361b feat(kubernetes): add CIS 1.10 compliance (#6508)
Co-authored-by: Sergio Garcia <hello@mistercloudsec.com>
# Conflicts:
#	prowler/compliance/kubernetes/cis_1.10_kubernetes.json
2025-01-23 15:39:40 +01:00
Pedro Martín 5e52fded83 feat(azure): add CIS 3.0 for Azure (#5226)
# Conflicts:
#	prowler/compliance/azure/cis_3.0_azure.json
2025-01-23 15:35:35 +01:00
Pablo Lara 0a7d07b4b6 chore: adjust DateWithTime component height when used with InfoField (#6669) 2025-01-23 15:21:42 +01:00
Pablo Lara 34b5f483d7 chore(scans): improve scan details (#6665) 2025-01-23 14:16:35 +01:00
Pedro Martín 9d04a1bc52 feat(detect-secrets): get secrets plugins from config.yaml (#6544)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2025-01-23 14:16:15 +01:00
dependabot[bot] b25c0aaa00 chore(deps): bump azure-mgmt-containerservice from 33.0.0 to 34.0.0 (#6630)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 14:16:03 +01:00
dependabot[bot] 652b93ea45 chore(deps): bump azure-mgmt-compute from 33.1.0 to 34.0.0 (#6628)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 14:15:54 +01:00
Pepe Fagoaga ccb7726511 fix(templates): Customize principals and add validation (#6655) 2025-01-23 14:15:41 +01:00
Anton Rubets c514a4e451 chore(helm): Add prowler helm support (#6580)
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 14:15:33 +01:00
Prowler Bot d841bd6890 chore(regions_update): Changes in regions for AWS services (#6652)
Co-authored-by: MrCloudSec <38561120+MrCloudSec@users.noreply.github.com>
2025-01-23 14:15:24 +01:00
dependabot[bot] ff54e10ab2 chore(deps): bump boto3 from 1.35.94 to 1.35.99 (#6651)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 14:15:03 +01:00
Pepe Fagoaga 718e562741 chore(pre-commit): poetry checks for API and SDK (#6658) 2025-01-23 14:14:10 +01:00
Pablo Lara ce05e2a939 feat(providers): show the cloud formation and terraform template links on the form (#6660) 2025-01-23 14:13:37 +01:00
Pablo Lara 90831d3084 feat(providers): make external id field mandatory in the aws role secret form (#6656) 2025-01-23 14:13:30 +01:00
dependabot[bot] 2c928dead5 chore(deps-dev): bump moto from 5.0.16 to 5.0.27 (#6632)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 14:13:23 +01:00
dependabot[bot] 3ffe147664 chore(deps): bump botocore from 1.35.94 to 1.35.99 (#6520)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 14:13:04 +01:00
dependabot[bot] 3373b0f47c chore(deps-dev): bump mkdocs-material from 9.5.49 to 9.5.50 (#6631)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 14:12:52 +01:00
Prowler Bot b29db04560 chore(regions_update): Changes in regions for AWS services (#6599)
Co-authored-by: MrCloudSec <38561120+MrCloudSec@users.noreply.github.com>
2025-01-23 14:12:38 +01:00
Hugo Pereira Brito f964a0362e chore(services): delete all comment headers (#6585) 2025-01-23 14:11:58 +01:00
Prowler Bot 537b23dfae chore(regions_update): Changes in regions for AWS services (#6577)
Co-authored-by: MrCloudSec <38561120+MrCloudSec@users.noreply.github.com>
2025-01-23 14:11:46 +01:00
Pablo Lara 48cf3528b4 feat(findings): add first seen in findings details (#6575) 2025-01-23 14:11:08 +01:00
dependabot[bot] 3c4b9d32c9 chore(deps): bump msgraph-sdk from 1.16.0 to 1.17.0 (#6547)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 14:09:46 +01:00
Víctor Fernández Poyatos fd8361ae2c feat(db): Update Django DB manager to use psycopg3 and connection pooling (#6541)
# Conflicts:
#	api/poetry.lock
#	api/pyproject.toml
2025-01-23 14:07:34 +01:00
Prowler Bot 33cadaa932 chore(regions_update): Changes in regions for AWS services (#6526)
Co-authored-by: MrCloudSec <38561120+MrCloudSec@users.noreply.github.com>
2025-01-23 14:02:14 +01:00
dependabot[bot] cc126eb8b4 chore(deps): bump google-api-python-client from 2.158.0 to 2.159.0 (#6521)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 14:02:07 +01:00
Pedro Martín c996b3f6aa docs(readme): update pr template to add check for readme (#6531) 2025-01-23 14:01:53 +01:00
Adrián Jesús Peña Rodríguez e2b406a300 feat(finding): add first_seen attribute (#6460) 2025-01-23 14:01:46 +01:00
dependabot[bot] c2c69da603 chore(deps): bump django from 5.1.4 to 5.1.5 in /api (#6519)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
# Conflicts:
#	api/poetry.lock
2025-01-23 14:01:33 +01:00
Adrián Jesús Peña Rodríguez 4e51348ff2 feat(provider-secret): make existing external_id field mandatory (#6510) 2025-01-23 14:00:42 +01:00
dependabot[bot] 3257b82706 chore(deps-dev): bump eslint-config-prettier from 9.1.0 to 10.0.1 in /ui (#6518)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 13:59:26 +01:00
Pepe Fagoaga c98d764daa chore(version): set next minor (#6511)
# Conflicts:
#	prowler/config/config.py
#	pyproject.toml
2025-01-23 13:57:20 +01:00
Prowler Bot 0448429a9f chore(regions_update): Changes in regions for AWS services (#6495)
Co-authored-by: MrCloudSec <38561120+MrCloudSec@users.noreply.github.com>
Co-authored-by: MrCloudSec <hello@mistercloudsec.com>
2025-01-23 13:56:43 +01:00
Pepe Fagoaga b948ac6125 feat(prowler-role): Add templates to deploy it in AWS (#6499) 2025-01-23 13:56:28 +01:00
dependabot[bot] 3acc09ea16 chore(deps): bump jinja2 from 3.1.4 to 3.1.5 (#6457)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 13:56:09 +01:00
dependabot[bot] 82d0d0de9d chore(deps-dev): bump bandit from 1.8.0 to 1.8.2 (#6485)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-23 13:56:00 +01:00
Prowler Bot f9cfc4d087 fix: add detector and line number of potential secret (#6663)
Co-authored-by: Kay Agahd <kagahd@users.noreply.github.com>
2025-01-22 10:55:49 -05:00
Pepe Fagoaga 7d68ff455b chore(api): Use prowler from v5.1 (#6659) 2025-01-22 20:04:55 +05:45
Pepe Fagoaga ddf4881971 chore(version): Update version to 5.1.6 (#6645) 2025-01-21 13:28:44 -05:00
Prowler Bot 9ad4944142 fix(filters): fix dynamic filters (#6643)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-21 13:40:18 +01:00
Prowler Bot 7f33ea76a4 fix(OCSF): fix OCSF output when timestamp is UNIX format (#6627)
Co-authored-by: Rubén De la Torre Vico <rubendltv22@gmail.com>
2025-01-20 18:03:51 -05:00
Prowler Bot 1140c29384 fix(aws): list tags for DocumentDB clusters (#6622)
Co-authored-by: Kay Agahd <kagahd@users.noreply.github.com>
2025-01-20 17:22:06 -05:00
Prowler Bot 2441a62f39 fix: update Azure CIS with existing App checks (#6625)
Co-authored-by: Rubén De la Torre Vico <rubendltv22@gmail.com>
2025-01-20 16:27:14 -05:00
Pepe Fagoaga c26a231fc1 chore(version): Update version to 5.1.5 (#6618) 2025-01-20 15:07:58 -05:00
Prowler Bot 2fb2315037 chore(RBAC): add permission's info (#6617)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 17:31:52 +01:00
Prowler Bot a9e475481a fix(snippet-id): improve provider ID readability in tables (#6616)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 17:28:04 +01:00
Prowler Bot 826d7c4dc3 fix(rbac): remove invalid required permission (#6614)
Co-authored-by: Adrián Jesús Peña Rodríguez <adrianjpr@gmail.com>
2025-01-20 17:02:31 +01:00
Prowler Bot b7f4b37f66 feat(api): restrict the deletion of users, only the user of the request can be deleted (#6613)
Co-authored-by: Adrián Jesús Peña Rodríguez <adrianjpr@gmail.com>
2025-01-20 17:02:21 +01:00
Prowler Bot 193d691bfe fix(RBAC): tweaks for edit role form (#6610)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 14:12:43 +01:00
Prowler Bot a359bc581c fix(RBAC): restore manage_account permission for roles (#6603)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 11:53:08 +01:00
Prowler Bot 9a28ff025a fix(sqs): fix flaky test (#6595)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
2025-01-17 12:40:11 -05:00
Prowler Bot f1c7050700 fix(apigatewayv2): managed exception NotFoundException (#6590)
Co-authored-by: Hugo Pereira Brito <101209179+HugoPBrito@users.noreply.github.com>
2025-01-17 09:27:19 -05:00
Pepe Fagoaga 9391c27b9e chore(version): Update version to 5.1.4 (#6591) 2025-01-17 09:25:35 -05:00
Prowler Bot 4c54de092f feat(findings): Add resource_tag filters for findings endpoint (#6587)
Co-authored-by: Víctor Fernández Poyatos <victor@prowler.com>
2025-01-17 19:01:51 +05:45
Prowler Bot 690c482a43 fix(gcp): fix flaky tests from dns service (#6571)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
2025-01-17 08:15:34 -05:00
Prowler Bot ad2d857c6f feat(findings): add /findings/metadata to retrieve dynamic filters information (#6586)
Co-authored-by: Víctor Fernández Poyatos <victor@prowler.com>
2025-01-17 18:47:59 +05:45
Pepe Fagoaga 07ee59d2ef chore(version): Update version to 5.1.3 (#6584) 2025-01-17 18:46:08 +05:45
Prowler Bot bec4617d0a fix(providers): update the label and placeholder based on the cloud provider (#6582)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-17 12:33:25 +01:00
Prowler Bot 94916f8305 fix(findings): remove filter delta_in applied by default (#6579)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-17 11:06:07 +01:00
Prowler Bot 44de651be3 fix(cis): add subsections if needed (#6568)
Co-authored-by: Pedro Martín <pedromarting3@gmail.com>
2025-01-16 14:49:49 -05:00
Prowler Bot bdcba9c642 fix(detect_secrets): refactor logic for detect-secrets (#6566)
Co-authored-by: Pedro Martín <pedromarting3@gmail.com>
2025-01-16 13:07:18 -05:00
Prowler Bot c172f75f1a fix(dep): address compatibility issues (#6557)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-16 14:35:06 +01:00
Prowler Bot ec492fa13a feat(filters): add resource type filter for findings (#6525)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-15 08:43:49 +01:00
Prowler Bot 702659959c fix(Azure TDE): add filter for master DB (#6514)
Co-authored-by: johannes-engler-mw <132657752+johannes-engler-mw@users.noreply.github.com>
2025-01-14 15:25:27 -05:00
Pepe Fagoaga fef332a591 chore(version): set next fixes 2025-01-14 18:05:04 +01:00
1332 changed files with 24562 additions and 13692 deletions
+4
@@ -92,3 +92,7 @@ jQIDAQAB
# openssl rand -base64 32
DJANGO_SECRETS_ENCRYPTION_KEY="oE/ltOhp/n1TdbHjVmzcjDPLcLA41CVI/4Rk+UB5ESc="
DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
DJANGO_DB_CONNECTION_POOL_MIN_SIZE=4
DJANGO_DB_CONNECTION_POOL_MAX_SIZE=10
DJANGO_DB_CONNECTION_POOL_MAX_IDLE=36000
DJANGO_DB_CONNECTION_POOL_MAX_LIFETIME=86400
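
These four variables line up with Django 5.1's native psycopg connection pooling (see the psycopg/psycopg-pool and Django 5.1.5 changes further down in this diff). A minimal sketch of how a settings module could consume them — the database name, credentials, and env var defaults here are illustrative, not taken from this diff:

```python
import os

# Minimal sketch (not the repository's actual settings module) of wiring the
# DJANGO_DB_CONNECTION_POOL_* variables above into Django 5.1's psycopg pooling.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": os.getenv("POSTGRES_DB", "prowler_db"),  # illustrative
        "OPTIONS": {
            # Django forwards this dict as kwargs to psycopg_pool.ConnectionPool.
            "pool": {
                "min_size": int(os.getenv("DJANGO_DB_CONNECTION_POOL_MIN_SIZE", "4")),
                "max_size": int(os.getenv("DJANGO_DB_CONNECTION_POOL_MAX_SIZE", "10")),
                "max_idle": int(os.getenv("DJANGO_DB_CONNECTION_POOL_MAX_IDLE", "36000")),
                "max_lifetime": int(os.getenv("DJANGO_DB_CONNECTION_POOL_MAX_LIFETIME", "86400")),
            },
        },
    }
}
```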
+2 -1
@@ -15,7 +15,8 @@ Please include a summary of the change and which issue is fixed. List any depend
- [ ] Review if the code is being covered by tests.
- [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
- [ ] Review if backport is needed.
- [ ] Review if is needed to change the [Readme.md](https://github.com/prowler-cloud/prowler/blob/master/README.md)
### License
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
+1
@@ -45,6 +45,7 @@ junit-reports/
# Terraform
.terraform*
*.tfstate
*.tfstate.*
# .env
ui/.env*
+19 -1
@@ -27,6 +27,7 @@ repos:
hooks:
- id: shellcheck
exclude: contrib
## PYTHON
- repo: https://github.com/myint/autoflake
rev: v2.3.1
@@ -61,8 +62,25 @@ repos:
rev: 1.8.0
hooks:
- id: poetry-check
name: API - poetry-check
args: ["--directory=./api"]
pass_filenames: false
- id: poetry-lock
args: ["--no-update"]
name: API - poetry-lock
args: ["--no-update", "--directory=./api"]
pass_filenames: false
- id: poetry-check
name: SDK - poetry-check
args: ["--directory=./"]
pass_filenames: false
- id: poetry-lock
name: SDK - poetry-lock
args: ["--no-update", "--directory=./"]
pass_filenames: false
- repo: https://github.com/hadolint/hadolint
rev: v2.13.0-beta
+2 -2
@@ -73,8 +73,8 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
|---|---|---|---|---|
| AWS | 561 | 81 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
| GCP | 77 | 13 -> `prowler gcp --list-services` | 4 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
| Azure | 139 | 18 -> `prowler azure --list-services` | 4 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
| Azure | 139 | 18 -> `prowler azure --list-services` | 5 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 2 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
# 💻 Installation
+4
@@ -23,6 +23,10 @@ DJANGO_SECRETS_ENCRYPTION_KEY=""
DJANGO_MANAGE_DB_PARTITIONS=[True|False]
DJANGO_CELERY_DEADLOCK_ATTEMPTS=5
DJANGO_BROKER_VISIBILITY_TIMEOUT=86400
DJANGO_DB_CONNECTION_POOL_MIN_SIZE=4
DJANGO_DB_CONNECTION_POOL_MAX_SIZE=10
DJANGO_DB_CONNECTION_POOL_MAX_IDLE=36000
DJANGO_DB_CONNECTION_POOL_MAX_LIFETIME=86400
# PostgreSQL settings
# If running django and celery on host, use 'localhost', else use 'postgres-db'
+157 -127
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "about-time"
@@ -362,13 +362,13 @@ isodate = ">=0.6.1,<1.0.0"
[[package]]
name = "azure-mgmt-compute"
version = "33.0.0"
version = "33.1.0"
description = "Microsoft Azure Compute Management Client Library for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "azure-mgmt-compute-33.0.0.tar.gz", hash = "sha256:a3cc0fe4f09c8e1d3523c1bfb92620dfe263a0a893b0ac13a33d7057e9ddddd2"},
{file = "azure_mgmt_compute-33.0.0-py3-none-any.whl", hash = "sha256:155f8d78a1fdedcea1725fd12b85b2d87fbcb6b53f8e77451c644f45701e3bcf"},
{file = "azure_mgmt_compute-33.1.0-py3-none-any.whl", hash = "sha256:9b119a1b7f621aee951074ef110b16d545d7b45c9cfb303becf04210bd772c91"},
{file = "azure_mgmt_compute-33.1.0.tar.gz", hash = "sha256:f5a5e18a5a7a0354562bbfa589b5db4aaf1e8ac3a194a2a910db55900d2535e9"},
]
[package.dependencies]
@@ -476,13 +476,13 @@ isodate = ">=0.6.1,<1.0.0"
[[package]]
name = "azure-mgmt-network"
version = "28.0.0"
version = "28.1.0"
description = "Microsoft Azure Network Management Client Library for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "azure_mgmt_network-28.0.0-py3-none-any.whl", hash = "sha256:2ee23c1f2ba75752187bd7f4c3e94ad172282cbf8153694feadc7886ef88493c"},
{file = "azure_mgmt_network-28.0.0.tar.gz", hash = "sha256:40356d348ef4838324f19a41cd80340b4f8dd4ac2f0a18a4cbd5cc95ef2974f3"},
{file = "azure_mgmt_network-28.1.0-py3-none-any.whl", hash = "sha256:8ddb0e9ec8f10c9c152d60fc945908d113e4591f397ea3e40b92290ec2b01658"},
{file = "azure_mgmt_network-28.1.0.tar.gz", hash = "sha256:8c84bffb5ec75c6e0244e58ecf07c00d5fc421d616b0cb369c6fe585af33cf87"},
]
[package.dependencies]
@@ -689,17 +689,17 @@ files = [
[[package]]
name = "boto3"
version = "1.35.71"
version = "1.35.94"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "boto3-1.35.71-py3-none-any.whl", hash = "sha256:e2969a246bb3208122b3c349c49cc6604c6fc3fc2b2f65d99d3e8ccd745b0c16"},
{file = "boto3-1.35.71.tar.gz", hash = "sha256:3ed7172b3d4fceb6218bb0ec3668c4d40c03690939c2fca4f22bb875d741a07f"},
{file = "boto3-1.35.94-py3-none-any.whl", hash = "sha256:516c514fb447d6f216833d06a0781c003fcf43099a4ca2f5a363a8afe0942070"},
{file = "boto3-1.35.94.tar.gz", hash = "sha256:5aa606239f0fe0dca0506e0ad6bbe4c589048e7e6c2486cee5ec22b6aa7ec2f8"},
]
[package.dependencies]
botocore = ">=1.35.71,<1.36.0"
botocore = ">=1.35.94,<1.36.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.10.0,<0.11.0"
@@ -708,13 +708,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
version = "1.35.71"
version = "1.35.94"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.8"
files = [
{file = "botocore-1.35.71-py3-none-any.whl", hash = "sha256:fc46e7ab1df3cef66dfba1633f4da77c75e07365b36f03bd64a3793634be8fc1"},
{file = "botocore-1.35.71.tar.gz", hash = "sha256:f9fa058e0393660c3fe53c1e044751beb64b586def0bd2212448a7c328b0cbba"},
{file = "botocore-1.35.94-py3-none-any.whl", hash = "sha256:d784d944865d8279c79d2301fc09ac28b5221d4e7328fb4e23c642c253b9932c"},
{file = "botocore-1.35.94.tar.gz", hash = "sha256:2b3309b356541faa4d88bb957dcac1d8004aa44953c0b7d4521a6cc5d3d5d6ba"},
]
[package.dependencies]
@@ -1369,13 +1369,13 @@ profile = ["gprof2dot (>=2022.7.29)"]
[[package]]
name = "django"
version = "5.1.4"
version = "5.1.5"
description = "A high-level Python web framework that encourages rapid development and clean, pragmatic design."
optional = false
python-versions = ">=3.10"
files = [
{file = "Django-5.1.4-py3-none-any.whl", hash = "sha256:236e023f021f5ce7dee5779de7b286565fdea5f4ab86bae5338e3f7b69896cf0"},
{file = "Django-5.1.4.tar.gz", hash = "sha256:de450c09e91879fa5a307f696e57c851955c910a438a35e6b4c895e86bedc82a"},
{file = "Django-5.1.5-py3-none-any.whl", hash = "sha256:c46eb936111fffe6ec4bc9930035524a8be98ec2f74d8a0ff351226a3e52f459"},
{file = "Django-5.1.5.tar.gz", hash = "sha256:19bbca786df50b9eca23cee79d495facf55c8f5c54c529d9bf1fe7b5ea086af3"},
]
[package.dependencies]
@@ -1925,13 +1925,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
[[package]]
name = "google-api-python-client"
version = "2.154.0"
version = "2.158.0"
description = "Google API Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "google_api_python_client-2.154.0-py2.py3-none-any.whl", hash = "sha256:a521bbbb2ec0ba9d6f307cdd64ed6e21eeac372d1bd7493a4ab5022941f784ad"},
{file = "google_api_python_client-2.154.0.tar.gz", hash = "sha256:1b420062e03bfcaa1c79e2e00a612d29a6a934151ceb3d272fe150a656dc8f17"},
{file = "google_api_python_client-2.158.0-py2.py3-none-any.whl", hash = "sha256:36f8c8d2e79e50f76790ca5946d2f3f8333e210dc8539a6c88e0742416474ad2"},
{file = "google_api_python_client-2.158.0.tar.gz", hash = "sha256:b6664597a9955e04977a62752e33fe44cb35c580e190c1cb08a041893172bd67"},
]
[package.dependencies]
@@ -2495,13 +2495,13 @@ files = [
[[package]]
name = "microsoft-kiota-abstractions"
version = "1.6.2"
version = "1.6.8"
description = "Core abstractions for kiota generated libraries in Python"
optional = false
python-versions = "<4.0,>=3.8"
files = [
{file = "microsoft_kiota_abstractions-1.6.2-py3-none-any.whl", hash = "sha256:8c2c777748e80f17dba3809b5d149585d9918198f0f94125e87432f7165ba80e"},
{file = "microsoft_kiota_abstractions-1.6.2.tar.gz", hash = "sha256:dec30f0fb427a051003e94b5c6fcf266f4702ecbd9d6961e3966124b9cbe41bf"},
{file = "microsoft_kiota_abstractions-1.6.8-py3-none-any.whl", hash = "sha256:12819dee24d5aaa31e99683d938f65e50cbc446de087df244cd26c3326ec4e15"},
{file = "microsoft_kiota_abstractions-1.6.8.tar.gz", hash = "sha256:7070affabfa7182841646a0c8491cbb240af366aff2b9132f0caa45c4837dd78"},
]
[package.dependencies]
@@ -2661,13 +2661,13 @@ dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"]
[[package]]
name = "msgraph-sdk"
version = "1.12.0"
version = "1.16.0"
description = "The Microsoft Graph Python SDK"
optional = false
python-versions = ">=3.8"
files = [
{file = "msgraph_sdk-1.12.0-py3-none-any.whl", hash = "sha256:ac298b546b240391b0e407379d039db32862a56d6fe15cf7c5f7e77631fc6771"},
{file = "msgraph_sdk-1.12.0.tar.gz", hash = "sha256:fbb5a8a9f6eed89b496f207eb35b6b4cfc7fefa75608aeef07477a3b2276d4fa"},
{file = "msgraph_sdk-1.16.0-py3-none-any.whl", hash = "sha256:1dd26ece74c43167818e2ff58b062180233ce7187ad2a061057af1195395c56c"},
{file = "msgraph_sdk-1.16.0.tar.gz", hash = "sha256:980d19617d8d8b20545ef77fa5629fef768ce4ea1f2d1a124c5a9dd88d77940c"},
]
[package.dependencies]
@@ -3405,14 +3405,12 @@ files = [
[[package]]
name = "prowler"
version = "5.0.2"
version = "5.1.6"
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
optional = false
python-versions = "<3.13,>=3.9"
files = [
{file = "prowler-5.0.2-py3-none-any.whl", hash = "sha256:752c23fcba0ef1f6b2e14557683a9f1040de278bf525020471b28397e06e477c"},
{file = "prowler-5.0.2.tar.gz", hash = "sha256:32f6fcaaa6921715bda38e19b0782f07f717a7f02b7065f7ea08e83216bcf39f"},
]
python-versions = ">=3.9,<3.13"
files = []
develop = false
[package.dependencies]
alive-progress = "3.2.0"
@@ -3421,13 +3419,13 @@ azure-identity = "1.19.0"
azure-keyvault-keys = "4.10.0"
azure-mgmt-applicationinsights = "4.0.0"
azure-mgmt-authorization = "4.0.0"
azure-mgmt-compute = "33.0.0"
azure-mgmt-compute = "33.1.0"
azure-mgmt-containerregistry = "10.3.0"
azure-mgmt-containerservice = "33.0.0"
azure-mgmt-cosmosdb = "9.7.0"
azure-mgmt-keyvault = "10.3.1"
azure-mgmt-monitor = "6.0.2"
azure-mgmt-network = "28.0.0"
azure-mgmt-network = "28.1.0"
azure-mgmt-rdbms = "10.1.0"
azure-mgmt-resource = "23.2.0"
azure-mgmt-search = "9.1.0"
@@ -3437,31 +3435,37 @@ azure-mgmt-storage = "21.2.1"
azure-mgmt-subscription = "3.1.1"
azure-mgmt-web = "7.3.1"
azure-storage-blob = "12.24.0"
boto3 = "1.35.71"
botocore = "1.35.71"
boto3 = "1.35.94"
botocore = "1.35.94"
colorama = "0.4.6"
cryptography = "43.0.1"
dash = "2.18.2"
dash-bootstrap-components = "1.6.0"
detect-secrets = "1.5.0"
google-api-python-client = "2.154.0"
google-api-python-client = "2.158.0"
google-auth-httplib2 = ">=0.1,<0.3"
jsonschema = "4.23.0"
kubernetes = "31.0.0"
microsoft-kiota-abstractions = "1.6.2"
msgraph-sdk = "1.12.0"
microsoft-kiota-abstractions = "1.6.8"
msgraph-sdk = "1.16.0"
numpy = "2.0.2"
pandas = "2.2.3"
py-ocsf-models = "0.2.0"
pydantic = "1.10.18"
python-dateutil = ">=2.9.0.post0,<3.0.0"
python-dateutil = "^2.9.0.post0"
pytz = "2024.2"
schema = "0.7.7"
shodan = "1.31.0"
slack-sdk = "3.33.4"
slack-sdk = "3.34.0"
tabulate = "0.9.0"
tzlocal = "5.2"
[package.source]
type = "git"
url = "https://github.com/prowler-cloud/prowler.git"
reference = "v5.1"
resolved_reference = "ddf4881971a5e1bafa244429937e19403b3f0268"
[[package]]
name = "psutil"
version = "6.0.0"
@@ -3492,86 +3496,117 @@ files = [
test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
[[package]]
name = "psycopg2-binary"
version = "2.9.9"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
name = "psycopg"
version = "3.2.3"
description = "PostgreSQL database adapter for Python"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"},
{file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"},
{file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"},
{file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"},
{file = "psycopg-3.2.3-py3-none-any.whl", hash = "sha256:644d3973fe26908c73d4be746074f6e5224b03c1101d302d9a53bf565ad64907"},
{file = "psycopg-3.2.3.tar.gz", hash = "sha256:a5764f67c27bec8bfac85764d23c534af2c27b893550377e37ce59c12aac47a2"},
]
[package.dependencies]
psycopg-binary = {version = "3.2.3", optional = true, markers = "implementation_name != \"pypy\" and extra == \"binary\""}
psycopg-pool = {version = "*", optional = true, markers = "extra == \"pool\""}
typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""}
tzdata = {version = "*", markers = "sys_platform == \"win32\""}
[package.extras]
binary = ["psycopg-binary (==3.2.3)"]
c = ["psycopg-c (==3.2.3)"]
dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "dnspython (>=2.1)", "flake8 (>=4.0)", "mypy (>=1.11)", "types-setuptools (>=57.4)", "wheel (>=0.37)"]
docs = ["Sphinx (>=5.0)", "furo (==2022.6.21)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.12)"]
pool = ["psycopg-pool"]
test = ["anyio (>=4.0)", "mypy (>=1.11)", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"]
[[package]]
name = "psycopg-binary"
version = "3.2.3"
description = "PostgreSQL database adapter for Python -- C optimisation distribution"
optional = false
python-versions = ">=3.8"
files = [
{file = "psycopg_binary-3.2.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:965455eac8547f32b3181d5ec9ad8b9be500c10fe06193543efaaebe3e4ce70c"},
{file = "psycopg_binary-3.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:71adcc8bc80a65b776510bc39992edf942ace35b153ed7a9c6c573a6849ce308"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f73adc05452fb85e7a12ed3f69c81540a8875960739082e6ea5e28c373a30774"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8630943143c6d6ca9aefc88bbe5e76c90553f4e1a3b2dc339e67dc34aa86f7e"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bffb61e198a91f712cc3d7f2d176a697cb05b284b2ad150fb8edb308eba9002"},
{file = "psycopg_binary-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4fa2240c9fceddaa815a58f29212826fafe43ce80ff666d38c4a03fb036955"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:192a5f8496e6e1243fdd9ac20e117e667c0712f148c5f9343483b84435854c78"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64dc6e9ec64f592f19dc01a784e87267a64a743d34f68488924251253da3c818"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:79498df398970abcee3d326edd1d4655de7d77aa9aecd578154f8af35ce7bbd2"},
{file = "psycopg_binary-3.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:949551752930d5e478817e0b49956350d866b26578ced0042a61967e3fcccdea"},
{file = "psycopg_binary-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:80a2337e2dfb26950894c8301358961430a0304f7bfe729d34cc036474e9c9b1"},
{file = "psycopg_binary-3.2.3-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:6d8f2144e0d5808c2e2aed40fbebe13869cd00c2ae745aca4b3b16a435edb056"},
{file = "psycopg_binary-3.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:94253be2b57ef2fea7ffe08996067aabf56a1eb9648342c9e3bad9e10c46e045"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fda0162b0dbfa5eaed6cdc708179fa27e148cb8490c7d62e5cf30713909658ea"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c0419cdad8c70eaeb3116bb28e7b42d546f91baf5179d7556f230d40942dc78"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74fbf5dd3ef09beafd3557631e282f00f8af4e7a78fbfce8ab06d9cd5a789aae"},
{file = "psycopg_binary-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d784f614e4d53050cbe8abf2ae9d1aaacf8ed31ce57b42ce3bf2a48a66c3a5c"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4e76ce2475ed4885fe13b8254058be710ec0de74ebd8ef8224cf44a9a3358e5f"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5938b257b04c851c2d1e6cb2f8c18318f06017f35be9a5fe761ee1e2e344dfb7"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:257c4aea6f70a9aef39b2a77d0658a41bf05c243e2bf41895eb02220ac6306f3"},
{file = "psycopg_binary-3.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:06b5cc915e57621eebf2393f4173793ed7e3387295f07fed93ed3fb6a6ccf585"},
{file = "psycopg_binary-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:09baa041856b35598d335b1a74e19a49da8500acedf78164600694c0ba8ce21b"},
{file = "psycopg_binary-3.2.3-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:48f8ca6ee8939bab760225b2ab82934d54330eec10afe4394a92d3f2a0c37dd6"},
{file = "psycopg_binary-3.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5361ea13c241d4f0ec3f95e0bf976c15e2e451e9cc7ef2e5ccfc9d170b197a40"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb987f14af7da7c24f803111dbc7392f5070fd350146af3345103f76ea82e339"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0463a11b1cace5a6aeffaf167920707b912b8986a9c7920341c75e3686277920"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b7be9a6c06518967b641fb15032b1ed682fd3b0443f64078899c61034a0bca6"},
{file = "psycopg_binary-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64a607e630d9f4b2797f641884e52b9f8e239d35943f51bef817a384ec1678fe"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fa33ead69ed133210d96af0c63448b1385df48b9c0247eda735c5896b9e6dbbf"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:1f8b0d0e99d8e19923e6e07379fa00570be5182c201a8c0b5aaa9a4d4a4ea20b"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:709447bd7203b0b2debab1acec23123eb80b386f6c29e7604a5d4326a11e5bd6"},
{file = "psycopg_binary-3.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5e37d5027e297a627da3551a1e962316d0f88ee4ada74c768f6c9234e26346d9"},
{file = "psycopg_binary-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:261f0031ee6074765096a19b27ed0f75498a8338c3dcd7f4f0d831e38adf12d1"},
{file = "psycopg_binary-3.2.3-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:41fdec0182efac66b27478ac15ef54c9ebcecf0e26ed467eb7d6f262a913318b"},
{file = "psycopg_binary-3.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:07d019a786eb020c0f984691aa1b994cb79430061065a694cf6f94056c603d26"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c57615791a337378fe5381143259a6c432cdcbb1d3e6428bfb7ce59fff3fb5c"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8eb9a4e394926b93ad919cad1b0a918e9b4c846609e8c1cfb6b743683f64da0"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5905729668ef1418bd36fbe876322dcb0f90b46811bba96d505af89e6fbdce2f"},
{file = "psycopg_binary-3.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd65774ed7d65101b314808b6893e1a75b7664f680c3ef18d2e5c84d570fa393"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:700679c02f9348a0d0a2adcd33a0275717cd0d0aee9d4482b47d935023629505"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:96334bb64d054e36fed346c50c4190bad9d7c586376204f50bede21a913bf942"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9099e443d4cc24ac6872e6a05f93205ba1a231b1a8917317b07c9ef2b955f1f4"},
{file = "psycopg_binary-3.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1985ab05e9abebfbdf3163a16ebb37fbc5d49aff2bf5b3d7375ff0920bbb54cd"},
{file = "psycopg_binary-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:e90352d7b610b4693fad0feea48549d4315d10f1eba5605421c92bb834e90170"},
{file = "psycopg_binary-3.2.3-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:69320f05de8cdf4077ecd7fefdec223890eea232af0d58f2530cbda2871244a0"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4926ea5c46da30bec4a85907aa3f7e4ea6313145b2aa9469fdb861798daf1502"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c64c4cd0d50d5b2288ab1bcb26c7126c772bbdebdfadcd77225a77df01c4a57e"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05a1bdce30356e70a05428928717765f4a9229999421013f41338d9680d03a63"},
{file = "psycopg_binary-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad357e426b0ea5c3043b8ec905546fa44b734bf11d33b3da3959f6e4447d350"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:967b47a0fd237aa17c2748fdb7425015c394a6fb57cdad1562e46a6eb070f96d"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:71db8896b942770ed7ab4efa59b22eee5203be2dfdee3c5258d60e57605d688c"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2773f850a778575dd7158a6dd072f7925b67f3ba305e2003538e8831fec77a1d"},
{file = "psycopg_binary-3.2.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aeddf7b3b3f6e24ccf7d0edfe2d94094ea76b40e831c16eff5230e040ce3b76b"},
{file = "psycopg_binary-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:824c867a38521d61d62b60aca7db7ca013a2b479e428a0db47d25d8ca5067410"},
{file = "psycopg_binary-3.2.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:9994f7db390c17fc2bd4c09dca722fd792ff8a49bb3bdace0c50a83f22f1767d"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1303bf8347d6be7ad26d1362af2c38b3a90b8293e8d56244296488ee8591058e"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:842da42a63ecb32612bb7f5b9e9f8617eab9bc23bd58679a441f4150fcc51c96"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2bb342a01c76f38a12432848e6013c57eb630103e7556cf79b705b53814c3949"},
{file = "psycopg_binary-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd40af959173ea0d087b6b232b855cfeaa6738f47cb2a0fd10a7f4fa8b74293f"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9b60b465773a52c7d4705b0a751f7f1cdccf81dd12aee3b921b31a6e76b07b0e"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fc6d87a1c44df8d493ef44988a3ded751e284e02cdf785f746c2d357e99782a6"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f0b018e37608c3bfc6039a1dc4eb461e89334465a19916be0153c757a78ea426"},
{file = "psycopg_binary-3.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a29f5294b0b6360bfda69653697eff70aaf2908f58d1073b0acd6f6ab5b5a4f"},
{file = "psycopg_binary-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:e56b1fd529e5dde2d1452a7d72907b37ed1b4f07fdced5d8fb1e963acfff6749"},
]
[[package]]
name = "psycopg-pool"
version = "3.2.4"
description = "Connection Pool for Psycopg"
optional = false
python-versions = ">=3.8"
files = [
{file = "psycopg_pool-3.2.4-py3-none-any.whl", hash = "sha256:f6a22cff0f21f06d72fb2f5cb48c618946777c49385358e0c88d062c59cbd224"},
{file = "psycopg_pool-3.2.4.tar.gz", hash = "sha256:61774b5bbf23e8d22bedc7504707135aaf744679f8ef9b3fe29942920746a6ed"},
]
[package.dependencies]
typing-extensions = ">=4.6"
[[package]]
name = "py-ocsf-models"
version = "0.2.0"
@@ -4370,7 +4405,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da"},
{file = "ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6"},
@@ -4379,7 +4413,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4"},
{file = "ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"},
@@ -4388,7 +4421,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"},
{file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a"},
@@ -4397,7 +4429,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6"},
{file = "ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fc4b630cd3fa2cf7fce38afa91d7cfe844a9f75d7f0f36393fa98815e911d987"},
@@ -4406,7 +4437,6 @@ files = [
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2f1c3765db32be59d18ab3953f43ab62a761327aafc1594a2a1fbe038b8b8a7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d85252669dc32f98ebcd5d36768f5d4faeaeaa2d655ac0473be490ecdae3c285"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e143ada795c341b56de9418c58d028989093ee611aa27ffb9b7f609c00d813ed"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c59aa6170b990d8d2719323e628aaf36f3bfbc1c26279c0eeeb24d05d2d11c7"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win32.whl", hash = "sha256:beffaed67936fbbeffd10966a4eb53c402fafd3d6833770516bf7314bc6ffa12"},
{file = "ruamel.yaml.clib-0.2.12-cp39-cp39-win_amd64.whl", hash = "sha256:040ae85536960525ea62868b642bdb0c2cc6021c9f9d507810c0c604e66f5a7b"},
{file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"},
@@ -4582,17 +4612,17 @@ files = [
[[package]]
name = "slack-sdk"
version = "3.33.4"
version = "3.34.0"
description = "The Slack API Platform SDK for Python"
optional = false
python-versions = ">=3.6"
files = [
{file = "slack_sdk-3.33.4-py2.py3-none-any.whl", hash = "sha256:9f30cb3c9c07b441c49d53fc27f9f1837ad1592a7e9d4ca431f53cdad8826cc6"},
{file = "slack_sdk-3.33.4.tar.gz", hash = "sha256:5e109847f6b6a22d227609226ba4ed936109dc00675bddeb7e0bee502d3ee7e0"},
{file = "slack_sdk-3.34.0-py2.py3-none-any.whl", hash = "sha256:c61f57f310d85be83466db5a98ab6ae3bb2e5587437b54fa0daa8fae6a0feffa"},
{file = "slack_sdk-3.34.0.tar.gz", hash = "sha256:ff61db7012160eed742285ea91f11c72b7a38a6500a7f6c5335662b4bc6b853d"},
]
[package.extras]
optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=9.1,<14)"]
optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=9.1,<15)"]
[[package]]
name = "sniffio"
@@ -5070,4 +5100,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.11,<3.13"
content-hash = "247311932c0e29596c7edad53f681df4265c1578deda47045c67c355cd4eb883"
content-hash = "952b86609d55795904bd3e85fdd309d251535cee7e864de12ecf6035924e7826"
+4 -4
@@ -8,11 +8,11 @@ description = "Prowler's API (Django/DRF)"
license = "Apache-2.0"
name = "prowler-api"
package-mode = false
version = "1.1.0"
version = "1.3.0"
[tool.poetry.dependencies]
celery = {extras = ["pytest"], version = "^5.4.0"}
django = "5.1.4"
django = "5.1.5"
django-celery-beat = "^2.7.0"
django-celery-results = "^2.5.1"
django-cors-headers = "4.4.0"
@@ -27,8 +27,8 @@ drf-nested-routers = "^0.94.1"
drf-spectacular = "0.27.2"
drf-spectacular-jsonapi = "0.5.1"
gunicorn = "23.0.0"
prowler = "^5.0"
psycopg2-binary = "2.9.9"
prowler = {git = "https://github.com/prowler-cloud/prowler.git", branch = "v5.2"}
psycopg = {extras = ["pool", "binary"], version = "3.2.3"}
pytest-celery = {extras = ["redis"], version = "^1.0.1"}
# Needed for prowler compatibility
python = ">=3.11,<3.13"
+30 -31
@@ -6,8 +6,10 @@ from datetime import datetime, timedelta, timezone
from django.conf import settings
from django.contrib.auth.models import BaseUserManager
from django.db import connection, models, transaction
from psycopg2 import connect as psycopg2_connect
from psycopg2.extensions import AsIs, new_type, register_adapter, register_type
from psycopg import connect as psycopg_connect
from psycopg.adapt import Dumper
from psycopg.types import TypeInfo
from psycopg.types.string import TextLoader
from rest_framework_json_api.serializers import ValidationError
DB_USER = settings.DATABASES["default"]["USER"] if not settings.TESTING else "test"
@@ -20,6 +22,7 @@ DB_PROWLER_USER = (
DB_PROWLER_PASSWORD = (
settings.DATABASES["prowler_user"]["PASSWORD"] if not settings.TESTING else "test"
)
TASK_RUNNER_DB_TABLE = "django_celery_results_taskresult"
POSTGRES_TENANT_VAR = "api.tenant_id"
POSTGRES_USER_VAR = "api.user_id"
@@ -29,21 +32,25 @@ SET_CONFIG_QUERY = "SELECT set_config(%s, %s::text, TRUE);"
@contextmanager
def psycopg_connection(database_alias: str):
psycopg2_connection = None
"""
Context manager returning a psycopg 3 connection
for the specified 'database_alias' in Django settings.
"""
pg_conn = None
try:
admin_db = settings.DATABASES[database_alias]
psycopg2_connection = psycopg2_connect(
pg_conn = psycopg_connect(
dbname=admin_db["NAME"],
user=admin_db["USER"],
password=admin_db["PASSWORD"],
host=admin_db["HOST"],
port=admin_db["PORT"],
)
yield psycopg2_connection
yield pg_conn
finally:
if psycopg2_connection is not None:
psycopg2_connection.close()
if pg_conn is not None:
pg_conn.close()
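# A minimal usage sketch of the context manager above, assuming it is importable
# from api.db_utils (hypothetical path); "admin" is the database alias defined in
# the Django settings.
from api.db_utils import psycopg_connection  # hypothetical import path

with psycopg_connection("admin") as conn:
    with conn.cursor() as cursor:
        cursor.execute("SELECT current_user;")
        print(cursor.fetchone()[0])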
@contextmanager
@@ -59,7 +66,7 @@ def rls_transaction(value: str, parameter: str = POSTGRES_TENANT_VAR):
with transaction.atomic():
with connection.cursor() as cursor:
try:
# just in case the value is an UUID object
# Just in case the value is a UUID object
uuid.UUID(str(value))
except ValueError:
raise ValidationError("Must be a valid UUID")
@@ -187,32 +194,24 @@ class EnumType:
return self.value
def enum_adapter(enum_obj):
return AsIs(f"'{enum_obj.value}'::{enum_obj.__class__.enum_type_name}")
def register_enum(apps, schema_editor, enum_class):
"""
psycopg 3 approach: register a loader + dumper for the given enum_class,
so we can read/write the custom Postgres ENUM seamlessly.
"""
with psycopg_connection(schema_editor.connection.alias) as conn:
ti = TypeInfo.fetch(conn, enum_class.enum_type_name)
class EnumLoader(TextLoader):
def load(self, data):
return data
def get_enum_oid(connection, enum_type_name: str):
with connection.cursor() as cursor:
cursor.execute("SELECT oid FROM pg_type WHERE typname = %s;", (enum_type_name,))
result = cursor.fetchone()
if result is None:
raise ValueError(f"Enum type '{enum_type_name}' not found")
return result[0]
class EnumDumper(Dumper):
def dump(self, obj):
return f"'{obj.value}'::{obj.__class__.enum_type_name}"
def register_enum(apps, schema_editor, enum_class): # noqa: F841
with psycopg_connection(schema_editor.connection.alias) as connection:
enum_oid = get_enum_oid(connection, enum_class.enum_type_name)
enum_instance = new_type(
(enum_oid,),
enum_class.enum_type_name,
lambda value, cur: value, # noqa: F841
)
register_type(enum_instance, connection)
register_adapter(enum_class, enum_adapter)
# Postgres enum definition for member role
conn.adapters.register_loader(ti.oid, EnumLoader)
conn.adapters.register_dumper(enum_class, EnumDumper)
class MemberRoleEnum(EnumType):
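# A hedged sketch of how register_enum is typically wired into a Django data
# migration; the import paths and dependency below are assumptions for
# illustration only.
from functools import partial

from django.db import migrations

from api.db_utils import register_enum  # hypothetical import path
from api.models import MemberRoleEnum  # hypothetical import path


class Migration(migrations.Migration):
    dependencies = [("api", "0001_initial")]  # assumed dependency

    operations = [
        # RunPython passes (apps, schema_editor); enum_class is pre-bound here.
        migrations.RunPython(partial(register_enum, enum_class=MemberRoleEnum)),
    ]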
+37
@@ -319,6 +319,27 @@ class FindingFilter(FilterSet):
field_name="resources__type", lookup_expr="icontains"
)
resource_tag_key = CharFilter(field_name="resources__tags__key")
resource_tag_key__in = CharInFilter(
field_name="resources__tags__key", lookup_expr="in"
)
resource_tag_key__icontains = CharFilter(
field_name="resources__tags__key", lookup_expr="icontains"
)
resource_tag_value = CharFilter(field_name="resources__tags__value")
resource_tag_value__in = CharInFilter(
field_name="resources__tags__value", lookup_expr="in"
)
resource_tag_value__icontains = CharFilter(
field_name="resources__tags__value", lookup_expr="icontains"
)
resource_tags = CharInFilter(
method="filter_resource_tag",
lookup_expr="in",
help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
"separated by commas.",
)
scan = UUIDFilter(method="filter_scan_id")
scan__in = UUIDInFilter(method="filter_scan_id_in")
@@ -353,6 +374,12 @@ class FindingFilter(FilterSet):
},
}
@property
def qs(self):
# Force distinct results to prevent duplicates with many-to-many relationships
parent_qs = super().qs
return parent_qs.distinct()
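# An illustrative sketch (hypothetical data and import path) of why distinct()
# is needed: joining Finding -> resources -> tags can emit one row per matching
# tag, so the same finding could otherwise be listed more than once.
from api.models import Finding  # hypothetical import path

matches = Finding.objects.filter(resources__tags__key__icontains="key")
assert matches.distinct().count() <= matches.count()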
# Convert filter values to UUIDv7 values for use with partitioning
def filter_scan_id(self, queryset, name, value):
try:
@@ -426,6 +453,16 @@ class FindingFilter(FilterSet):
return queryset.filter(id__lte=end).filter(inserted_at__lte=value)
def filter_resource_tag(self, queryset, name, value):
overall_query = Q()
for key_value_pair in value:
tag_key, tag_value = key_value_pair.split(":", 1)
overall_query |= Q(
resources__tags__key__icontains=tag_key,
resources__tags__value__icontains=tag_value,
)
return queryset.filter(overall_query).distinct()
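# An illustrative sketch of the resource_tags filter (hypothetical request path
# and tag values): comma-separated key:value pairs, each matched with
# case-insensitive icontains lookups and OR-ed together, mirroring
# filter_resource_tag above.
#   GET /api/v1/findings?filter[resource_tags]=env:prod,team:security
from django.db.models import Q

pairs = ["env:prod", "team:security"]  # as parsed by CharInFilter
query = Q()
for pair in pairs:
    key, value = pair.split(":", 1)
    query |= Q(
        resources__tags__key__icontains=key,
        resources__tags__value__icontains=value,
    )
# queryset.filter(query).distinct() is what the filter method returns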
@staticmethod
def maybe_date_to_datetime(value):
dt = value
@@ -6,6 +6,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.823Z",
"updated_at": "2024-10-18T10:46:04.841Z",
"first_seen_at": "2024-10-18T10:46:04.823Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -61,6 +62,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.855Z",
"updated_at": "2024-10-18T10:46:04.858Z",
"first_seen_at": "2024-10-18T10:46:04.855Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
"delta": "new",
"status": "FAIL",
@@ -116,6 +118,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.869Z",
"updated_at": "2024-10-18T10:46:04.876Z",
"first_seen_at": "2024-10-18T10:46:04.869Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -171,6 +174,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.888Z",
"updated_at": "2024-10-18T10:46:04.892Z",
"first_seen_at": "2024-10-18T10:46:04.888Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -226,6 +230,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.901Z",
"updated_at": "2024-10-18T10:46:04.905Z",
"first_seen_at": "2024-10-18T10:46:04.901Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -281,6 +286,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.915Z",
"updated_at": "2024-10-18T10:46:04.919Z",
"first_seen_at": "2024-10-18T10:46:04.915Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -336,6 +342,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.929Z",
"updated_at": "2024-10-18T10:46:04.934Z",
"first_seen_at": "2024-10-18T10:46:04.929Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -391,6 +398,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.944Z",
"updated_at": "2024-10-18T10:46:04.947Z",
"first_seen_at": "2024-10-18T10:46:04.944Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -446,6 +454,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.957Z",
"updated_at": "2024-10-18T10:46:04.962Z",
"first_seen_at": "2024-10-18T10:46:04.957Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
"delta": "new",
"status": "PASS",
@@ -501,6 +510,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.971Z",
"updated_at": "2024-10-18T10:46:04.975Z",
"first_seen_at": "2024-10-18T10:46:04.971Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -556,6 +566,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.984Z",
"updated_at": "2024-10-18T10:46:04.989Z",
"first_seen_at": "2024-10-18T10:46:04.984Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -611,6 +622,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:04.999Z",
"updated_at": "2024-10-18T10:46:05.003Z",
"first_seen_at": "2024-10-18T10:46:04.999Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -666,6 +678,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.013Z",
"updated_at": "2024-10-18T10:46:05.018Z",
"first_seen_at": "2024-10-18T10:46:05.013Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -721,6 +734,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.029Z",
"updated_at": "2024-10-18T10:46:05.033Z",
"first_seen_at": "2024-10-18T10:46:05.029Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -776,6 +790,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.045Z",
"updated_at": "2024-10-18T10:46:05.050Z",
"first_seen_at": "2024-10-18T10:46:05.045Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -831,6 +846,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.061Z",
"updated_at": "2024-10-18T10:46:05.065Z",
"first_seen_at": "2024-10-18T10:46:05.061Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
"delta": "new",
"status": "FAIL",
@@ -886,6 +902,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.080Z",
"updated_at": "2024-10-18T10:46:05.085Z",
"first_seen_at": "2024-10-18T10:46:05.080Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -941,6 +958,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.099Z",
"updated_at": "2024-10-18T10:46:05.104Z",
"first_seen_at": "2024-10-18T10:46:05.099Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
"delta": "new",
"status": "FAIL",
@@ -996,6 +1014,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T10:46:05.115Z",
"updated_at": "2024-10-18T10:46:05.121Z",
"first_seen_at": "2024-10-18T10:46:05.115Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
"delta": "new",
"status": "FAIL",
@@ -1051,6 +1070,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.489Z",
"updated_at": "2024-10-18T11:16:24.506Z",
"first_seen_at": "2024-10-18T10:46:04.823Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1106,6 +1126,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.518Z",
"updated_at": "2024-10-18T11:16:24.521Z",
"first_seen_at": "2024-10-18T10:46:04.855Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
"delta": null,
"status": "FAIL",
@@ -1161,6 +1182,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.526Z",
"updated_at": "2024-10-18T11:16:24.529Z",
"first_seen_at": "2024-10-18T10:46:04.869Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1216,6 +1238,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.535Z",
"updated_at": "2024-10-18T11:16:24.538Z",
"first_seen_at": "2024-10-18T10:46:04.888Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1271,6 +1294,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.544Z",
"updated_at": "2024-10-18T11:16:24.546Z",
"first_seen_at": "2024-10-18T10:46:04.901Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1326,6 +1350,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.551Z",
"updated_at": "2024-10-18T11:16:24.554Z",
"first_seen_at": "2024-10-18T10:46:04.915Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1381,6 +1406,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.560Z",
"updated_at": "2024-10-18T11:16:24.562Z",
"first_seen_at": "2024-10-18T10:46:04.929Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1436,6 +1462,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.567Z",
"updated_at": "2024-10-18T11:16:24.569Z",
"first_seen_at": "2024-10-18T10:46:04.944Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1491,6 +1518,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.573Z",
"updated_at": "2024-10-18T11:16:24.575Z",
"first_seen_at": "2024-10-18T10:46:04.957Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
"delta": null,
"status": "PASS",
@@ -1546,6 +1574,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.580Z",
"updated_at": "2024-10-18T11:16:24.582Z",
"first_seen_at": "2024-10-18T10:46:04.971Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1601,6 +1630,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.587Z",
"updated_at": "2024-10-18T11:16:24.589Z",
"first_seen_at": "2024-10-18T10:46:04.984Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1656,6 +1686,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.595Z",
"updated_at": "2024-10-18T11:16:24.597Z",
"first_seen_at": "2024-10-18T10:46:04.999Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1711,6 +1742,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.602Z",
"updated_at": "2024-10-18T11:16:24.604Z",
"first_seen_at": "2024-10-18T10:46:05.013Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1766,6 +1798,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.610Z",
"updated_at": "2024-10-18T11:16:24.612Z",
"first_seen_at": "2024-10-18T10:46:05.029Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1821,6 +1854,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.617Z",
"updated_at": "2024-10-18T11:16:24.620Z",
"first_seen_at": "2024-10-18T10:46:05.045Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1876,6 +1910,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.625Z",
"updated_at": "2024-10-18T11:16:24.627Z",
"first_seen_at": "2024-10-18T10:46:05.061Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
"delta": null,
"status": "FAIL",
@@ -1931,6 +1966,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.632Z",
"updated_at": "2024-10-18T11:16:24.634Z",
"first_seen_at": "2024-10-18T10:46:05.080Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -1986,6 +2022,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.639Z",
"updated_at": "2024-10-18T11:16:24.642Z",
"first_seen_at": "2024-10-18T10:46:05.099Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
"delta": null,
"status": "FAIL",
@@ -2041,6 +2078,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:24.646Z",
"updated_at": "2024-10-18T11:16:24.648Z",
"first_seen_at": "2024-10-18T10:46:05.115Z",
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
"delta": null,
"status": "FAIL",
@@ -2096,6 +2134,7 @@
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
"inserted_at": "2024-10-18T11:16:26.033Z",
"updated_at": "2024-10-18T11:16:26.045Z",
"first_seen_at": "2024-10-18T11:16:26.033Z",
"uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566",
"delta": "new",
"status": "MANUAL",
@@ -0,0 +1,15 @@
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0005_rbac_missing_admin_roles"),
]
operations = [
migrations.AddField(
model_name="finding",
name="first_seen_at",
field=models.DateTimeField(editable=False, null=True),
),
]
+1
@@ -615,6 +615,7 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True, editable=False)
first_seen_at = models.DateTimeField(editable=False, null=True)
uid = models.CharField(max_length=300)
delta = FindingDeltaEnumField(
File diff suppressed because it is too large
+45 -14
@@ -261,6 +261,16 @@ class TestUserViewSet:
assert response.status_code == status.HTTP_204_NO_CONTENT
assert not User.objects.filter(id=create_test_user.id).exists()
def test_users_destroy_other_user(
self, authenticated_client, create_test_user, users_fixture
):
user = users_fixture[2]
response = authenticated_client.delete(
reverse("user-detail", kwargs={"pk": str(user.id)})
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert User.objects.filter(id=create_test_user.id).exists()
def test_users_destroy_invalid_user(self, authenticated_client, create_test_user):
another_user = User.objects.create_user(
password="otherpassword", email="other@example.com"
@@ -268,7 +278,7 @@ class TestUserViewSet:
response = authenticated_client.delete(
reverse("user-detail", kwargs={"pk": another_user.id})
)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert User.objects.filter(id=another_user.id).exists()
@pytest.mark.parametrize(
@@ -2444,6 +2454,15 @@ class TestFindingViewSet:
("search", "ec2", 2),
# full text search on finding tags
("search", "value2", 2),
("resource_tag_key", "key", 2),
("resource_tag_key__in", "key,key2", 2),
("resource_tag_key__icontains", "key", 2),
("resource_tag_value", "value", 2),
("resource_tag_value__in", "value,value2", 2),
("resource_tag_value__icontains", "value", 2),
("resource_tags", "key:value", 2),
("resource_tags", "not:exists", 0),
("resource_tags", "not:exists,key:value", 2),
]
),
)
@@ -2582,30 +2601,34 @@ class TestFindingViewSet:
)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_findings_services_regions_retrieve(
self, authenticated_client, findings_fixture
):
def test_findings_metadata_retrieve(self, authenticated_client, findings_fixture):
finding_1, *_ = findings_fixture
response = authenticated_client.get(
reverse("finding-findings_services_regions"),
reverse("finding-metadata"),
{"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d")},
)
data = response.json()
expected_services = {"ec2", "s3"}
expected_regions = {"eu-west-1", "us-east-1"}
expected_tags = {"key": ["value"], "key2": ["value2"]}
expected_resource_types = {"prowler-test"}
assert data["data"]["type"] == "finding-dynamic-filters"
assert data["data"]["type"] == "findings-metadata"
assert data["data"]["id"] is None
assert set(data["data"]["attributes"]["services"]) == expected_services
assert set(data["data"]["attributes"]["regions"]) == expected_regions
assert (
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
)
assert data["data"]["attributes"]["tags"] == expected_tags
def test_findings_services_regions_severity_retrieve(
def test_findings_metadata_severity_retrieve(
self, authenticated_client, findings_fixture
):
finding_1, *_ = findings_fixture
response = authenticated_client.get(
reverse("finding-findings_services_regions"),
reverse("finding-metadata"),
{
"filter[severity__in]": ["low", "medium"],
"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d"),
@@ -2615,26 +2638,34 @@ class TestFindingViewSet:
expected_services = {"s3"}
expected_regions = {"eu-west-1"}
expected_tags = {"key": ["value"], "key2": ["value2"]}
expected_resource_types = {"prowler-test"}
assert data["data"]["type"] == "finding-dynamic-filters"
assert data["data"]["type"] == "findings-metadata"
assert data["data"]["id"] is None
assert set(data["data"]["attributes"]["services"]) == expected_services
assert set(data["data"]["attributes"]["regions"]) == expected_regions
assert (
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
)
assert data["data"]["attributes"]["tags"] == expected_tags
def test_findings_services_regions_future_date(self, authenticated_client):
def test_findings_metadata_future_date(self, authenticated_client):
response = authenticated_client.get(
reverse("finding-findings_services_regions"),
reverse("finding-metadata"),
{"filter[inserted_at]": "2048-01-01"},
)
data = response.json()
assert data["data"]["type"] == "finding-dynamic-filters"
assert data["data"]["type"] == "findings-metadata"
assert data["data"]["id"] is None
assert data["data"]["attributes"]["services"] == []
assert data["data"]["attributes"]["regions"] == []
assert data["data"]["attributes"]["tags"] == {}
assert data["data"]["attributes"]["resource_types"] == []
def test_findings_services_regions_invalid_date(self, authenticated_client):
def test_findings_metadata_invalid_date(self, authenticated_client):
response = authenticated_client.get(
reverse("finding-findings_services_regions"),
reverse("finding-metadata"),
{"filter[inserted_at]": "2048-01-011"},
)
assert response.json() == {
+20 -7
@@ -905,6 +905,7 @@ class FindingSerializer(RLSSerializer):
"raw_result",
"inserted_at",
"updated_at",
"first_seen_at",
"url",
# Relationships
"scan",
@@ -917,6 +918,7 @@ class FindingSerializer(RLSSerializer):
}
# To be removed when the related endpoint is removed as well
class FindingDynamicFilterSerializer(serializers.Serializer):
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
@@ -925,6 +927,18 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
resource_name = "finding-dynamic-filters"
class FindingMetadataSerializer(serializers.Serializer):
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
resource_types = serializers.ListField(
child=serializers.CharField(), allow_empty=True
)
tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")
class Meta:
resource_name = "findings-metadata"
# Provider secrets
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
@staticmethod
@@ -997,7 +1011,7 @@ class KubernetesProviderSecret(serializers.Serializer):
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
role_arn = serializers.CharField()
external_id = serializers.CharField(required=False)
external_id = serializers.CharField()
role_session_name = serializers.CharField(required=False)
session_duration = serializers.IntegerField(
required=False, min_value=900, max_value=43200
@@ -1044,6 +1058,10 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
"description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
"assumption.",
},
"external_id": {
"type": "string",
"description": "An identifier to enhance security for role assumption.",
},
"aws_access_key_id": {
"type": "string",
"description": "The AWS access key ID. Only required if the environment lacks pre-configured "
@@ -1065,11 +1083,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
"default": 3600,
"description": "The duration (in seconds) for the role session.",
},
"external_id": {
"type": "string",
"description": "An optional identifier to enhance security for role assumption; may be "
"required by the role administrator.",
},
"role_session_name": {
"type": "string",
"description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
@@ -1083,7 +1096,7 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
"pattern": "^[a-zA-Z0-9=,.@_-]+$",
},
},
"required": ["role_arn"],
"required": ["role_arn", "external_id"],
},
{
"type": "object",
+66 -5
@@ -4,6 +4,7 @@ from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.search import SearchQuery
from django.db import transaction
from django.db.models import Count, F, OuterRef, Prefetch, Q, Subquery, Sum
from django.db.models.functions import JSONObject
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
@@ -87,6 +88,7 @@ from api.v1.serializers import (
ComplianceOverviewFullSerializer,
ComplianceOverviewSerializer,
FindingDynamicFilterSerializer,
FindingMetadataSerializer,
FindingSerializer,
InvitationAcceptSerializer,
InvitationCreateSerializer,
@@ -192,7 +194,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.1.1"
spectacular_settings.VERSION = "1.2.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
@@ -275,8 +277,8 @@ class SchemaView(SpectacularAPIView):
),
destroy=extend_schema(
tags=["User"],
summary="Delete a user account",
description="Remove a user account from the system.",
summary="Delete the user account",
description="Remove the current user account from the system.",
),
me=extend_schema(
tags=["User"],
@@ -340,6 +342,12 @@ class UserViewSet(BaseUserViewset):
status=status.HTTP_200_OK,
)
def destroy(self, request, *args, **kwargs):
if kwargs["pk"] != str(self.request.user.id):
raise ValidationError("Only the current user can be deleted.")
return super().destroy(request, *args, **kwargs)
@extend_schema(
parameters=[
OpenApiParameter(
@@ -1044,7 +1052,7 @@ class ScanViewSet(BaseRLSViewSet):
"""
if self.request.method in SAFE_METHODS:
# No permissions required for GET requests
self.required_permissions = [Permissions.MANAGE_PROVIDERS]
self.required_permissions = []
else:
# Require permission for non-GET requests
self.required_permissions = [Permissions.MANAGE_SCANS]
@@ -1274,7 +1282,13 @@ class ResourceViewSet(BaseRLSViewSet):
tags=["Finding"],
summary="Retrieve the services and regions that are impacted by findings",
description="Fetch services and regions affected in findings.",
responses={201: OpenApiResponse(response=MembershipSerializer)},
filters=True,
deprecated=True,
),
metadata=extend_schema(
tags=["Finding"],
summary="Retrieve metadata values from findings",
description="Fetch unique metadata values from a set of findings. This is useful for dynamic filtering.",
filters=True,
),
)
@@ -1308,6 +1322,8 @@ class FindingViewSet(BaseRLSViewSet):
def get_serializer_class(self):
if self.action == "findings_services_regions":
return FindingDynamicFilterSerializer
elif self.action == "metadata":
return FindingMetadataSerializer
return super().get_serializer_class()
@@ -1376,6 +1392,51 @@ class FindingViewSet(BaseRLSViewSet):
return Response(data=serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=["get"], url_name="metadata")
def metadata(self, request):
queryset = self.get_queryset()
filtered_queryset = self.filter_queryset(queryset)
result = filtered_queryset.aggregate(
services=ArrayAgg("resources__service", flat=True, distinct=True),
regions=ArrayAgg("resources__region", flat=True, distinct=True),
tags=ArrayAgg(
JSONObject(
key=F("resources__tags__key"), value=F("resources__tags__value")
),
distinct=True,
filter=Q(resources__tags__key__isnull=False),
),
resource_types=ArrayAgg("resources__type", flat=True, distinct=True),
)
if result["services"] is None:
result["services"] = []
if result["regions"] is None:
result["regions"] = []
if result["regions"] is None:
result["regions"] = []
if result["resource_types"] is None:
result["resource_types"] = []
if result["tags"] is None:
result["tags"] = []
tags_dict = {}
for t in result["tags"]:
key, value = t["key"], t["value"]
if key not in tags_dict:
tags_dict[key] = []
tags_dict[key].append(value)
result["tags"] = tags_dict
serializer = self.get_serializer(
data=result,
)
serializer.is_valid(raise_exception=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
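# An illustrative response envelope for the metadata action above (placeholder
# attribute values); the shape follows the API tests in this changeset:
# type "findings-metadata" with a null id.
expected_metadata_response = {
    "data": {
        "type": "findings-metadata",
        "id": None,
        "attributes": {
            "services": ["ec2", "s3"],
            "regions": ["eu-west-1", "us-east-1"],
            "resource_types": ["prowler-test"],
            "tags": {"key": ["value"], "key2": ["value2"]},
        },
    }
}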
@extend_schema_view(
list=extend_schema(
+6
@@ -115,6 +115,12 @@ DJANGO_GUID = {
DATABASE_ROUTERS = ["api.db_router.MainRouter"]
# Database connection pool
DB_CP_MIN_SIZE = env.int("DJANGO_DB_CONNECTION_POOL_MIN_SIZE", 4)
DB_CP_MAX_SIZE = env.int("DJANGO_DB_CONNECTION_POOL_MAX_SIZE", 10)
DB_CP_MAX_IDLE = env.int("DJANGO_DB_CONNECTION_POOL_MAX_IDLE", 36000)
DB_CP_MAX_LIFETIME = env.int("DJANGO_DB_CONNECTION_POOL_MAX_LIFETIME", 86400)
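# A hypothetical tuning sketch: the pool limits above are read from these
# environment variables, so they can be adjusted per deployment (values below
# are illustrative only and must be set before Django settings are loaded).
import os

os.environ.setdefault("DJANGO_DB_CONNECTION_POOL_MIN_SIZE", "2")
os.environ.setdefault("DJANGO_DB_CONNECTION_POOL_MAX_SIZE", "20")
os.environ.setdefault("DJANGO_DB_CONNECTION_POOL_MAX_IDLE", "300")
os.environ.setdefault("DJANGO_DB_CONNECTION_POOL_MAX_LIFETIME", "3600")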
# Password validation
# https://docs.djangoproject.com/en/5.0/ref/settings/#auth-password-validators
+16 -1
@@ -1,7 +1,6 @@
from config.django.base import * # noqa
from config.env import env
DEBUG = env.bool("DJANGO_DEBUG", default=True)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])
@@ -14,6 +13,14 @@ DATABASES = {
"PASSWORD": env("POSTGRES_PASSWORD", default="prowler"),
"HOST": env("POSTGRES_HOST", default="postgres-db"),
"PORT": env("POSTGRES_PORT", default="5432"),
"OPTIONS": {
"pool": {
"min_size": DB_CP_MIN_SIZE, # noqa: F405
"max_size": DB_CP_MAX_SIZE, # noqa: F405
"max_idle": DB_CP_MAX_IDLE, # noqa: F405
"max_lifetime": DB_CP_MAX_LIFETIME, # noqa: F405
}
},
},
"admin": {
"ENGINE": "psqlextra.backend",
@@ -22,6 +29,14 @@ DATABASES = {
"PASSWORD": env("POSTGRES_ADMIN_PASSWORD", default="S3cret"),
"HOST": env("POSTGRES_HOST", default="postgres-db"),
"PORT": env("POSTGRES_PORT", default="5432"),
"OPTIONS": {
"pool": {
"min_size": DB_CP_MIN_SIZE, # noqa: F405
"max_size": DB_CP_MAX_SIZE, # noqa: F405
"max_idle": DB_CP_MAX_IDLE, # noqa: F405
"max_lifetime": DB_CP_MAX_LIFETIME, # noqa: F405
}
},
},
}
DATABASES["default"] = DATABASES["prowler_user"]
+16 -1
@@ -1,7 +1,6 @@
from config.django.base import * # noqa
from config.env import env
DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
@@ -15,6 +14,14 @@ DATABASES = {
"PASSWORD": env("POSTGRES_PASSWORD"),
"HOST": env("POSTGRES_HOST"),
"PORT": env("POSTGRES_PORT"),
"OPTIONS": {
"pool": {
"min_size": DB_CP_MIN_SIZE, # noqa: F405
"max_size": DB_CP_MAX_SIZE, # noqa: F405
"max_idle": DB_CP_MAX_IDLE, # noqa: F405
"max_lifetime": DB_CP_MAX_LIFETIME, # noqa: F405
}
},
},
"admin": {
"ENGINE": "psqlextra.backend",
@@ -23,6 +30,14 @@ DATABASES = {
"PASSWORD": env("POSTGRES_ADMIN_PASSWORD"),
"HOST": env("POSTGRES_HOST"),
"PORT": env("POSTGRES_PORT"),
"OPTIONS": {
"pool": {
"min_size": DB_CP_MIN_SIZE, # noqa: F405
"max_size": DB_CP_MAX_SIZE, # noqa: F405
"max_idle": DB_CP_MAX_IDLE, # noqa: F405
"max_lifetime": DB_CP_MAX_LIFETIME, # noqa: F405
}
},
},
}
DATABASES["default"] = DATABASES["prowler_user"]
+8 -1
@@ -1,7 +1,6 @@
from config.django.base import * # noqa
from config.env import env
DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
@@ -14,6 +13,14 @@ DATABASES = {
"PASSWORD": env("POSTGRES_PASSWORD", default="postgres"),
"HOST": env("POSTGRES_HOST", default="localhost"),
"PORT": env("POSTGRES_PORT", default="5432"),
"OPTIONS": {
"pool": {
"min_size": DB_CP_MIN_SIZE, # noqa: F405
"max_size": DB_CP_MAX_SIZE, # noqa: F405
"max_idle": DB_CP_MAX_IDLE, # noqa: F405
"max_lifetime": DB_CP_MAX_LIFETIME, # noqa: F405
}
},
},
}
+2
@@ -626,6 +626,7 @@ def findings_fixture(scans_fixture, resources_fixture):
"CheckId": "test_check_id",
"Description": "test description apple sauce",
},
first_seen_at="2024-01-02T00:00:00Z",
)
finding1.add_resources([resource1])
@@ -651,6 +652,7 @@ def findings_fixture(scans_fixture, resources_fixture):
"CheckId": "test_check_id",
"Description": "test description orange juice",
},
first_seen_at="2024-01-02T00:00:00Z",
)
finding2.add_resources([resource2])
+13 -8
@@ -219,24 +219,28 @@ def perform_prowler_scan(
# Process finding
with rls_transaction(tenant_id):
finding_uid = finding.uid
last_first_seen_at = None
if finding_uid not in last_status_cache:
most_recent_finding = (
Finding.objects.filter(uid=finding_uid)
.order_by("-id")
.values("status")
.values("status", "first_seen_at")
.first()
)
last_status = (
most_recent_finding["status"]
if most_recent_finding
else None
)
last_status_cache[finding_uid] = last_status
last_status = None
if most_recent_finding:
last_status = most_recent_finding["status"]
last_first_seen_at = most_recent_finding["first_seen_at"]
last_status_cache[finding_uid] = last_status, last_first_seen_at
else:
last_status = last_status_cache[finding_uid]
last_status, last_first_seen_at = last_status_cache[finding_uid]
status = FindingStatus[finding.status]
delta = _create_finding_delta(last_status, status)
# Findings that predate this change (first occurrence has delta != "new"): the first such finding gets the current date as first_seen_at, and subsequent findings with the same UID inherit the date from the previous finding.
# New findings (delta == "new"): the first occurrence gets the current date as first_seen_at, and later findings with the same UID reuse that date.
if not last_first_seen_at:
last_first_seen_at = datetime.now(tz=timezone.utc)
# Create the finding
finding_instance = Finding.objects.create(
@@ -251,6 +255,7 @@ def perform_prowler_scan(
raw_result=finding.raw,
check_id=finding.check_id,
scan=scan_instance,
first_seen_at=last_first_seen_at,
)
finding_instance.add_resources([resource_instance])
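# A condensed sketch of the first_seen_at rule applied above (function name
# invented for illustration): reuse the timestamp carried by an earlier finding
# with the same UID, otherwise stamp the first occurrence with the current time.
from datetime import datetime, timezone


def resolve_first_seen_at(previous_first_seen_at):
    return previous_first_seen_at or datetime.now(tz=timezone.utc)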
+24
@@ -0,0 +1,24 @@
apiVersion: v2
name: prowler-api
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "5.1.1"
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-api.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-api.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-api.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-api.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-api.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-api.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "prowler-api.labels" -}}
helm.sh/chart: {{ include "prowler-api.chart" . }}
{{ include "prowler-api.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "prowler-api.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-api.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-api.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-api.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "prowler-api.fullname" . }}-config
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
data:
config.yaml: |-
{{- toYaml .Values.mainConfig | nindent 4 }}
@@ -0,0 +1,85 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "prowler-api.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler-api.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler-api.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
{{- range $name,$config := .Values.containers }}
{{- if $config.enabled }}
- name: {{ $name }}
securityContext:
{{- toYaml $config.securityContext | nindent 12 }}
image: "{{ $config.image.repository }}:{{ $config.image.tag | default $.Chart.AppVersion }}"
imagePullPolicy: {{ $config.image.pullPolicy }}
envFrom:
- secretRef:
name: {{ include "prowler-api.fullname" $ }}
command:
{{- toYaml $config.command | nindent 12 }}
{{- if $config.ports }}
ports:
{{- toYaml $config.ports | nindent 12 }}
{{- end }}
livenessProbe:
{{- toYaml $config.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml $config.readinessProbe | nindent 12 }}
resources:
{{- toYaml $config.resources | nindent 12 }}
volumeMounts:
- name: {{ include "prowler-api.fullname" $ }}-config
mountPath: {{ $.Values.releaseConfigRoot }}{{ $.Values.releaseConfigPath }}
subPath: config.yaml
{{- with .volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- end }}
volumes:
- name: {{ include "prowler-api.fullname" . }}-config
configMap:
name: {{ include "prowler-api.fullname" . }}-config
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- with .pathType }}
pathType: {{ . }}
{{- end }}
backend:
service:
name: {{ include "prowler-api.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $k, $v := .Values.secrets }}
{{ $k }}: {{ $v | toString | b64enc | quote }}
{{- end }}
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "prowler-api.fullname" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
{{- range $name,$config := .Values.containers }}
{{- if $config.ports }}
{{- range $p := $config.ports }}
- port: {{ $p.containerPort }}
targetPort: {{ $p.containerPort }}
protocol: TCP
name: {{ $config.name }}
{{- end }}
{{- end }}
{{- end }}
selector:
{{- include "prowler-api.selectorLabels" . | nindent 4 }}
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler-api.serviceAccountName" . }}
labels:
{{- include "prowler-api.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
+625
@@ -0,0 +1,625 @@
# Default values for prowler-api.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This will set the replicaset count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
containers:
prowler-api:
enabled: true
image:
repository: prowlercloud/prowler-api
pullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
protocol: TCP
command: ["/home/prowler/docker-entrypoint.sh", "prod"]
worker:
enabled: true
image:
repository: prowlercloud/prowler-api
pullPolicy: IfNotPresent
command: ["/home/prowler/docker-entrypoint.sh", "worker"]
worker-beat:
enabled: true
image:
repository: prowlercloud/prowler-api
pullPolicy: IfNotPresent
command: ["../docker-entrypoint.sh", "beat"]
secrets:
POSTGRES_HOST:
POSTGRES_PORT: 5432
POSTGRES_ADMIN_USER:
POSTGRES_ADMIN_PASSWORD:
POSTGRES_USER:
POSTGRES_PASSWORD:
POSTGRES_DB:
# Valkey settings
VALKEY_HOST: valkey-headless
VALKEY_PORT: "6379"
VALKEY_DB: "0"
# Django settings
DJANGO_ALLOWED_HOSTS: localhost,127.0.0.1,prowler-api
DJANGO_BIND_ADDRESS: 0.0.0.0
DJANGO_PORT: "8080"
DJANGO_DEBUG: False
DJANGO_SETTINGS_MODULE: config.django.production
# Select one of [ndjson|human_readable]
DJANGO_LOGGING_FORMATTER: human_readable
# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
# Applies to both Django and Celery Workers
DJANGO_LOGGING_LEVEL: INFO
# Defaults to the maximum available based on CPU cores if not set.
DJANGO_WORKERS: 2
# Token lifetime is in minutes
DJANGO_ACCESS_TOKEN_LIFETIME: "30"
# Token lifetime is in minutes
DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
DJANGO_CACHE_MAX_AGE: "3600"
DJANGO_STALE_WHILE_REVALIDATE: "60"
DJANGO_MANAGE_DB_PARTITIONS: "False"
# openssl genrsa -out private.pem 2048
DJANGO_TOKEN_SIGNING_KEY:
# openssl rsa -in private.pem -pubout -out public.pem
DJANGO_TOKEN_VERIFYING_KEY:
# openssl rand -base64 32
DJANGO_SECRETS_ENCRYPTION_KEY:
DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400
releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
releaseConfigPath: prowler/config/config.yaml
mainConfig:
# AWS Configuration
aws:
# AWS Global Configuration
# aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
mute_non_default_regions: False
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
# Mutelist:
# Accounts:
# "*":
# Checks:
# "*":
# Regions:
# - "ap-southeast-1"
# - "ap-southeast-2"
# Resources:
# - "*"
# AWS IAM Configuration
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
max_unused_access_keys_days: 45
# aws.iam_user_console_access_unused --> CIS recommends 45 days
max_console_access_days: 45
# AWS EC2 Configuration
# aws.ec2_elastic_ip_shodan
# TODO: create common config
shodan_api_key: null
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
max_security_group_rules: 50
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
max_ec2_instance_age_in_days: 180
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
# allowed network interface types for security groups open to the Internet
ec2_allowed_interface_types:
[
"api_gateway_managed",
"vpc_endpoint",
]
# allowed network interface owners for security groups open to the Internet
ec2_allowed_instance_owners:
[
"amazon-elb"
]
# aws.ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports
ec2_high_risk_ports:
[
25,
110,
135,
143,
445,
3000,
4333,
5000,
5500,
8080,
8088,
]
# AWS ECS Configuration
# aws.ecs_service_fargate_latest_platform_version
fargate_linux_latest_version: "1.4.0"
fargate_windows_latest_version: "1.0.0"
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
# AWS SSM Configuration (aws.ssm_documents_set_as_public)
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
# Multi account environment: Any additional trusted account numbers should be added as a list of strings, e.g.
# trusted_account_ids : ["123456789012", "098765432109", "678901234567"]
trusted_account_ids: []
# AWS Cloudwatch Configuration
# aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
log_group_retention_days: 365
# AWS CloudFormation Configuration
# cloudformation_stack_cdktoolkit_bootstrap_version --> by default is 21
recommended_cdk_bootstrap_version: 21
# AWS AppStream Session Configuration
# aws.appstream_fleet_session_idle_disconnect_timeout
max_idle_disconnect_timeout_in_seconds: 600 # 10 Minutes
# aws.appstream_fleet_session_disconnect_timeout
max_disconnect_timeout_in_seconds: 300 # 5 Minutes
# aws.appstream_fleet_maximum_session_duration
max_session_duration_seconds: 36000 # 10 Hours
# AWS Lambda Configuration
# aws.awslambda_function_using_supported_runtimes
obsolete_lambda_runtimes:
[
"java8",
"go1.x",
"provided",
"python3.6",
"python2.7",
"python3.7",
"nodejs4.3",
"nodejs4.3-edge",
"nodejs6.10",
"nodejs",
"nodejs8.10",
"nodejs10.x",
"nodejs12.x",
"nodejs14.x",
"nodejs16.x",
"dotnet5.0",
"dotnet7",
"dotnetcore1.0",
"dotnetcore2.0",
"dotnetcore2.1",
"dotnetcore3.1",
"ruby2.5",
"ruby2.7",
]
# aws.awslambda_function_vpc_is_in_multi_azs
lambda_min_azs: 2
# AWS Organizations
# aws.organizations_scp_check_deny_regions
# aws.organizations_enabled_regions: [
# "eu-central-1",
# "eu-west-1",
# "us-east-1"
# ]
organizations_enabled_regions: []
organizations_trusted_delegated_administrators: []
# AWS ECR
# aws.ecr_repositories_scan_vulnerabilities_in_latest_image
# CRITICAL
# HIGH
# MEDIUM
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
# AWS Trusted Advisor
# aws.trustedadvisor_premium_support_plan_subscribed
verify_premium_support_plans: True
# AWS CloudTrail Configuration
# aws.cloudtrail_threat_detection_privilege_escalation
threat_detection_privilege_escalation_threshold: 0.2 # Percentage of actions found to decide if it is a privilege_escalation attack event, by default is 0.2 (20%)
threat_detection_privilege_escalation_minutes: 1440 # Past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
threat_detection_privilege_escalation_actions:
[
"AddPermission",
"AddRoleToInstanceProfile",
"AddUserToGroup",
"AssociateAccessPolicy",
"AssumeRole",
"AttachGroupPolicy",
"AttachRolePolicy",
"AttachUserPolicy",
"ChangePassword",
"CreateAccessEntry",
"CreateAccessKey",
"CreateDevEndpoint",
"CreateEventSourceMapping",
"CreateFunction",
"CreateGroup",
"CreateJob",
"CreateKeyPair",
"CreateLoginProfile",
"CreatePipeline",
"CreatePolicyVersion",
"CreateRole",
"CreateStack",
"DeleteRolePermissionsBoundary",
"DeleteRolePolicy",
"DeleteUserPermissionsBoundary",
"DeleteUserPolicy",
"DetachRolePolicy",
"DetachUserPolicy",
"GetCredentialsForIdentity",
"GetId",
"GetPolicyVersion",
"GetUserPolicy",
"Invoke",
"ModifyInstanceAttribute",
"PassRole",
"PutGroupPolicy",
"PutPipelineDefinition",
"PutRolePermissionsBoundary",
"PutRolePolicy",
"PutUserPermissionsBoundary",
"PutUserPolicy",
"ReplaceIamInstanceProfileAssociation",
"RunInstances",
"SetDefaultPolicyVersion",
"UpdateAccessKey",
"UpdateAssumeRolePolicy",
"UpdateDevEndpoint",
"UpdateEventSourceMapping",
"UpdateFunctionCode",
"UpdateJob",
"UpdateLoginProfile",
]
# aws.cloudtrail_threat_detection_enumeration
threat_detection_enumeration_threshold: 0.3 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
threat_detection_enumeration_minutes: 1440 # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
threat_detection_enumeration_actions:
[
"DescribeAccessEntry",
"DescribeAccountAttributes",
"DescribeAvailabilityZones",
"DescribeBundleTasks",
"DescribeCarrierGateways",
"DescribeClientVpnRoutes",
"DescribeCluster",
"DescribeDhcpOptions",
"DescribeFlowLogs",
"DescribeImages",
"DescribeInstanceAttribute",
"DescribeInstanceInformation",
"DescribeInstanceTypes",
"DescribeInstances",
"DescribeInstances",
"DescribeKeyPairs",
"DescribeLogGroups",
"DescribeLogStreams",
"DescribeOrganization",
"DescribeRegions",
"DescribeSecurityGroups",
"DescribeSnapshotAttribute",
"DescribeSnapshotTierStatus",
"DescribeSubscriptionFilters",
"DescribeTransitGatewayMulticastDomains",
"DescribeVolumes",
"DescribeVolumesModifications",
"DescribeVpcEndpointConnectionNotifications",
"DescribeVpcs",
"GetAccount",
"GetAccountAuthorizationDetails",
"GetAccountSendingEnabled",
"GetBucketAcl",
"GetBucketLogging",
"GetBucketPolicy",
"GetBucketReplication",
"GetBucketVersioning",
"GetCallerIdentity",
"GetCertificate",
"GetConsoleScreenshot",
"GetCostAndUsage",
"GetDetector",
"GetEbsDefaultKmsKeyId",
"GetEbsEncryptionByDefault",
"GetFindings",
"GetFlowLogsIntegrationTemplate",
"GetIdentityVerificationAttributes",
"GetInstances",
"GetIntrospectionSchema",
"GetLaunchTemplateData",
"GetLaunchTemplateData",
"GetLogRecord",
"GetParameters",
"GetPolicyVersion",
"GetPublicAccessBlock",
"GetQueryResults",
"GetRegions",
"GetSMSAttributes",
"GetSMSSandboxAccountStatus",
"GetSendQuota",
"GetTransitGatewayRouteTableAssociations",
"GetUserPolicy",
"HeadObject",
"ListAccessKeys",
"ListAccounts",
"ListAllMyBuckets",
"ListAssociatedAccessPolicies",
"ListAttachedUserPolicies",
"ListClusters",
"ListDetectors",
"ListDomains",
"ListFindings",
"ListHostedZones",
"ListIPSets",
"ListIdentities",
"ListInstanceProfiles",
"ListObjects",
"ListOrganizationalUnitsForParent",
"ListOriginationNumbers",
"ListPolicyVersions",
"ListRoles",
"ListRoles",
"ListRules",
"ListServiceQuotas",
"ListSubscriptions",
"ListTargetsByRule",
"ListTopics",
"ListUsers",
"LookupEvents",
"Search",
]
# aws.cloudtrail_threat_detection_llm_jacking
threat_detection_llm_jacking_threshold: 0.4 # Percentage of actions found to decide if it is an LLM Jacking attack event; by default it is 0.4 (40%)
threat_detection_llm_jacking_minutes: 1440 # Past minutes to search from now for LLM Jacking attacks; by default it is 1440 minutes (24 hours)
threat_detection_llm_jacking_actions:
[
"PutUseCaseForModelAccess", # Submits a use case for model access, providing justification (Write).
"PutFoundationModelEntitlement", # Grants entitlement for accessing a foundation model (Write).
"PutModelInvocationLoggingConfiguration", # Configures logging for model invocations (Write).
"CreateFoundationModelAgreement", # Creates a new agreement to use a foundation model (Write).
"InvokeModel", # Invokes a specified Bedrock model for inference using provided prompt and parameters (Read).
"InvokeModelWithResponseStream", # Invokes a Bedrock model for inference with real-time token streaming (Read).
"GetUseCaseForModelAccess", # Retrieves an existing use case for model access (Read).
"GetModelInvocationLoggingConfiguration", # Fetches the logging configuration for model invocations (Read).
"GetFoundationModelAvailability", # Checks the availability of a foundation model for use (Read).
"ListFoundationModelAgreementOffers", # Lists available agreement offers for accessing foundation models (List).
"ListFoundationModels", # Lists the available foundation models in Bedrock (List).
"ListProvisionedModelThroughputs", # Lists the provisioned throughput for previously created models (List).
]
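As a minimal sketch (an assumption about how such thresholds are commonly applied, not Prowler's actual implementation), the three threat-detection blocks above can be read as: collect the CloudTrail event names seen within the lookback window, intersect them with the configured action list, and flag an attack once the matched fraction reaches the threshold. The function below is hypothetical and only illustrates that calculation.

from datetime import datetime, timedelta, timezone

def is_potential_attack(events, configured_actions, threshold, minutes):
    # events: iterable of dicts with "eventName" (str) and "eventTime" (aware datetime);
    # configured_actions, threshold and minutes come from the corresponding config block.
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=minutes)
    seen = {e["eventName"] for e in events if e["eventTime"] >= cutoff}
    matched = seen & set(configured_actions)
    # A threshold of 0.2 means at least 20% of the configured actions must be observed.
    return len(matched) / len(configured_actions) >= threshold

With the defaults above, this sketch would raise a finding once roughly a fifth of the listed privilege_escalation actions had been observed within the last 24 hours.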
# AWS RDS Configuration
# aws.rds_instance_backup_enabled
# Whether to check RDS instance replicas or not
check_rds_instance_replicas: False
# AWS ACM Configuration
# aws.acm_certificates_expiration_check
days_to_expire_threshold: 7
# aws.acm_certificates_with_secure_key_algorithms
insecure_key_algorithms:
[
"RSA-1024",
"P-192",
"SHA-1",
]
# AWS EKS Configuration
# aws.eks_control_plane_logging_all_types_enabled
# EKS control plane logging types that must be enabled
eks_required_log_types:
[
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler",
]
# aws.eks_cluster_uses_a_supported_version
# EKS clusters must be version 1.28 or higher
eks_cluster_oldest_version_supported: "1.28"
# AWS CodeBuild Configuration
# aws.codebuild_project_no_secrets_in_variables
# CodeBuild sensitive variables that are excluded from the check
excluded_sensitive_environment_variables:
[
]
# AWS ELB Configuration
# aws.elb_is_in_multiple_az
# Minimum number of Availability Zones that a CLB must be in
elb_min_azs: 2
# AWS ELBv2 Configuration
# aws.elbv2_is_in_multiple_az
# Minimum number of Availability Zones that an ELBv2 must be in
elbv2_min_azs: 2
# AWS Secrets Configuration
# Patterns to ignore in the secrets checks
secrets_ignore_patterns: []
# AWS Secrets Manager Configuration
# aws.secretsmanager_secret_unused
# Maximum number of days a secret can be unused
max_days_secret_unused: 90
# aws.secretsmanager_secret_rotated_periodically
# Maximum number of days a secret can go without being rotated
max_days_secret_unrotated: 90
# AWS Kinesis Configuration
# Minimum retention period in hours for Kinesis streams
min_kinesis_stream_retention_hours: 168 # 7 days
# Azure Configuration
azure:
# Azure Network Configuration
# azure.network_public_ip_shodan
# TODO: create common config
shodan_api_key: null
# Azure App Service
# azure.app_ensure_php_version_is_latest
php_latest_version: "8.2"
# azure.app_ensure_python_version_is_latest
python_latest_version: "3.12"
# azure.app_ensure_java_version_is_latest
java_latest_version: "17"
# Azure SQL Server
# azure.sqlserver_minimal_tls_version
recommended_minimal_tls_versions:
[
"1.2",
"1.3",
]
# GCP Configuration
gcp:
# GCP Compute Configuration
# gcp.compute_public_address_shodan
shodan_api_key: null
# Kubernetes Configuration
kubernetes:
# Kubernetes API Server
# kubernetes.apiserver_audit_log_maxbackup_set
audit_log_maxbackup: 10
# kubernetes.apiserver_audit_log_maxsize_set
audit_log_maxsize: 100
# kubernetes.apiserver_audit_log_maxage_set
audit_log_maxage: 30
# kubernetes.apiserver_strong_ciphers_only
apiserver_strong_ciphers:
[
"TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256",
]
# Kubelet
# kubernetes.kubelet_strong_ciphers_only
kubelet_strong_ciphers:
[
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256",
]
# This is for the secrets used to pull an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 80
# This block is for setting up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
+23
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -39,4 +39,3 @@ spec:
path: {{ $value }}
{{- end }}
{{- end }}
+23
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
+6
@@ -0,0 +1,6 @@
apiVersion: v2
name: prowler-ui
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "5.1.1"
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-ui.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-ui.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
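For illustration only, the naming logic of the fullname helper above can be restated in Python. This is a sketch, not part of the chart, and rstrip("-") is a simplification of the template's trimSuffix "-"; it just shows why names are capped at 63 characters and how the override and release-name cases interact.

def fullname(release_name, chart_name, name_override="", fullname_override=""):
    # Mirrors the template: an explicit fullnameOverride wins; otherwise the release
    # name is reused when it already contains the chart (or overridden) name, and
    # the two are joined otherwise. Everything is capped at 63 characters for DNS.
    if fullname_override:
        return fullname_override[:63].rstrip("-")
    name = name_override or chart_name
    if name in release_name:
        return release_name[:63].rstrip("-")
    return f"{release_name}-{name}"[:63].rstrip("-")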
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "prowler-ui.labels" -}}
helm.sh/chart: {{ include "prowler-ui.chart" . }}
{{ include "prowler-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "prowler-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "prowler-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "prowler-ui.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prowler-ui.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
envFrom:
- secretRef:
name: {{ include "prowler-ui.fullname" $ }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- with .Values.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- with .pathType }}
pathType: {{ . }}
{{- end }}
backend:
service:
name: {{ include "prowler-ui.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $k, $v := .Values.secrets }}
{{ $k }}: {{ $v | toString | b64enc | quote }}
{{- end }}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "prowler-ui.fullname" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "prowler-ui.selectorLabels" . | nindent 4 }}
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "prowler-ui.serviceAccountName" . }}
labels:
{{- include "prowler-ui.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
+132
@@ -0,0 +1,132 @@
# Default values for prowler-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This will set the replicaset count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This sets the container image. More information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
repository: prowlercloud/prowler-ui
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
# This is for the secrets used to pull an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
secrets:
SITE_URL: http://localhost:3000
API_BASE_URL: http://prowler-api:8080/api/v1
NEXT_PUBLIC_API_DOCS_URL: http://prowler-api:8080/api/v1/docs
AUTH_TRUST_HOST: True
UI_PORT: 3000
# openssl rand -base64 32
AUTH_SECRET:
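AUTH_SECRET is left empty above and, per the comment, is expected to be generated with `openssl rand -base64 32`. As an alternative sketch (not part of the chart), an equivalent value can be produced in Python:

import base64
import secrets

# 32 random bytes, base64-encoded: the same shape of value as `openssl rand -base64 32`.
print(base64.b64encode(secrets.token_bytes(32)).decode())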
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
# This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
# This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 3000
# This block is for setting up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
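A hypothetical usage sketch for the get_table helper above. The DataFrame contents are made up, get_section_containers_cis is assumed to be importable from the dashboard package, and the return value is presumably the dashboard's rendered section containers grouped by requirement and section.

import pandas as pd

# Made-up findings with the columns get_table selects; values are illustrative only.
data = pd.DataFrame(
    [
        {
            "REQUIREMENTS_ID": "2.1.1",
            "REQUIREMENTS_DESCRIPTION": "Example requirement",
            "REQUIREMENTS_ATTRIBUTES_SECTION": "2. Storage",
            "CHECKID": "example_check",
            "STATUS": "PASS",
            "REGION": "us-east-1",
            "ACCOUNTID": "123456789012",
            "RESOURCEID": "example-resource",
        }
    ]
)

containers = get_table(data)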
+25
@@ -0,0 +1,25 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )
-1
@@ -56,7 +56,6 @@ from prowler.providers.<provider>.lib.service.service import ServiceParentClass
# Create a class for the Service
################## <Service>
class <Service>(ServiceParentClass):
def __init__(self, provider):
# Call Service Parent Class __init__
+4 -2
@@ -669,8 +669,9 @@ class Test_app_ensure_http_is_redirected_to_https:
# Create the custom App object to be tested
app_client.apps = {
AZURE_SUBSCRIPTION_ID: {
"app_id-1": WebApp(
resource_id: WebApp(
resource_id=resource_id,
name="app_id-1",
auth_enabled=True,
configurations=mock.MagicMock(),
client_cert_mode="Ignore",
@@ -716,8 +717,9 @@ class Test_app_ensure_http_is_redirected_to_https:
app_client.apps = {
AZURE_SUBSCRIPTION_ID: {
"app_id-1": WebApp(
resource_id: WebApp(
resource_id=resource_id,
name="app_id-1",
auth_enabled=True,
configurations=mock.MagicMock(),
client_cert_mode="Ignore",
+5 -4
@@ -52,7 +52,7 @@
],
"Resource": "*",
"Effect": "Allow",
"Sid": "AllowMoreReadForProwler"
"Sid": "AllowMoreReadOnly"
},
{
"Effect": "Allow",
@@ -60,9 +60,10 @@
"apigateway:GET"
],
"Resource": [
"arn:aws:apigateway:*::/restapis/*",
"arn:aws:apigateway:*::/apis/*"
]
"arn:*:apigateway:*::/restapis/*",
"arn:*:apigateway:*::/apis/*"
],
"Sid": "AllowAPIGatewayReadOnly"
}
]
}
-13
@@ -1,13 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"securityhub:BatchImportFindings",
"securityhub:GetFindings"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
@@ -0,0 +1,127 @@
AWSTemplateFormatVersion: "2010-09-09"
# You can invoke CloudFormation and pass the principal ARN from a command line like this:
# aws cloudformation create-stack \
# --capabilities CAPABILITY_IAM --capabilities CAPABILITY_NAMED_IAM \
# --template-body "file://prowler-pro-saas-scan-role.yaml" \
# --stack-name "ProwlerProSaaSScanRole" \
# --parameters "ParameterKey=ExternalId,ParameterValue=ProvidedExternalID"
Description: |
This template creates the ProwlerScan IAM Role in this account with
all read-only permissions to scan your account for security issues.
Contains two AWS managed policies (SecurityAudit and ViewOnlyAccess) and an inline policy.
It sets the trust policy on that IAM Role to permit Prowler to assume that role.
Parameters:
ExternalId:
Description: |
This is the External ID that Prowler will use to assume the ProwlerScan IAM Role.
Type: String
MinLength: 1
AllowedPattern: ".+"
ConstraintDescription: "ExternalId must not be empty."
AccountId:
Description: |
AWS Account ID that will assume the role created. If you are deploying this template to be used in Prowler Cloud, please do not edit this.
Type: String
Default: "232136659152"
MinLength: 12
MaxLength: 12
AllowedPattern: "[0-9]{12}"
ConstraintDescription: "AccountId must be a valid AWS Account ID."
IAMPrincipal:
Description: |
The IAM principal type and name that will be allowed to assume the role created; leave an * for all the IAM principals in your AWS account. If you are deploying this template to be used in Prowler Cloud, please do not edit this.
Type: String
Default: role/prowler*
Resources:
ProwlerScan:
Type: AWS::IAM::Role
Properties:
RoleName: ProwlerScan
AssumeRolePolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Principal:
AWS: !Sub "arn:${AWS::Partition}:iam::${AccountId}:root"
Action: "sts:AssumeRole"
Condition:
StringEquals:
"sts:ExternalId": !Sub ${ExternalId}
StringLike:
"aws:PrincipalArn": !Sub "arn:${AWS::Partition}:iam::${AccountId}:${IAMPrincipal}"
MaxSessionDuration: 3600
ManagedPolicyArns:
- "arn:aws:iam::aws:policy/SecurityAudit"
- "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"
Policies:
- PolicyName: ProwlerScan
PolicyDocument:
Version: "2012-10-17"
Statement:
- Sid: AllowMoreReadOnly
Effect: Allow
Action:
- "account:Get*"
- "appstream:Describe*"
- "appstream:List*"
- "backup:List*"
- "bedrock:List*"
- "bedrock:Get*"
- "cloudtrail:GetInsightSelectors"
- "codeartifact:List*"
- "codebuild:BatchGet*"
- "codebuild:ListReportGroups"
- "cognito-idp:GetUserPoolMfaConfig"
- "dlm:Get*"
- "drs:Describe*"
- "ds:Get*"
- "ds:Describe*"
- "ds:List*"
- "dynamodb:GetResourcePolicy"
- "ec2:GetEbsEncryptionByDefault"
- "ec2:GetSnapshotBlockPublicAccessState"
- "ec2:GetInstanceMetadataDefaults"
- "ecr:Describe*"
- "ecr:GetRegistryScanningConfiguration"
- "elasticfilesystem:DescribeBackupPolicy"
- "glue:GetConnections"
- "glue:GetSecurityConfiguration*"
- "glue:SearchTables"
- "lambda:GetFunction*"
- "logs:FilterLogEvents"
- "lightsail:GetRelationalDatabases"
- "macie2:GetMacieSession"
- "macie2:GetAutomatedDiscoveryConfiguration"
- "s3:GetAccountPublicAccessBlock"
- "shield:DescribeProtection"
- "shield:GetSubscriptionState"
- "securityhub:BatchImportFindings"
- "securityhub:GetFindings"
- "servicecatalog:Describe*"
- "servicecatalog:List*"
- "ssm:GetDocument"
- "ssm-incidents:List*"
- "states:ListTagsForResource"
- "support:Describe*"
- "tag:GetTagKeys"
- "wellarchitected:List*"
Resource: "*"
- Sid: AllowAPIGatewayReadOnly
Effect: Allow
Action:
- "apigateway:GET"
Resource:
- "arn:*:apigateway:*::/restapis/*"
- "arn:*:apigateway:*::/apis/*"
Tags:
- Key: "Service"
Value: "https://prowler.com"
- Key: "Support"
Value: "support@prowler.com"
- Key: "CloudFormation"
Value: "true"
- Key: "Name"
Value: "ProwlerScan"
+10
@@ -0,0 +1,10 @@
## Deployment using Terraform
To deploy the Prowler Scan role so that Prowler can scan your AWS account, run the following commands in your terminal:
1. `terraform init`
2. `terraform plan`
3. `terraform apply`
During the `terraform plan` and `terraform apply` steps you will be asked for an External ID to be configured in the `ProwlerScan` IAM role.
> Note that Terraform will use the AWS credentials of your default profile.
+111
@@ -0,0 +1,111 @@
# Variables
###################################
variable "external_id" {
type = string
description = "This is the External ID that Prowler will use to assume the role ProwlerScan IAM Role."
validation {
condition = length(var.external_id) > 0
error_message = "ExternalId must not be empty."
}
}
variable "account_id" {
type = string
description = "AWS Account ID that will assume the role created, if you are deploying this template to be used in Prowler Cloud please do not edit this."
default = "232136659152"
validation {
condition = length(var.account_id) == 12
error_message = "AccountId must be a valid AWS Account ID."
}
}
variable "iam_principal" {
type = string
description = "The IAM principal type and name that will be allowed to assume the role created, leave an * for all the IAM principals in your AWS account. If you are deploying this template to be used in Prowler Cloud please do not edit this."
default = "role/prowler*"
}
##### PLEASE, DO NOT EDIT BELOW THIS LINE #####
# Terraform Provider Configuration
###################################
terraform {
required_version = ">= 1.5"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.83"
}
}
}
provider "aws" {
region = "us-east-1"
default_tags {
tags = {
"Name" = "ProwlerScan",
"Terraform" = "true",
"Service" = "https://prowler.com",
"Support" = "support@prowler.com"
}
}
}
data "aws_partition" "current" {}
# IAM Role
###################################
data "aws_iam_policy_document" "prowler_assume_role_policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "AWS"
identifiers = ["arn:${data.aws_partition.current.partition}:iam::${var.account_id}:root"]
}
condition {
test = "StringEquals"
variable = "sts:ExternalId"
values = [
var.external_id,
]
}
condition {
test = "StringLike"
variable = "aws:PrincipalArn"
values = [
"arn:${data.aws_partition.current.partition}:iam::${var.account_id}:${var.iam_principal}",
]
}
}
}
resource "aws_iam_role" "prowler_scan" {
name = "ProwlerScan"
assume_role_policy = data.aws_iam_policy_document.prowler_assume_role_policy.json
}
resource "aws_iam_policy" "prowler_scan_policy" {
name = "ProwlerScan"
description = "Prowler Scan Policy"
policy = file("../../prowler-additions-policy.json")
}
resource "aws_iam_role_policy_attachment" "prowler_scan_policy_attachment" {
role = aws_iam_role.prowler_scan.name
policy_arn = aws_iam_policy.prowler_scan_policy.arn
}
resource "aws_iam_role_policy_attachment" "prowler_scan_securityaudit_policy_attachment" {
role = aws_iam_role.prowler_scan.name
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/SecurityAudit"
}
resource "aws_iam_role_policy_attachment" "prowler_scan_viewonly_policy_attachment" {
role = aws_iam_role.prowler_scan.name
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/job-function/ViewOnlyAccess"
}
Generated
+854 -842
File diff suppressed because it is too large
+14 -7
@@ -455,7 +455,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -476,7 +477,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -497,7 +499,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,7 +521,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -540,7 +544,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -561,7 +566,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -582,7 +588,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
+20 -10
@@ -455,7 +455,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -476,7 +477,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -497,7 +499,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,7 +521,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -540,7 +544,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -561,7 +566,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -582,7 +588,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -603,7 +610,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -624,7 +632,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -645,7 +654,8 @@
],
"Attributes": [
{
"Section": "2.4 Elastic File System (EFS)",
"Section": "2. Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
+18 -9
@@ -474,7 +474,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -495,7 +496,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -516,7 +518,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -538,7 +541,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -559,7 +563,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -580,7 +585,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -601,7 +607,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -622,7 +629,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -643,7 +651,8 @@
],
"Attributes": [
{
"Section": "2.4 Elastic File System (EFS)",
"Section": "2. Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
+18 -9
@@ -474,7 +474,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -495,7 +496,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -516,7 +518,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -538,7 +541,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -559,7 +563,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -580,7 +585,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -601,7 +607,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -622,7 +629,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to anypublicly accessible RDS database instance, you must disable the database PubliclyAccessible flag and update the VPC security group associated with the instance",
@@ -643,7 +651,8 @@
],
"Attributes": [
{
"Section": "2.4 Elastic File System (EFS)",
"Section": "2. Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
+179 -102
@@ -12,7 +12,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Security defaults in Azure Active Directory (Azure AD) make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security",
@@ -34,7 +35,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; • Service Co-Administrators • Subscription Owners • Contributors",
@@ -56,7 +58,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all non-privileged users.",
@@ -76,7 +79,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Do not allow users to remember multi-factor authentication on devices.",
@@ -98,7 +102,8 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Azure Active Directory Conditional Access allows an organization to configure Named locations and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
@@ -118,7 +123,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "CAUTION: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
@@ -138,7 +144,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -158,7 +165,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -178,7 +186,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -198,7 +207,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -220,7 +230,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Require administrators or appropriately delegated users to create new tenants.",
@@ -240,7 +250,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This recommendation extends guest access review by utilizing the Azure AD Privileged Identity Management feature provided in Azure AD Premium P2. Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources a a guest user.",
@@ -260,7 +270,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user. Guest users in every subscription should be review on a regular basis to ensure that inactive and unneeded accounts are removed.",
@@ -280,7 +290,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensures that two alternate forms of identification are provided before allowing a password reset.",
@@ -300,7 +310,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Microsoft Azure provides a Global Banned Password policy that applies to Azure administrative and normal user accounts. This is not applied to user accounts that are synced from an on-premise Active Directory unless Azure AD Connect is used and you enable EnforceCloudPasswordPolicyForPasswordSyncedUsers. Please see the list in default values on the specifics of this policy. To further password security, it is recommended to further define a custom banned password policy.",
@@ -320,7 +330,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that the number of days before users are asked to re-confirm their authentication information is not set to 0.",
@@ -340,7 +350,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that users are notified on their primary and secondary emails on password resets.",
@@ -360,7 +370,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that all Global Administrators are notified if any other administrator resets their password.",
@@ -382,7 +392,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators to provide consent for applications before use.",
@@ -404,7 +414,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Allow users to provide consent for selected permissions when a request is coming from a verified publisher.",
@@ -424,7 +434,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators to provide consent for the apps before use.",
@@ -446,7 +456,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators or appropriately delegated users to register third-party applications.",
@@ -468,7 +478,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Limit guest user permissions.",
@@ -490,7 +500,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict invitations to users with specific administrative roles only.",
@@ -510,7 +520,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Restrict access to the Azure AD administration portal to administrators only. NOTE: This only affects access to the Azure AD administrator's web portal. This setting does not prohibit privileged users from using other methods such as Rest API or Powershell to obtain sensitive information from Azure AD.",
@@ -530,7 +540,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restricts group creation to administrators with permissions only.",
@@ -552,7 +562,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict security group creation to administrators only.",
@@ -572,7 +582,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict security group management to administrators only.",
@@ -594,7 +604,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict Microsoft 365 group creation to administrators only.",
@@ -614,7 +624,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Joining or registering devices to the active directory should require Multi-factor authentication.",
@@ -636,7 +646,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The principle of least privilege should be followed and only necessary privileges should be assigned instead of allowing full administrative access.",
@@ -658,7 +668,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Resource locking is a powerful protection mechanism that can prevent inadvertent modification/deletion of resources within Azure subscriptions/Resource Groups and is a recommended NIST configuration.",
@@ -678,7 +688,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Users who are set as subscription owners are able to make administrative changes to the subscriptions and move them into and out of Azure Active Directories.",
@@ -700,7 +710,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -722,7 +733,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -744,7 +756,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Databases enables threat detection for the instances running your database software. This provides threat intelligence, anomaly detection, and behavior analytics in the Azure Microsoft Defender for Cloud. Instead of being enabled on services like Platform as a Service (PaaS), this implementation will run within your instances as Infrastructure as a Service (IaaS) on the Operating Systems hosting your databases.",
@@ -766,7 +779,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Azure SQL database servers, providing threat intelligence, anomaly detection, andbehavior analytics in the Microsoft Defender for Cloud.",
@@ -788,7 +802,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -810,7 +825,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -832,7 +848,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -854,7 +871,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -876,7 +894,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
@@ -898,7 +917,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -920,7 +940,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
@@ -942,7 +963,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
@@ -964,7 +986,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
@@ -986,7 +1009,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "None of the settings offered by ASC Default policy should be set to effect Disabled.",
@@ -1008,7 +1032,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
@@ -1030,7 +1055,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
@@ -1050,7 +1076,8 @@
"Checks": [],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
@@ -1072,7 +1099,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable security alert emails to subscription owners.",
@@ -1094,7 +1122,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
@@ -1116,7 +1145,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
@@ -1138,7 +1168,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
@@ -1160,7 +1191,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. IMPORTANT: When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 2. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
@@ -1182,7 +1214,8 @@
],
"Attributes": [
{
"Section": "2.2 Microsoft Defender for IoT",
"Section": "2. Microsoft Defender",
"SubSection": "2.2 Microsoft Defender for IoT",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
@@ -1524,7 +1557,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable auditing on SQL Servers.",
@@ -1546,7 +1580,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
@@ -1568,7 +1603,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
@@ -1590,7 +1626,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Use Azure Active Directory Authentication for authentication with SQL Database to manage credentials in a single place.",
@@ -1612,7 +1649,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable Transparent Data Encryption on every SQL server.",
@@ -1634,7 +1672,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
@@ -1656,7 +1695,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable 'Microsoft Defender for SQL' on critical SQL Servers.",
@@ -1678,7 +1718,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
@@ -1700,7 +1741,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
@@ -1722,7 +1764,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Configure 'Send scan reports to' with email addresses of concerned data owners/stakeholders for a critical SQL servers",
@@ -1744,7 +1787,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable Vulnerability Assessment (VA) setting 'Also send email notifications to admins and subscription owners'.",
@@ -1766,7 +1810,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable SSL connection on PostgreSQL Servers.",
@@ -1788,7 +1833,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable log_checkpoints on PostgreSQL Servers.",
@@ -1810,7 +1856,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable log_connections on PostgreSQL Servers.",
@@ -1832,7 +1879,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable log_disconnections on PostgreSQL Servers.",
@@ -1854,7 +1902,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable connection_throttling on PostgreSQL Servers.",
@@ -1876,7 +1925,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure log_retention_days on PostgreSQL Servers is set to an appropriate value.",
@@ -1898,7 +1948,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
@@ -1918,7 +1969,8 @@
"Checks": [],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
@@ -1940,7 +1992,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable SSL connection on MYSQL Servers.",
@@ -1962,7 +2015,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure TLS version on MySQL flexible servers is set to the default value.",
@@ -1984,7 +2038,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable audit_log_enabled on MySQL Servers.",
@@ -2006,7 +2061,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Set audit_log_enabled to include CONNECTION on MySQL Servers.",
@@ -2028,7 +2084,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
@@ -2050,7 +2107,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Private endpoints limit network traffic to approved sources.",
@@ -2072,7 +2130,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Cosmos DB can use tokens or AAD for client authentication which in turn will use Azure RBAC for authorization. Using AAD is significantly more secure because AAD handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
@@ -2094,7 +2153,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnos tic settings are available for each individual resource within a subscription. Settings should be configured for allappropriate resources for your environment.",
@@ -2116,7 +2176,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Prerequisite: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: 'Ensure that a 'Diagnostic Setting' exists.' The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
@@ -2138,7 +2199,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The storage account container containing the activity log export should not be publicly accessible.",
@@ -2160,7 +2222,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
@@ -2182,7 +2245,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
@@ -2204,7 +2268,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
@@ -2226,7 +2291,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
@@ -2248,7 +2314,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create Policy Assignment event.",
@@ -2270,7 +2337,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
@@ -2292,7 +2360,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
@@ -2314,7 +2383,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Network Security Group event.",
@@ -2336,7 +2406,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
@@ -2358,7 +2429,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Security Solution event.",
@@ -2380,7 +2452,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
@@ -2402,7 +2475,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
@@ -2424,7 +2498,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
@@ -2446,7 +2521,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
@@ -2466,7 +2542,7 @@
"Checks": [],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights",
"Section": "5. Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Resource Logs capture activity to the data access plane while the Activity log is a subscription-level log for the control plane. Resource-level diagnostic logs provide insight into operations that were performed within that resource itself; for example, reading or updating a secret from a Key Vault. Currently, 95 Azure resources support Azure Monitoring (See the more information section for a complete list), including Network Security Groups, Load Balancers, Key Vault, AD, Logic Apps, and CosmosDB. The content of these logs varies by resource type. A number of back-end services were not configured to log and store Resource Logs for certain activities or for a sufficient length. It is crucial that monitoring is correctly configured to log all relevant activities and retain those logs for a sufficient length of time. Given that the mean time to detection in an enterprise is 240 days, a minimum retention period of two years is recommended.",
@@ -2486,7 +2562,7 @@
"Checks": [],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights",
"Section": "5. Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The use of Basic or Free SKUs in Azure whilst cost effective have significant limitations in terms of what can be monitored and what support can be realized from Microsoft. Typically, these SKUs do not have a service SLA and Microsoft will usually refuse to provide support for them. Consequently Basic/Free SKUs should never be used for production workloads.",
@@ -2508,7 +2584,8 @@
],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights",
"Section": "5. Logging and Monitoring",
"SubSection": "5.3 Configuring Application Insights",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
+147 -73
@@ -494,7 +494,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults Security Defaults",
"Section": "1.Identity and Access Management",
"SubSection": "1.1 Security Defaults Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Security defaults in Microsoft Entra ID make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security enabled at no extra cost. You may turn on security defaults in the Azure portal.",
@@ -516,7 +517,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults Security Defaults",
"Section": "1.Identity and Access Management",
"SubSection": "1.1 Security Defaults Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; - Service Co-Administrators - Subscription Owners - Contributors",
@@ -538,7 +540,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1.Identity and Access Management",
"SubSection": "1.1 Security Defaults Security Defaults",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all non-privileged users.",
@@ -558,7 +561,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1.Identity and Access Management",
"SubSection": "1.1 Security Defaults Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Do not allow users to remember multi-factor authentication on devices.",
@@ -580,7 +584,8 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Microsoft Entra ID Conditional Access allows an organization to configure `Named locations` and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
@@ -600,7 +605,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "**CAUTION**: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
@@ -620,7 +626,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -640,7 +647,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -660,7 +668,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -682,7 +691,8 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "This recommendation ensures that users accessing the Windows Azure Service Management API (i.e. Azure Powershell, Azure CLI, Azure Resource Manager API, etc.) are required to use multifactor authentication (MFA) credentials when accessing resources through the Windows Azure Service Management API.",
@@ -702,7 +712,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1.Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "This recommendation ensures that users accessing Microsoft Admin Portals (i.e. Microsoft 365 Admin, Microsoft 365 Defender, Exchange Admin Center, Azure Portal, etc.) are required to use multifactor authentication (MFA) credentials when logging into an Admin Portal.",
@@ -724,7 +735,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -746,7 +758,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -768,7 +781,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Managed Instance Azure SQL databases, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
@@ -790,7 +804,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
@@ -812,7 +827,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -834,7 +850,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
@@ -856,7 +873,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -878,7 +896,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud. The following services will be enabled for container instances: - Defender agent in Azure - Azure Policy for Kubernetes - Agentless discovery for Kubernetes - Agentless container vulnerability assessment",
@@ -900,7 +919,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -922,7 +942,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "[**NOTE:** As of August 1, customers with an existing subscription to Defender for DNS can continue to use the service, but new subscribers will receive alerts about suspicious DNS activity as part of Defender for Servers P2.] Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
@@ -944,7 +965,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
@@ -966,7 +988,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
@@ -988,7 +1011,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "The Microsoft Cloud Security Benchmark (or MCSB) is an Azure Policy Initiative containing many security policies to evaluate resource configuration against best practice recommendations. If a policy in the MCSB is set with effect type `Disabled`, it is not evaluated and may prevent administrators from being informed of valuable security recommendations.",
@@ -1010,7 +1034,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
@@ -1032,7 +1057,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
@@ -1052,7 +1078,8 @@
"Checks": [],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
@@ -1074,7 +1101,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable security alert emails to subscription owners.",
@@ -1096,7 +1124,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
@@ -1118,7 +1147,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
@@ -1140,7 +1170,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
@@ -1162,7 +1193,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. **IMPORTANT:** When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 1. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
@@ -1182,7 +1214,8 @@
"Checks": [],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "An organization's attack surface is the collection of assets with a public network identifier or URI that an external threat actor can see or access from outside your cloud. It is the set of points on the boundary of a system, a system element, system component, or an environment where an attacker can try to enter, cause an effect on, or extract data from, that system, system element, system component, or environment. The larger the attack surface, the harder it is to protect. This tool can be configured to scan your organization's online infrastructure such as specified domains, hosts, CIDR blocks, and SSL certificates, and store them in an Inventory. Inventory items can be added, reviewed, approved, and removed, and may contain enrichments (insights) and additional information collected from the tool's different scan engines and open-source intelligence sources. A Defender EASM workspace will generate an Inventory of publicly exposed assets by crawling and scanning the internet using _Seeds_ you provide when setting up the tool. Seeds can be FQDNs, IP CIDR blocks, and WHOIS records. Defender EASM will generate Insights within 24-48 hours after Seeds are provided, and these insights include vulnerability data (CVEs), ports and protocols, and weak or expired SSL certificates that could be used by an attacker for reconnaisance or exploitation. Results are classified High/Medium/Low and some of them include proposed mitigations.",
@@ -1204,7 +1237,8 @@
],
"Attributes": [
{
"Section": "2.2 Microsoft Defender for IoT",
"Section": "2. Microsoft Defender",
"SubSection": "2.2 Microsoft Defender for IoT",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
@@ -1586,7 +1620,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable auditing on SQL Servers.",
@@ -1608,7 +1643,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
@@ -1630,7 +1666,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
@@ -1652,7 +1689,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Use Microsoft Entra authentication for authentication with SQL Database to manage credentials in a single place.",
@@ -1674,7 +1712,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable Transparent Data Encryption on every SQL server.",
@@ -1696,7 +1735,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
@@ -1718,7 +1758,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `SSL connection` on `PostgreSQL` Servers.",
@@ -1740,7 +1781,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `log_checkpoints` on `PostgreSQL Servers`.",
@@ -1762,7 +1804,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `log_connections` on `PostgreSQL Servers`.",
@@ -1784,7 +1827,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `log_disconnections` on `PostgreSQL Servers`.",
@@ -1806,7 +1850,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `connection_throttling` on `PostgreSQL Servers`.",
@@ -1828,7 +1873,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `log_retention_days` on `PostgreSQL Servers` is set to an appropriate value.",
@@ -1850,7 +1896,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
@@ -1870,7 +1917,8 @@
"Checks": [],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
@@ -1892,7 +1940,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `SSL connection` on `MYSQL` Servers.",
@@ -1914,7 +1963,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `TLS version` on `MySQL flexible` servers is set to use TLS version 1.2 or higher.",
@@ -1936,7 +1986,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable audit_log_enabled on MySQL Servers.",
@@ -1958,7 +2009,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Set `audit_log_enabled` to include CONNECTION on MySQL Servers.",
@@ -1980,7 +2032,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
@@ -2002,7 +2055,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Private endpoints limit network traffic to approved sources.",
@@ -2024,7 +2078,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Cosmos DB can use tokens or Entra ID for client authentication which in turn will use Azure RBAC for authorization. Using Entra ID is significantly more secure because Entra ID handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
@@ -2086,7 +2141,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
@@ -2108,7 +2164,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "**Prerequisite**: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: Ensure that a 'Diagnostic Setting' exists. The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
@@ -2130,7 +2187,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
@@ -2152,7 +2210,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
@@ -2174,7 +2233,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
@@ -2196,7 +2256,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
@@ -2218,7 +2279,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create Policy Assignment event.",
@@ -2240,7 +2302,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
@@ -2262,7 +2325,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
@@ -2284,7 +2348,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Network Security Group event.",
@@ -2306,7 +2371,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
@@ -2328,7 +2394,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Security Solution event.",
@@ -2350,7 +2417,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
@@ -2372,7 +2440,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete SQL Server Firewall Rule.",
@@ -2394,7 +2463,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
@@ -2416,7 +2486,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
@@ -2438,7 +2509,8 @@
],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights. Storage Accounts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.3 Configuring Application Insights. Storage Accounts",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
@@ -3043,7 +3115,9 @@
{
"Id": "9.4",
"Description": "Ensure that Register with Entra ID is enabled on App Service",
"Checks": [],
"Checks": [
"app_register_with_identity"
],
"Attributes": [
{
"Section": "9. AppService",
File diff suppressed because one or more lines are too long
+38 -19
@@ -1292,7 +1292,8 @@
"Checks": [],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances. This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1313,7 +1314,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
@@ -1334,7 +1336,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1355,7 +1358,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are: - `TERSE` - `DEFAULT` - `VERBOSE` `TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information. `VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error. Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1376,7 +1380,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1397,7 +1402,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are: - `none` - `ddl` - `mod` - `all` The value `ddl` logs all data definition statements. The value `mod` logs all ddl statements, plus data-modifying statements. The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included. A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1418,7 +1424,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet. In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC). Limiting network access to your database will limit potential attacks.",
@@ -1439,7 +1446,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1460,7 +1468,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1481,7 +1490,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1502,7 +1512,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1523,7 +1534,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1544,7 +1556,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1565,7 +1578,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
@@ -1586,7 +1600,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1607,7 +1622,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1628,7 +1644,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
@@ -1649,7 +1666,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.",
@@ -1670,7 +1688,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.",
+36 -18
@@ -1330,7 +1330,8 @@
"Checks": [],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances.This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1352,7 +1353,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
@@ -1374,7 +1376,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1396,7 +1399,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are:- `TERSE`- `DEFAULT`- `VERBOSE``TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information.`VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error.Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1418,7 +1422,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1440,7 +1445,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1462,7 +1468,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are:- `none`- `ddl`- `mod`- `all`The value `ddl` logs all data definition statements.The value `mod` logs all ddl statements, plus data-modifying statements.The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included.A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1484,7 +1491,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1506,7 +1514,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1528,7 +1537,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1550,7 +1560,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1572,7 +1583,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
@@ -1594,7 +1606,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.This flag is deprecated for all SQL Server versions in CGP. Going forward, you can't set its value to on. However, if you have this flag enabled, we strongly recommend that you either remove the flag from your database or set it to off. For cross-database access, use the [Microsoft tutorial for signing stored procedures with a certificate](https://learn.microsoft.com/en-us/sql/relational-databases/tutorial-signing-stored-procedures-with-a-certificate?view=sql-server-ver16).",
@@ -1616,7 +1629,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1638,7 +1652,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
@@ -1660,7 +1675,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1682,7 +1698,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1704,7 +1721,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended not to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `on`.",
File diff suppressed because it is too large
File diff suppressed because it is too large
+1 -1
@@ -12,7 +12,7 @@ from prowler.lib.logger import logger
timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
prowler_version = "5.1.0"
prowler_version = "5.2.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
+31
@@ -378,6 +378,37 @@ aws:
# Minimum retention period in hours for Kinesis streams
min_kinesis_stream_retention_hours: 168 # 7 days
# Detect Secrets plugin configuration
detect_secrets_plugins: [
{"name": "ArtifactoryDetector"},
{"name": "AWSKeyDetector"},
{"name": "AzureStorageKeyDetector"},
{"name": "BasicAuthDetector"},
{"name": "CloudantDetector"},
{"name": "DiscordBotTokenDetector"},
{"name": "GitHubTokenDetector"},
{"name": "GitLabTokenDetector"},
{"name": "Base64HighEntropyString", "limit": 6.0},
{"name": "HexHighEntropyString", "limit": 3.0},
{"name": "IbmCloudIamDetector"},
{"name": "IbmCosHmacDetector"},
# {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
{"name": "JwtTokenDetector"},
{"name": "KeywordDetector"},
{"name": "MailchimpDetector"},
{"name": "NpmDetector"},
{"name": "OpenAIDetector"},
{"name": "PrivateKeyDetector"},
{"name": "PypiTokenDetector"},
{"name": "SendGridDetector"},
{"name": "SlackDetector"},
{"name": "SoftlayerDetector"},
{"name": "SquareOAuthDetector"},
{"name": "StripeDetector"},
# {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
{"name": "TwilioKeyDetector"},
]
# Azure Configuration
azure:
+1
@@ -83,6 +83,7 @@ class CIS_Requirement_Attribute(BaseModel):
"""CIS Requirement Attribute"""
Section: str
SubSection: Optional[str]
Profile: CIS_Requirement_Attribute_Profile
AssessmentStatus: CIS_Requirement_Attribute_AssessmentStatus
Description: str
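To illustrate the model change above, here is a minimal, self-contained sketch of how the new optional SubSection field behaves. Profile and AssessmentStatus are simplified to plain strings, so this is an approximation of CIS_Requirement_Attribute rather than the real class:

```python
from typing import Optional

from pydantic import BaseModel


class RequirementAttribute(BaseModel):
    """Simplified stand-in for CIS_Requirement_Attribute: Profile and
    AssessmentStatus are plain strings here instead of the real enums."""

    Section: str
    SubSection: Optional[str] = None
    Profile: str
    AssessmentStatus: str
    Description: str


# New-style attribute, as in the updated CIS metadata above: the old combined
# value is split into Section and SubSection.
new_style = RequirementAttribute(
    Section="4. Database Services",
    SubSection="4.4 MySQL Database",
    Profile="Level 1",
    AssessmentStatus="Automated",
    Description="Enable `SSL connection` on `MYSQL` Servers.",
)

# Old-style attribute without a SubSection still validates, because the field
# is Optional.
old_style = RequirementAttribute(
    Section="4.4 MySQL Database",
    Profile="Level 1",
    AssessmentStatus="Automated",
    Description="Enable `SSL connection` on `MYSQL` Servers.",
)

print(new_style.SubSection, old_style.SubSection)  # 4.4 MySQL Database None
```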
+76 -35
@@ -3,9 +3,9 @@ import os
import re
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass
from dataclasses import asdict, dataclass, is_dataclass
from enum import Enum
from typing import Set
from typing import Any, Dict, Set
from pydantic import BaseModel, ValidationError, validator
@@ -405,15 +405,34 @@ class Check_Report:
status: str
status_extended: str
check_metadata: CheckMetadata
resource_metadata: dict
resource: dict
resource_details: str
resource_tags: list
muted: bool
def __init__(self, metadata, resource=None):
def __init__(self, metadata: Dict, resource: Any) -> None:
"""Initialize the Check's finding information.
Args:
metadata: The metadata of the check.
resource: Basic information about the resource. Defaults to None.
Only accepted dict, list, BaseModels (dict attribute), custom models (with to_dict attribute) and dataclasses.
"""
self.status = ""
self.check_metadata = CheckMetadata.parse_raw(metadata)
self.resource_metadata = resource.dict() if resource else {}
if isinstance(resource, dict):
self.resource = resource
elif hasattr(resource, "dict"):
self.resource = resource.dict()
elif hasattr(resource, "to_dict"):
self.resource = resource.to_dict()
elif is_dataclass(resource):
self.resource = asdict(resource)
else:
logger.error(
f"Resource metadata {type(resource)} in {self.check_metadata.CheckID} could not be converted to dict"
)
self.resource = {}
self.status_extended = ""
self.resource_details = ""
self.resource_tags = getattr(resource, "tags", []) if resource else []
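The conversion logic in the new constructor can be read in isolation as a standalone sketch under the same assumptions: dicts pass through, Pydantic models expose .dict(), custom models expose .to_dict(), dataclasses go through asdict(), and anything else falls back to an empty dict. The Bucket dataclass below is purely hypothetical:

```python
from dataclasses import asdict, dataclass, is_dataclass
from typing import Any, Dict


def resource_to_dict(resource: Any) -> Dict:
    """Same decision order as the constructor above; unknown types fall back to {}."""
    if isinstance(resource, dict):
        return resource
    if hasattr(resource, "dict"):      # Pydantic BaseModel
        return resource.dict()
    if hasattr(resource, "to_dict"):   # custom model exposing to_dict()
        return resource.to_dict()
    if is_dataclass(resource):
        return asdict(resource)
    return {}


@dataclass
class Bucket:  # hypothetical resource model, not a real Prowler class
    name: str
    arn: str
    region: str


print(resource_to_dict({"name": "raw-dict"}))
print(resource_to_dict(Bucket("example-bucket", "arn:aws:s3:::example-bucket", "eu-west-1")))
```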
@@ -428,20 +447,13 @@ class Check_Report_AWS(Check_Report):
resource_arn: str
region: str
def __init__(self, metadata, resource_metadata=None):
super().__init__(metadata, resource_metadata)
if resource_metadata:
self.resource_id = (
getattr(resource_metadata, "id", None)
or getattr(resource_metadata, "name", None)
or ""
)
self.resource_arn = getattr(resource_metadata, "arn", "")
self.region = getattr(resource_metadata, "region", "")
else:
self.resource_id = ""
self.resource_arn = ""
self.region = ""
def __init__(self, metadata: Dict, resource: Any) -> None:
super().__init__(metadata, resource)
self.resource_id = (
getattr(resource, "id", None) or getattr(resource, "name", None) or ""
)
self.resource_arn = getattr(resource, "arn", "")
self.region = getattr(resource, "region", "")
@dataclass
@@ -453,12 +465,20 @@ class Check_Report_Azure(Check_Report):
subscription: str
location: str
def __init__(self, metadata):
super().__init__(metadata)
self.resource_name = ""
self.resource_id = ""
def __init__(self, metadata: Dict, resource: Any) -> None:
"""Initialize the Azure Check's finding information.
Args:
metadata: The metadata of the check.
resource: Basic information about the resource. Defaults to None.
"""
super().__init__(metadata, resource)
self.resource_name = getattr(
resource, "name", getattr(resource, "resource_name", "")
)
self.resource_id = getattr(resource, "id", getattr(resource, "resource_id", ""))
self.subscription = ""
self.location = "global"
self.location = getattr(resource, "location", "global")
@dataclass
@@ -470,12 +490,29 @@ class Check_Report_GCP(Check_Report):
project_id: str
location: str
def __init__(self, metadata):
super().__init__(metadata)
self.resource_name = ""
self.resource_id = ""
self.project_id = ""
self.location = ""
def __init__(
self,
metadata: Dict,
resource: Any,
location=None,
resource_name=None,
resource_id=None,
project_id=None,
) -> None:
super().__init__(metadata, resource)
self.resource_id = (
resource_id
or getattr(resource, "id", None)
or getattr(resource, "name", None)
or ""
)
self.resource_name = resource_name or getattr(resource, "name", "")
self.project_id = project_id or getattr(resource, "project_id", "")
self.location = (
location
or getattr(resource, "location", "")
or getattr(resource, "region", "")
)
@dataclass
@@ -487,11 +524,15 @@ class Check_Report_Kubernetes(Check_Report):
resource_id: str
namespace: str
def __init__(self, metadata):
super().__init__(metadata)
self.resource_name = ""
self.resource_id = ""
self.namespace = ""
def __init__(self, metadata: Dict, resource: Any) -> None:
super().__init__(metadata, resource)
self.resource_id = (
getattr(resource, "uid", None) or getattr(resource, "name", None) or ""
)
self.resource_name = getattr(resource, "name", "")
self.namespace = getattr(resource, "namespace", "cluster-wide")
if not self.namespace:
self.namespace = "cluster-wide"
# Testing Pending
+1 -1
@@ -14,7 +14,7 @@ def fill_common_finding_data(finding: dict, unix_timestamp: bool) -> dict:
"status_extended": finding.status_extended,
"muted": finding.muted,
"resource_details": finding.resource_details,
# "resource_metadata": finding.resource_metadata, TODO: add resource_metadata to the finding
"resource": finding.resource,
"resource_tags": unroll_tags(finding.resource_tags),
}
return finding_data
@@ -48,6 +48,7 @@ class AWSCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -78,6 +79,7 @@ class AWSCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -48,6 +48,7 @@ class AzureCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -79,6 +80,7 @@ class AzureCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -48,6 +48,7 @@ class GCPCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -78,6 +79,7 @@ class GCPCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -50,6 +50,7 @@ class KubernetesCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -81,6 +82,7 @@ class KubernetesCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -1,3 +1,5 @@
from typing import Optional
from pydantic import BaseModel
@@ -14,6 +16,7 @@ class AWSCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
@@ -44,6 +47,7 @@ class AzureCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
@@ -75,6 +79,7 @@ class GCPCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
@@ -105,6 +110,7 @@ class KubernetesCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
+2 -1
@@ -35,7 +35,7 @@ class Finding(BaseModel):
status_extended: str
muted: bool = False
resource_uid: str
# resource_metadata: dict = Field(default_factory=dict) TODO: add resource_metadata to the finding
resource_metadata: dict = Field(default_factory=dict)
resource_name: str
resource_details: str
resource_tags: dict = Field(default_factory=dict)
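A hedged sketch of how the un-commented resource_metadata field is now populated when a finding is built from a check report. MiniFinding is a cut-down stand-in for the Finding model above, and the resource values are invented for illustration:

```python
from pydantic import BaseModel, Field


class MiniFinding(BaseModel):
    """Cut-down stand-in for the Finding model above; most fields omitted."""

    resource_uid: str
    resource_name: str
    resource_metadata: dict = Field(default_factory=dict)


# check_output.resource is the dict that Check_Report now builds from the
# service model; the values below are invented for illustration.
resource = {"name": "example-bucket", "arn": "arn:aws:s3:::example-bucket"}

finding = MiniFinding(
    resource_uid="arn:aws:s3:::example-bucket",
    resource_name="example-bucket",
    resource_metadata=resource,
)
print(finding.resource_metadata)  # {'name': 'example-bucket', 'arn': 'arn:aws:s3:::example-bucket'}
```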
@@ -121,6 +121,7 @@ class Finding(BaseModel):
)
try:
output_data["provider"] = provider.type
output_data["resource_metadata"] = check_output.resource
if provider.type == "aws":
output_data["account_uid"] = get_nested_attribute(
+22 -8
@@ -1,4 +1,5 @@
import os
from datetime import datetime
from typing import List
from py_ocsf_models.events.base_event import SeverityID, StatusID
@@ -68,7 +69,11 @@ class OCSF(Output):
activity_name=finding_activity.name,
finding_info=FindingInformation(
created_time_dt=finding.timestamp,
created_time=int(finding.timestamp.timestamp()),
created_time=(
int(finding.timestamp.timestamp())
if isinstance(finding.timestamp, datetime)
else finding.timestamp
),
desc=finding.metadata.Description,
title=finding.metadata.CheckTitle,
uid=finding.uid,
@@ -77,7 +82,11 @@ class OCSF(Output):
types=finding.metadata.CheckType,
),
time_dt=finding.timestamp,
time=int(finding.timestamp.timestamp()),
time=(
int(finding.timestamp.timestamp())
if isinstance(finding.timestamp, datetime)
else finding.timestamp
),
remediation=Remediation(
desc=finding.metadata.Remediation.Recommendation.Text,
references=list(
@@ -113,7 +122,7 @@ class OCSF(Output):
region=finding.region,
data={
"details": finding.resource_details,
# "metadata": finding.resource_metadata, TODO: add the resource_metadata to the finding
"metadata": finding.resource_metadata,
},
)
]
@@ -127,7 +136,7 @@ class OCSF(Output):
type=finding.metadata.ResourceType,
data={
"details": finding.resource_details,
# "metadata": finding.resource_metadata, TODO: add the resource_metadata to the finding
"metadata": finding.resource_metadata,
},
namespace=finding.region.replace("namespace: ", ""),
)
@@ -193,10 +202,15 @@ class OCSF(Output):
):
self._file_descriptor.write("[")
for finding in self._data:
self._file_descriptor.write(
finding.json(exclude_none=True, indent=4)
)
self._file_descriptor.write(",")
try:
self._file_descriptor.write(
finding.json(exclude_none=True, indent=4)
)
self._file_descriptor.write(",")
except Exception as error:
logger.error(
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
if self._file_descriptor.tell() > 0:
if self._file_descriptor.tell() != 1:
self._file_descriptor.seek(
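The write loop above now tolerates a single bad finding instead of aborting the whole OCSF file. A rough standalone sketch of the same pattern, using json.dumps in place of Pydantic's .json() and an in-memory buffer instead of the real file descriptor:

```python
import io
import json
import logging

logger = logging.getLogger(__name__)

# One of these "findings" is deliberately not JSON-serializable.
findings = [{"uid": "finding-1"}, object(), {"uid": "finding-3"}]

buffer = io.StringIO()
buffer.write("[")
for finding in findings:
    try:
        buffer.write(json.dumps(finding, indent=4))
        buffer.write(",")
    except Exception as error:
        # Same idea as above: log the offending finding and keep writing the rest.
        logger.error(f"{error.__class__.__name__}: {error}")

# Drop the trailing comma and close the array (the real writer seeks back instead).
output = buffer.getvalue().rstrip(",") + "]"
print(output)
```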
-8
@@ -36,7 +36,6 @@ class Scan:
_service_checks_to_execute: dict[str, set[str]]
_service_checks_completed: dict[str, set[str]]
_progress: float = 0.0
_findings: list = []
_duration: int = 0
_status: list[str] = None
@@ -216,10 +215,6 @@ class Scan:
def duration(self) -> int:
return self._duration
@property
def findings(self) -> list:
return self._findings
def scan(
self,
custom_checks_metadata: dict = {},
@@ -282,9 +277,6 @@ class Scan:
if finding.status not in self._status:
check_findings.remove(finding)
# Store findings
self._findings.extend(check_findings)
# Remove the executed check
self._service_checks_to_execute[service].remove(check_name)
if len(self._service_checks_to_execute[service]) == 0:
+40 -30
@@ -26,6 +26,36 @@ from detect_secrets.settings import transient_settings
from prowler.config.config import encoding_format_utf_8
from prowler.lib.logger import logger
default_detect_secrets_plugins = [
{"name": "ArtifactoryDetector"},
{"name": "AWSKeyDetector"},
{"name": "AzureStorageKeyDetector"},
{"name": "BasicAuthDetector"},
{"name": "CloudantDetector"},
{"name": "DiscordBotTokenDetector"},
{"name": "GitHubTokenDetector"},
{"name": "GitLabTokenDetector"},
{"name": "Base64HighEntropyString", "limit": 6.0},
{"name": "HexHighEntropyString", "limit": 3.0},
{"name": "IbmCloudIamDetector"},
{"name": "IbmCosHmacDetector"},
# {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
{"name": "JwtTokenDetector"},
{"name": "KeywordDetector"},
{"name": "MailchimpDetector"},
{"name": "NpmDetector"},
{"name": "OpenAIDetector"},
{"name": "PrivateKeyDetector"},
{"name": "PypiTokenDetector"},
{"name": "SendGridDetector"},
{"name": "SlackDetector"},
{"name": "SoftlayerDetector"},
{"name": "SquareOAuthDetector"},
{"name": "StripeDetector"},
# {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
{"name": "TwilioKeyDetector"},
]
def open_file(input_file: str, mode: str = "r") -> TextIOWrapper:
"""open_file returns a handler to the file using the specified mode."""
@@ -82,13 +112,17 @@ def hash_sha512(string: str) -> str:
def detect_secrets_scan(
data: str = None, file=None, excluded_secrets: list[str] = None
data: str = None,
file=None,
excluded_secrets: list[str] = None,
detect_secrets_plugins: dict = None,
) -> list[dict[str, str]]:
"""detect_secrets_scan scans the data or file for secrets using the detect-secrets library.
Args:
data (str): The data to scan for secrets.
file (str): The file to scan for secrets.
excluded_secrets (list): A list of regex patterns to exclude from the scan.
detect_secrets_plugins (dict): The settings to use for the scan.
Returns:
dict: The secrets found in the
Raises:
@@ -107,36 +141,11 @@ def detect_secrets_scan(
secrets = SecretsCollection()
if not detect_secrets_plugins:
detect_secrets_plugins = default_detect_secrets_plugins
settings = {
"plugins_used": [
{"name": "ArtifactoryDetector"},
{"name": "AWSKeyDetector"},
{"name": "AzureStorageKeyDetector"},
{"name": "BasicAuthDetector"},
{"name": "CloudantDetector"},
{"name": "DiscordBotTokenDetector"},
{"name": "GitHubTokenDetector"},
{"name": "GitLabTokenDetector"},
{"name": "Base64HighEntropyString", "limit": 6.0},
{"name": "HexHighEntropyString", "limit": 3.0},
{"name": "IbmCloudIamDetector"},
{"name": "IbmCosHmacDetector"},
# {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
{"name": "JwtTokenDetector"},
{"name": "KeywordDetector"},
{"name": "MailchimpDetector"},
{"name": "NpmDetector"},
{"name": "OpenAIDetector"},
{"name": "PrivateKeyDetector"},
{"name": "PypiTokenDetector"},
{"name": "SendGridDetector"},
{"name": "SlackDetector"},
{"name": "SoftlayerDetector"},
{"name": "SquareOAuthDetector"},
{"name": "StripeDetector"},
# {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
{"name": "TwilioKeyDetector"},
],
"plugins_used": detect_secrets_plugins,
"filters_used": [
{"path": "detect_secrets.filters.common.is_invalid_file"},
{"path": "detect_secrets.filters.common.is_known_false_positive"},
@@ -144,6 +153,7 @@ def detect_secrets_scan(
{"path": "detect_secrets.filters.heuristic.is_potential_secret"},
],
}
if excluded_secrets and len(excluded_secrets) > 0:
settings["filters_used"].append(
{
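Under the new signature, the plugin list becomes a parameter with the module-level default as fallback. A hedged usage sketch, assuming the helper lives at its usual prowler.lib.utils.utils location (the sample data is made up):

```python
# Import path assumed from the surrounding diff; adjust if the helper lives elsewhere.
from prowler.lib.utils.utils import detect_secrets_scan

# Scan an in-memory string with a single plugin; when detect_secrets_plugins is
# omitted, the module-level default list shown above is used instead.
secrets = detect_secrets_scan(
    data="aws_access_key_id = AKIAIOSFODNN7EXAMPLE",
    detect_secrets_plugins=[{"name": "AWSKeyDetector"}],
)
print(secrets)  # detected secrets (per the list[dict[str, str]] annotation), or None
```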
+109 -14
@@ -29,6 +29,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -112,6 +113,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -351,6 +353,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -472,6 +475,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -517,6 +521,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -844,6 +849,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -889,6 +895,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1015,6 +1022,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1060,6 +1068,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1116,6 +1125,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1158,6 +1168,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -1171,6 +1182,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1248,6 +1260,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1807,6 +1820,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1852,6 +1866,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -1984,6 +1999,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -2052,6 +2068,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -2611,6 +2628,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -2941,6 +2959,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3132,6 +3151,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3192,6 +3212,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3237,6 +3258,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3399,6 +3421,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3444,6 +3467,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3489,6 +3513,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3534,6 +3559,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3579,6 +3605,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3634,6 +3661,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3679,6 +3707,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3724,6 +3753,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3769,6 +3799,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3782,20 +3813,6 @@
]
}
},
"elastic-inference": {
"regions": {
"aws": [
"ap-northeast-1",
"ap-northeast-2",
"eu-west-1",
"us-east-1",
"us-east-2",
"us-west-2"
],
"aws-cn": [],
"aws-us-gov": []
}
},
"elasticache": {
"regions": {
"aws": [
@@ -3825,6 +3842,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3923,6 +3941,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -3968,6 +3987,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -4013,6 +4033,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -4155,6 +4176,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -4200,6 +4222,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -4245,6 +4268,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -4307,6 +4331,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -5016,6 +5041,7 @@
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -5089,6 +5115,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -5221,6 +5248,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -5663,6 +5691,13 @@
]
}
},
"iotthingsgraph": {
"regions": {
"aws": [],
"aws-cn": [],
"aws-us-gov": []
}
},
"iottwinmaker": {
"regions": {
"aws": [
@@ -5878,6 +5913,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -5996,6 +6032,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -6085,6 +6122,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -6422,6 +6460,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -6694,6 +6733,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -7558,6 +7598,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -7635,6 +7676,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -7654,6 +7696,7 @@
"opensearchserverless": {
"regions": {
"aws": [
"ap-east-1",
"ap-northeast-1",
"ap-northeast-2",
"ap-south-1",
@@ -7662,6 +7705,7 @@
"ca-central-1",
"eu-central-1",
"eu-central-2",
"eu-north-1",
"eu-west-1",
"eu-west-2",
"eu-west-3",
@@ -7774,6 +7818,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -7922,6 +7967,23 @@
"aws-us-gov": []
}
},
"pcs": {
"regions": {
"aws": [
"ap-northeast-1",
"ap-southeast-1",
"ap-southeast-2",
"eu-central-1",
"eu-north-1",
"eu-west-1",
"us-east-1",
"us-east-2",
"us-west-2"
],
"aws-cn": [],
"aws-us-gov": []
}
},
"personalize": {
"regions": {
"aws": [
@@ -7972,6 +8034,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8188,6 +8251,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8361,6 +8425,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8406,6 +8471,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8451,6 +8517,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8527,6 +8594,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8558,6 +8626,7 @@
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -8571,6 +8640,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8760,6 +8830,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8805,6 +8876,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8852,6 +8924,8 @@
"ap-southeast-2",
"ap-southeast-3",
"ap-southeast-4",
"ap-southeast-5",
"ap-southeast-7",
"ca-central-1",
"ca-west-1",
"eu-central-1",
@@ -8865,6 +8939,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8948,6 +9023,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8990,6 +9066,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -9093,6 +9170,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -9172,6 +9250,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -9597,6 +9676,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -9766,6 +9846,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -9893,6 +9974,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10207,6 +10289,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10268,6 +10351,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10313,6 +10397,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10509,6 +10594,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10598,6 +10684,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10643,6 +10730,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10699,6 +10787,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10744,6 +10833,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -10997,6 +11087,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -11068,6 +11159,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -11233,6 +11325,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -11314,6 +11407,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -11662,6 +11756,7 @@
"il-central-1",
"me-central-1",
"me-south-1",
"mx-central-1",
"sa-east-1",
"us-east-1",
"us-east-2",
@@ -8,11 +8,7 @@ class accessanalyzer_enabled(Check):
def execute(self):
findings = []
for analyzer in accessanalyzer_client.analyzers:
report = Check_Report_AWS(self.metadata())
report.region = analyzer.region
report.resource_id = analyzer.name
report.resource_arn = analyzer.arn
report.resource_tags = analyzer.tags
report = Check_Report_AWS(metadata=self.metadata(), resource=analyzer)
if analyzer.status == "ACTIVE":
report.status = "PASS"
report.status_extended = (
@@ -8,14 +8,11 @@ class accessanalyzer_enabled_without_findings(Check):
     def execute(self):
         findings = []
         for analyzer in accessanalyzer_client.analyzers:
-            report = Check_Report_AWS(self.metadata())
-            report.region = analyzer.region
+            report = Check_Report_AWS(metadata=self.metadata(), resource=analyzer)
             if analyzer.status == "ACTIVE":
                 report.status = "PASS"
                 report.status_extended = f"IAM Access Analyzer {analyzer.name} does not have active findings."
-                report.resource_id = analyzer.name
-                report.resource_arn = analyzer.arn
-                report.resource_tags = analyzer.tags
                 if len(analyzer.findings) != 0:
                     active_finding_counter = 0
                     for finding in analyzer.findings:
@@ -25,9 +22,7 @@ class accessanalyzer_enabled_without_findings(Check):
                     if active_finding_counter > 0:
                         report.status = "FAIL"
                         report.status_extended = f"IAM Access Analyzer {analyzer.name} has {active_finding_counter} active findings."
-                        report.resource_id = analyzer.name
-                        report.resource_arn = analyzer.arn
-                        report.resource_tags = analyzer.tags
             findings.append(report)
         return findings
@@ -8,7 +8,6 @@ from prowler.lib.scan_filters.scan_filters import is_resource_filtered
 from prowler.providers.aws.lib.service.service import AWSService
-################## AccessAnalyzer
 class AccessAnalyzer(AWSService):
     def __init__(self, provider):
         # Call AWSService's __init__
@@ -6,7 +6,9 @@ from prowler.providers.aws.services.account.account_client import account_client
 class account_maintain_current_contact_details(Check):
     def execute(self):
-        report = Check_Report_AWS(self.metadata())
+        report = Check_Report_AWS(
+            metadata=self.metadata(), resource=account_client.contact_base
+        )
         report.region = account_client.region
         report.resource_id = account_client.audited_account
         report.resource_arn = account_client.audited_account_arn
@@ -8,10 +8,12 @@ class account_maintain_different_contact_details_to_security_billing_and_operati
     def execute(self):
         findings = []
         if account_client.contact_base:
-            report = Check_Report_AWS(self.metadata())
-            report.region = account_client.region
+            report = Check_Report_AWS(
+                metadata=self.metadata(), resource=account_client.contact_base
+            )
             report.resource_id = account_client.audited_account
             report.resource_arn = account_client.audited_account_arn
+            report.region = account_client.region
             if (
                 len(account_client.contact_phone_numbers)
@@ -6,7 +6,9 @@ from prowler.providers.aws.services.account.account_client import account_client
 class account_security_contact_information_is_registered(Check):
     def execute(self):
-        report = Check_Report_AWS(self.metadata())
+        report = Check_Report_AWS(
+            metadata=self.metadata(), resource=account_client.contact_base
+        )
         report.region = account_client.region
         report.resource_id = account_client.audited_account
         report.resource_arn = account_client.audited_account_arn
@@ -6,7 +6,9 @@ from prowler.providers.aws.services.account.account_client import account_client
 class account_security_questions_are_registered_in_the_aws_account(Check):
     def execute(self):
-        report = Check_Report_AWS(self.metadata())
+        report = Check_Report_AWS(
+            metadata=self.metadata(), resource=account_client.contacts_security
+        )
         report.region = account_client.region
         report.resource_id = account_client.audited_account
         report.resource_arn = account_client.audited_account_arn
@@ -1,4 +1,3 @@
-################## Account
 from typing import Optional
 from venv import logger
@@ -7,17 +7,14 @@ class acm_certificates_expiration_check(Check):
         findings = []
         for certificate in acm_client.certificates.values():
             if certificate.in_use or acm_client.provider.scan_unused_services:
-                report = Check_Report_AWS(self.metadata())
-                report.region = certificate.region
+                report = Check_Report_AWS(
+                    metadata=self.metadata(), resource=certificate
+                )
                 if certificate.expiration_days > acm_client.audit_config.get(
                     "days_to_expire_threshold", 7
                 ):
                     report.status = "PASS"
                     report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} expires in {certificate.expiration_days} days."
-                    report.resource_id = certificate.id
-                    report.resource_details = certificate.name
-                    report.resource_arn = certificate.arn
-                    report.resource_tags = certificate.tags
                 else:
                     report.status = "FAIL"
                     if certificate.expiration_days < 0:
@@ -26,11 +23,5 @@ class acm_certificates_expiration_check(Check):
                     else:
                         report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} is about to expire in {certificate.expiration_days} days."
                         report.check_metadata.Severity = Severity.medium
-                    report.resource_id = certificate.id
-                    report.resource_details = certificate.name
-                    report.resource_arn = certificate.arn
-                    report.resource_tags = certificate.tags
                 findings.append(report)
         return findings
@@ -7,29 +7,18 @@ class acm_certificates_transparency_logs_enabled(Check):
         findings = []
         for certificate in acm_client.certificates.values():
             if certificate.in_use or acm_client.provider.scan_unused_services:
-                report = Check_Report_AWS(self.metadata())
-                report.region = certificate.region
+                report = Check_Report_AWS(
+                    metadata=self.metadata(), resource=certificate
+                )
                 if certificate.type == "IMPORTED":
                     report.status = "PASS"
                     report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} is imported."
-                    report.resource_id = certificate.id
-                    report.resource_details = certificate.name
-                    report.resource_arn = certificate.arn
-                    report.resource_tags = certificate.tags
                 else:
                     if not certificate.transparency_logging:
                         report.status = "FAIL"
                         report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} has Certificate Transparency logging disabled."
-                        report.resource_id = certificate.id
-                        report.resource_details = certificate.name
-                        report.resource_arn = certificate.arn
-                        report.resource_tags = certificate.tags
                     else:
                         report.status = "PASS"
                         report.status_extended = f"ACM Certificate {certificate.id} for {certificate.name} has Certificate Transparency logging enabled."
-                        report.resource_id = certificate.id
-                        report.resource_details = certificate.name
-                        report.resource_arn = certificate.arn
-                        report.resource_tags = certificate.tags
                 findings.append(report)
         return findings
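
Note: the common thread across the check diffs above is that the repeated per-check assignments (report.region, report.resource_id, report.resource_arn, report.resource_details, report.resource_tags) are dropped in favour of passing the service object straight to Check_Report_AWS(metadata=..., resource=...). The snippet below is a minimal, hypothetical sketch of how such a constructor could derive those fields from the resource; the ResourceReport class and its attribute-fallback order are illustrative assumptions, not Prowler's actual implementation.

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class ResourceReport:  # hypothetical stand-in for Check_Report_AWS
    metadata: Dict[str, Any]
    resource: Any
    status: str = ""
    status_extended: str = ""
    region: str = ""
    resource_id: str = ""
    resource_arn: str = ""
    resource_details: str = ""
    resource_tags: List[Any] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Mirror the manual assignments removed in the diffs above by reading
        # common attribute names off the resource (assumed fallback order).
        self.resource_id = getattr(
            self.resource, "id", getattr(self.resource, "name", "")
        )
        self.resource_arn = getattr(self.resource, "arn", "")
        self.resource_tags = getattr(self.resource, "tags", None) or []
        self.region = getattr(self.resource, "region", "")


# Usage mirroring the new call sites, e.g. for an analyzer object exposing
# .name, .arn, .tags and .region (names assumed for illustration):
# report = ResourceReport(metadata=check_metadata, resource=analyzer)
```

Read this way, a single resource= argument keeps every check's report fields consistent and removes the four-line assignment block that each status branch previously repeated.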

Some files were not shown because too many files have changed in this diff.