mirror of
https://github.com/prowler-cloud/prowler.git
synced 2026-05-06 08:47:18 +00:00
Compare commits
138 Commits
ec102d1569
...
v5.2
| Author | SHA1 | Date | |
|---|---|---|---|
| 27329457be | |||
| 7189f3d526 | |||
| 58e7589c9d | |||
| d60f4b5ded | |||
| 4c2ec094f6 | |||
| 395ecaff5b | |||
| c39506ef7d | |||
| eb90d479e2 | |||
| b92a73f5ea | |||
| ad121f3059 | |||
| 70e4c5a44e | |||
| b5a46b7b59 | |||
| f1a97cd166 | |||
| 0774508093 | |||
| 0664ce6b94 | |||
| 407c779c52 | |||
| c60f13f23f | |||
| 37d912ef01 | |||
| d3de89c017 | |||
| cb22af25c6 | |||
| a534b94495 | |||
| 6262b4ff0b | |||
| 84ecd7ab2c | |||
| 1a5428445a | |||
| ac8e991ca0 | |||
| 83a0331472 | |||
| cce31e2971 | |||
| 0adf7d6e77 | |||
| 295f8b557e | |||
| bb2c5c3161 | |||
| 0018f36a36 | |||
| 857de84f49 | |||
| 9630f2242a | |||
| 1fe125867c | |||
| 0737893240 | |||
| 282fe3d348 | |||
| b5d83640ae | |||
| 2823d3ad21 | |||
| 00b93bfe86 | |||
| 84c253d887 | |||
| 2ab5a702c9 | |||
| 11d9cdf24e | |||
| b1de41619b | |||
| 18a4881a51 | |||
| de76a168c0 | |||
| bcc13a742d | |||
| 23b584f4bf | |||
| 074b7c1ff5 | |||
| c04e9ed914 | |||
| 1f1b126e79 | |||
| 9b0dd80f13 | |||
| b0807478f2 | |||
| 11dfa58ecd | |||
| 412f396e0a | |||
| 8100d43ff2 | |||
| bc96acef48 | |||
| 6e9876b61a | |||
| 32253ca4f7 | |||
| 4c6be5e283 | |||
| 69178fd7bd | |||
| cd4432c14b | |||
| 0e47172aec | |||
| efe18ae8b2 | |||
| d5e13df4fe | |||
| b0e84d74f2 | |||
| 28526b591c | |||
| 51e6a6edb1 | |||
| c1b132a29e | |||
| e2530e7d57 | |||
| f2fcb1599b | |||
| 160eafa0c9 | |||
| c2bc0f1368 | |||
| 27d27fff81 | |||
| 66e8f0ce18 | |||
| 0ceae8361b | |||
| 5e52fded83 | |||
| 0a7d07b4b6 | |||
| 34b5f483d7 | |||
| 9d04a1bc52 | |||
| b25c0aaa00 | |||
| 652b93ea45 | |||
| ccb7726511 | |||
| c514a4e451 | |||
| d841bd6890 | |||
| ff54e10ab2 | |||
| 718e562741 | |||
| ce05e2a939 | |||
| 90831d3084 | |||
| 2c928dead5 | |||
| 3ffe147664 | |||
| 3373b0f47c | |||
| b29db04560 | |||
| f964a0362e | |||
| 537b23dfae | |||
| 48cf3528b4 | |||
| 3c4b9d32c9 | |||
| fd8361ae2c | |||
| 33cadaa932 | |||
| cc126eb8b4 | |||
| c996b3f6aa | |||
| e2b406a300 | |||
| c2c69da603 | |||
| 4e51348ff2 | |||
| 3257b82706 | |||
| c98d764daa | |||
| 0448429a9f | |||
| b948ac6125 | |||
| 3acc09ea16 | |||
| 82d0d0de9d | |||
| f9cfc4d087 | |||
| 7d68ff455b | |||
| ddf4881971 | |||
| 9ad4944142 | |||
| 7f33ea76a4 | |||
| 1140c29384 | |||
| 2441a62f39 | |||
| c26a231fc1 | |||
| 2fb2315037 | |||
| a9e475481a | |||
| 826d7c4dc3 | |||
| b7f4b37f66 | |||
| 193d691bfe | |||
| a359bc581c | |||
| 9a28ff025a | |||
| f1c7050700 | |||
| 9391c27b9e | |||
| 4c54de092f | |||
| 690c482a43 | |||
| ad2d857c6f | |||
| 07ee59d2ef | |||
| bec4617d0a | |||
| 94916f8305 | |||
| 44de651be3 | |||
| bdcba9c642 | |||
| c172f75f1a | |||
| ec492fa13a | |||
| 702659959c | |||
| fef332a591 |
@@ -15,7 +15,8 @@ Please include a summary of the change and which issue is fixed. List any depend
|
||||
- [ ] Review if the code is being covered by tests.
|
||||
- [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
|
||||
- [ ] Review if backport is needed.
|
||||
- [ ] Review if is needed to change the [Readme.md](https://github.com/prowler-cloud/prowler/blob/master/README.md)
|
||||
|
||||
### License
|
||||
|
||||
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
|
||||
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
|
||||
|
||||
@@ -45,6 +45,7 @@ junit-reports/
|
||||
# Terraform
|
||||
.terraform*
|
||||
*.tfstate
|
||||
*.tfstate.*
|
||||
|
||||
# .env
|
||||
ui/.env*
|
||||
|
||||
+19
-1
@@ -27,6 +27,7 @@ repos:
|
||||
hooks:
|
||||
- id: shellcheck
|
||||
exclude: contrib
|
||||
|
||||
## PYTHON
|
||||
- repo: https://github.com/myint/autoflake
|
||||
rev: v2.3.1
|
||||
@@ -61,8 +62,25 @@ repos:
|
||||
rev: 1.8.0
|
||||
hooks:
|
||||
- id: poetry-check
|
||||
name: API - poetry-check
|
||||
args: ["--directory=./api"]
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-lock
|
||||
args: ["--no-update"]
|
||||
name: API - poetry-lock
|
||||
args: ["--no-update", "--directory=./api"]
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-check
|
||||
name: SDK - poetry-check
|
||||
args: ["--directory=./"]
|
||||
pass_filenames: false
|
||||
|
||||
- id: poetry-lock
|
||||
name: SDK - poetry-lock
|
||||
args: ["--no-update", "--directory=./"]
|
||||
pass_filenames: false
|
||||
|
||||
|
||||
- repo: https://github.com/hadolint/hadolint
|
||||
rev: v2.13.0-beta
|
||||
|
||||
@@ -73,8 +73,8 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe
|
||||
|---|---|---|---|---|
|
||||
| AWS | 561 | 81 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
|
||||
| GCP | 77 | 13 -> `prowler gcp --list-services` | 4 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
|
||||
| Azure | 139 | 18 -> `prowler azure --list-services` | 4 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
|
||||
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
|
||||
| Azure | 139 | 18 -> `prowler azure --list-services` | 5 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
|
||||
| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 2 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
|
||||
|
||||
# 💻 Installation
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ start_prod_server() {
|
||||
|
||||
start_worker() {
|
||||
echo "Starting the worker..."
|
||||
poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E
|
||||
poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E --max-tasks-per-child 1
|
||||
}
|
||||
|
||||
start_worker_beat() {
|
||||
|
||||
Generated
+474
-392
File diff suppressed because it is too large
Load Diff
+4
-3
@@ -8,11 +8,11 @@ description = "Prowler's API (Django/DRF)"
|
||||
license = "Apache-2.0"
|
||||
name = "prowler-api"
|
||||
package-mode = false
|
||||
version = "1.1.0"
|
||||
version = "1.3.2"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
celery = {extras = ["pytest"], version = "^5.4.0"}
|
||||
django = "5.1.4"
|
||||
django = "5.1.5"
|
||||
django-celery-beat = "^2.7.0"
|
||||
django-celery-results = "^2.5.1"
|
||||
django-cors-headers = "4.4.0"
|
||||
@@ -27,7 +27,7 @@ drf-nested-routers = "^0.94.1"
|
||||
drf-spectacular = "0.27.2"
|
||||
drf-spectacular-jsonapi = "0.5.1"
|
||||
gunicorn = "23.0.0"
|
||||
prowler = "^5.0"
|
||||
prowler = {git = "https://github.com/prowler-cloud/prowler.git", branch = "v5.2"}
|
||||
psycopg2-binary = "2.9.9"
|
||||
pytest-celery = {extras = ["redis"], version = "^1.0.1"}
|
||||
# Needed for prowler compatibility
|
||||
@@ -37,6 +37,7 @@ uuid6 = "2024.7.10"
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
bandit = "1.7.9"
|
||||
coverage = "7.5.4"
|
||||
django-silk = "5.3.2"
|
||||
docker = "7.1.0"
|
||||
freezegun = "1.5.1"
|
||||
mypy = "1.10.1"
|
||||
|
||||
@@ -4,13 +4,17 @@ class MainRouter:
|
||||
|
||||
def db_for_read(self, model, **hints): # noqa: F841
|
||||
model_table_name = model._meta.db_table
|
||||
if model_table_name.startswith("django_"):
|
||||
if model_table_name.startswith("django_") or model_table_name.startswith(
|
||||
"silk_"
|
||||
):
|
||||
return self.admin_db
|
||||
return None
|
||||
|
||||
def db_for_write(self, model, **hints): # noqa: F841
|
||||
model_table_name = model._meta.db_table
|
||||
if model_table_name.startswith("django_"):
|
||||
if model_table_name.startswith("django_") or model_table_name.startswith(
|
||||
"silk_"
|
||||
):
|
||||
return self.admin_db
|
||||
return None
|
||||
|
||||
|
||||
@@ -319,6 +319,28 @@ class FindingFilter(FilterSet):
|
||||
field_name="resources__type", lookup_expr="icontains"
|
||||
)
|
||||
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# resource_tag_key = CharFilter(field_name="resources__tags__key")
|
||||
# resource_tag_key__in = CharInFilter(
|
||||
# field_name="resources__tags__key", lookup_expr="in"
|
||||
# )
|
||||
# resource_tag_key__icontains = CharFilter(
|
||||
# field_name="resources__tags__key", lookup_expr="icontains"
|
||||
# )
|
||||
# resource_tag_value = CharFilter(field_name="resources__tags__value")
|
||||
# resource_tag_value__in = CharInFilter(
|
||||
# field_name="resources__tags__value", lookup_expr="in"
|
||||
# )
|
||||
# resource_tag_value__icontains = CharFilter(
|
||||
# field_name="resources__tags__value", lookup_expr="icontains"
|
||||
# )
|
||||
# resource_tags = CharInFilter(
|
||||
# method="filter_resource_tag",
|
||||
# lookup_expr="in",
|
||||
# help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
|
||||
# "separated by commas.",
|
||||
# )
|
||||
|
||||
scan = UUIDFilter(method="filter_scan_id")
|
||||
scan__in = UUIDInFilter(method="filter_scan_id_in")
|
||||
|
||||
@@ -426,6 +448,16 @@ class FindingFilter(FilterSet):
|
||||
|
||||
return queryset.filter(id__lte=end).filter(inserted_at__lte=value)
|
||||
|
||||
def filter_resource_tag(self, queryset, name, value):
|
||||
overall_query = Q()
|
||||
for key_value_pair in value:
|
||||
tag_key, tag_value = key_value_pair.split(":", 1)
|
||||
overall_query |= Q(
|
||||
resources__tags__key__icontains=tag_key,
|
||||
resources__tags__value__icontains=tag_value,
|
||||
)
|
||||
return queryset.filter(overall_query).distinct()
|
||||
|
||||
@staticmethod
|
||||
def maybe_date_to_datetime(value):
|
||||
dt = value
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.823Z",
|
||||
"updated_at": "2024-10-18T10:46:04.841Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.823Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -61,6 +62,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.855Z",
|
||||
"updated_at": "2024-10-18T10:46:04.858Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.855Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -116,6 +118,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.869Z",
|
||||
"updated_at": "2024-10-18T10:46:04.876Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.869Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -171,6 +174,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.888Z",
|
||||
"updated_at": "2024-10-18T10:46:04.892Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.888Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -226,6 +230,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.901Z",
|
||||
"updated_at": "2024-10-18T10:46:04.905Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.901Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -281,6 +286,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.915Z",
|
||||
"updated_at": "2024-10-18T10:46:04.919Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.915Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -336,6 +342,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.929Z",
|
||||
"updated_at": "2024-10-18T10:46:04.934Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.929Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -391,6 +398,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.944Z",
|
||||
"updated_at": "2024-10-18T10:46:04.947Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.944Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -446,6 +454,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.957Z",
|
||||
"updated_at": "2024-10-18T10:46:04.962Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.957Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
|
||||
"delta": "new",
|
||||
"status": "PASS",
|
||||
@@ -501,6 +510,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.971Z",
|
||||
"updated_at": "2024-10-18T10:46:04.975Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.971Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -556,6 +566,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.984Z",
|
||||
"updated_at": "2024-10-18T10:46:04.989Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.984Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -611,6 +622,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:04.999Z",
|
||||
"updated_at": "2024-10-18T10:46:05.003Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.999Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -666,6 +678,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.013Z",
|
||||
"updated_at": "2024-10-18T10:46:05.018Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.013Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -721,6 +734,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.029Z",
|
||||
"updated_at": "2024-10-18T10:46:05.033Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.029Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -776,6 +790,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.045Z",
|
||||
"updated_at": "2024-10-18T10:46:05.050Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.045Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -831,6 +846,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.061Z",
|
||||
"updated_at": "2024-10-18T10:46:05.065Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.061Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -886,6 +902,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.080Z",
|
||||
"updated_at": "2024-10-18T10:46:05.085Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.080Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -941,6 +958,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.099Z",
|
||||
"updated_at": "2024-10-18T10:46:05.104Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.099Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -996,6 +1014,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T10:46:05.115Z",
|
||||
"updated_at": "2024-10-18T10:46:05.121Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.115Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
|
||||
"delta": "new",
|
||||
"status": "FAIL",
|
||||
@@ -1051,6 +1070,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.489Z",
|
||||
"updated_at": "2024-10-18T11:16:24.506Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.823Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1106,6 +1126,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.518Z",
|
||||
"updated_at": "2024-10-18T11:16:24.521Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.855Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1161,6 +1182,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.526Z",
|
||||
"updated_at": "2024-10-18T11:16:24.529Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.869Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1216,6 +1238,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.535Z",
|
||||
"updated_at": "2024-10-18T11:16:24.538Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.888Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1271,6 +1294,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.544Z",
|
||||
"updated_at": "2024-10-18T11:16:24.546Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.901Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1326,6 +1350,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.551Z",
|
||||
"updated_at": "2024-10-18T11:16:24.554Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.915Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1381,6 +1406,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.560Z",
|
||||
"updated_at": "2024-10-18T11:16:24.562Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.929Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1436,6 +1462,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.567Z",
|
||||
"updated_at": "2024-10-18T11:16:24.569Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.944Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1491,6 +1518,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.573Z",
|
||||
"updated_at": "2024-10-18T11:16:24.575Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.957Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
|
||||
"delta": null,
|
||||
"status": "PASS",
|
||||
@@ -1546,6 +1574,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.580Z",
|
||||
"updated_at": "2024-10-18T11:16:24.582Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.971Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1601,6 +1630,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.587Z",
|
||||
"updated_at": "2024-10-18T11:16:24.589Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.984Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1656,6 +1686,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.595Z",
|
||||
"updated_at": "2024-10-18T11:16:24.597Z",
|
||||
"first_seen_at": "2024-10-18T10:46:04.999Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1711,6 +1742,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.602Z",
|
||||
"updated_at": "2024-10-18T11:16:24.604Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.013Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1766,6 +1798,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.610Z",
|
||||
"updated_at": "2024-10-18T11:16:24.612Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.029Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1821,6 +1854,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.617Z",
|
||||
"updated_at": "2024-10-18T11:16:24.620Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.045Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1876,6 +1910,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.625Z",
|
||||
"updated_at": "2024-10-18T11:16:24.627Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.061Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1931,6 +1966,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.632Z",
|
||||
"updated_at": "2024-10-18T11:16:24.634Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.080Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -1986,6 +2022,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.639Z",
|
||||
"updated_at": "2024-10-18T11:16:24.642Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.099Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -2041,6 +2078,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:24.646Z",
|
||||
"updated_at": "2024-10-18T11:16:24.648Z",
|
||||
"first_seen_at": "2024-10-18T10:46:05.115Z",
|
||||
"uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
|
||||
"delta": null,
|
||||
"status": "FAIL",
|
||||
@@ -2096,6 +2134,7 @@
|
||||
"tenant": "12646005-9067-4d2a-a098-8bb378604362",
|
||||
"inserted_at": "2024-10-18T11:16:26.033Z",
|
||||
"updated_at": "2024-10-18T11:16:26.045Z",
|
||||
"first_seen_at": "2024-10-18T11:16:26.033Z",
|
||||
"uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566",
|
||||
"delta": "new",
|
||||
"status": "MANUAL",
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0005_rbac_missing_admin_roles"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name="finding",
|
||||
name="first_seen_at",
|
||||
field=models.DateTimeField(editable=False, null=True),
|
||||
),
|
||||
]
|
||||
@@ -0,0 +1,25 @@
|
||||
# Generated by Django 5.1.5 on 2025-01-28 15:03
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
("api", "0006_findings_first_seen"),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddIndex(
|
||||
model_name="scan",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "provider_id", "state", "inserted_at"],
|
||||
name="scans_prov_state_insert_idx",
|
||||
),
|
||||
),
|
||||
migrations.AddIndex(
|
||||
model_name="scansummary",
|
||||
index=models.Index(
|
||||
fields=["tenant_id", "scan_id"], name="scan_summaries_tenant_scan_idx"
|
||||
),
|
||||
),
|
||||
]
|
||||
@@ -428,6 +428,10 @@ class Scan(RowLevelSecurityProtectedModel):
|
||||
fields=["provider", "state", "trigger", "scheduled_at"],
|
||||
name="scans_prov_state_trig_sche_idx",
|
||||
),
|
||||
models.Index(
|
||||
fields=["tenant_id", "provider_id", "state", "inserted_at"],
|
||||
name="scans_prov_state_insert_idx",
|
||||
),
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
@@ -615,6 +619,7 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
|
||||
id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
|
||||
inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
|
||||
updated_at = models.DateTimeField(auto_now=True, editable=False)
|
||||
first_seen_at = models.DateTimeField(editable=False, null=True)
|
||||
|
||||
uid = models.CharField(max_length=300)
|
||||
delta = FindingDeltaEnumField(
|
||||
@@ -1099,6 +1104,12 @@ class ScanSummary(RowLevelSecurityProtectedModel):
|
||||
statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
|
||||
),
|
||||
]
|
||||
indexes = [
|
||||
models.Index(
|
||||
fields=["tenant_id", "scan_id"],
|
||||
name="scan_summaries_tenant_scan_idx",
|
||||
)
|
||||
]
|
||||
|
||||
class JSONAPIMeta:
|
||||
resource_name = "scan-summaries"
|
||||
|
||||
+536
-120
@@ -1,7 +1,7 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Prowler API
|
||||
version: 1.1.0
|
||||
version: 1.3.2
|
||||
description: |-
|
||||
Prowler API specification.
|
||||
|
||||
@@ -257,6 +257,7 @@ paths:
|
||||
- raw_result
|
||||
- inserted_at
|
||||
- updated_at
|
||||
- first_seen_at
|
||||
- url
|
||||
- scan
|
||||
- resources
|
||||
@@ -668,8 +669,6 @@ paths:
|
||||
items:
|
||||
type: string
|
||||
enum:
|
||||
- id
|
||||
- -id
|
||||
- status
|
||||
- -status
|
||||
- severity
|
||||
@@ -715,6 +714,7 @@ paths:
|
||||
- raw_result
|
||||
- inserted_at
|
||||
- updated_at
|
||||
- first_seen_at
|
||||
- url
|
||||
- scan
|
||||
- resources
|
||||
@@ -1150,8 +1150,430 @@ paths:
|
||||
items:
|
||||
type: string
|
||||
enum:
|
||||
- id
|
||||
- -id
|
||||
- status
|
||||
- -status
|
||||
- severity
|
||||
- -severity
|
||||
- check_id
|
||||
- -check_id
|
||||
- inserted_at
|
||||
- -inserted_at
|
||||
- updated_at
|
||||
- -updated_at
|
||||
explode: false
|
||||
tags:
|
||||
- Finding
|
||||
security:
|
||||
- jwtAuth: []
|
||||
deprecated: true
|
||||
responses:
|
||||
'200':
|
||||
content:
|
||||
application/vnd.api+json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FindingDynamicFilterResponse'
|
||||
description: ''
|
||||
/api/v1/findings/metadata:
|
||||
get:
|
||||
operationId: findings_metadata_retrieve
|
||||
description: Fetch unique metadata values from a set of findings. This is useful
|
||||
for dynamic filtering.
|
||||
summary: Retrieve metadata values from findings
|
||||
parameters:
|
||||
- in: query
|
||||
name: fields[findings-metadata]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
enum:
|
||||
- services
|
||||
- regions
|
||||
- resource_types
|
||||
description: endpoint return only specific fields in the response on a per-type
|
||||
basis by including a fields[TYPE] query parameter.
|
||||
explode: false
|
||||
- in: query
|
||||
name: filter[check_id]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[check_id__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[check_id__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[delta]
|
||||
schema:
|
||||
type: string
|
||||
nullable: true
|
||||
enum:
|
||||
- changed
|
||||
- new
|
||||
description: |-
|
||||
* `new` - New
|
||||
* `changed` - Changed
|
||||
- in: query
|
||||
name: filter[delta__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[id]
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
- in: query
|
||||
name: filter[id__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
format: uuid
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[impact]
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- critical
|
||||
- high
|
||||
- informational
|
||||
- low
|
||||
- medium
|
||||
description: |-
|
||||
* `critical` - Critical
|
||||
* `high` - High
|
||||
* `medium` - Medium
|
||||
* `low` - Low
|
||||
* `informational` - Informational
|
||||
- in: query
|
||||
name: filter[impact__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[inserted_at]
|
||||
schema:
|
||||
type: string
|
||||
format: date
|
||||
- in: query
|
||||
name: filter[inserted_at__date]
|
||||
schema:
|
||||
type: string
|
||||
format: date
|
||||
- in: query
|
||||
name: filter[inserted_at__gte]
|
||||
schema:
|
||||
type: string
|
||||
format: date
|
||||
- in: query
|
||||
name: filter[inserted_at__lte]
|
||||
schema:
|
||||
type: string
|
||||
format: date
|
||||
- in: query
|
||||
name: filter[provider]
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
- in: query
|
||||
name: filter[provider__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
format: uuid
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[provider_alias]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[provider_alias__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[provider_alias__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[provider_type]
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- kubernetes
|
||||
description: |-
|
||||
* `aws` - AWS
|
||||
* `azure` - Azure
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
- in: query
|
||||
name: filter[provider_type__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
enum:
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
- kubernetes
|
||||
description: |-
|
||||
Multiple values may be separated by commas.
|
||||
|
||||
* `aws` - AWS
|
||||
* `azure` - Azure
|
||||
* `gcp` - GCP
|
||||
* `kubernetes` - Kubernetes
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[provider_uid]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[provider_uid__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[provider_uid__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[region]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[region__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[region__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[resource_name]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[resource_name__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[resource_name__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[resource_type]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[resource_type__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[resource_type__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[resource_uid]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[resource_uid__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[resource_uid__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[resources]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
format: uuid
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[scan]
|
||||
schema:
|
||||
type: string
|
||||
format: uuid
|
||||
- in: query
|
||||
name: filter[scan__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
format: uuid
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- name: filter[search]
|
||||
required: false
|
||||
in: query
|
||||
description: A search term.
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[service]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[service__icontains]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[service__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[severity]
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- critical
|
||||
- high
|
||||
- informational
|
||||
- low
|
||||
- medium
|
||||
description: |-
|
||||
* `critical` - Critical
|
||||
* `high` - High
|
||||
* `medium` - Medium
|
||||
* `low` - Low
|
||||
* `informational` - Informational
|
||||
- in: query
|
||||
name: filter[severity__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[status]
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- FAIL
|
||||
- MANUAL
|
||||
- MUTED
|
||||
- PASS
|
||||
description: |-
|
||||
* `FAIL` - Fail
|
||||
* `PASS` - Pass
|
||||
* `MANUAL` - Manual
|
||||
* `MUTED` - Muted
|
||||
- in: query
|
||||
name: filter[status__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[uid]
|
||||
schema:
|
||||
type: string
|
||||
- in: query
|
||||
name: filter[uid__in]
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Multiple values may be separated by commas.
|
||||
explode: false
|
||||
style: form
|
||||
- in: query
|
||||
name: filter[updated_at]
|
||||
schema:
|
||||
type: string
|
||||
format: date
|
||||
- in: query
|
||||
name: filter[updated_at__gte]
|
||||
schema:
|
||||
type: string
|
||||
format: date-time
|
||||
- in: query
|
||||
name: filter[updated_at__lte]
|
||||
schema:
|
||||
type: string
|
||||
format: date-time
|
||||
- name: sort
|
||||
required: false
|
||||
in: query
|
||||
description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
enum:
|
||||
- status
|
||||
- -status
|
||||
- severity
|
||||
@@ -1168,11 +1590,11 @@ paths:
|
||||
security:
|
||||
- jwtAuth: []
|
||||
responses:
|
||||
'201':
|
||||
'200':
|
||||
content:
|
||||
application/vnd.api+json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/OpenApiResponseResponse'
|
||||
$ref: '#/components/schemas/FindingMetadataResponse'
|
||||
description: ''
|
||||
/api/v1/invitations/accept:
|
||||
post:
|
||||
@@ -2948,9 +3370,7 @@ paths:
|
||||
- name
|
||||
- manage_users
|
||||
- manage_account
|
||||
- manage_billing
|
||||
- manage_providers
|
||||
- manage_integrations
|
||||
- manage_scans
|
||||
- permission_state
|
||||
- unlimited_visibility
|
||||
@@ -3068,12 +3488,8 @@ paths:
|
||||
- -manage_users
|
||||
- manage_account
|
||||
- -manage_account
|
||||
- manage_billing
|
||||
- -manage_billing
|
||||
- manage_providers
|
||||
- -manage_providers
|
||||
- manage_integrations
|
||||
- -manage_integrations
|
||||
- manage_scans
|
||||
- -manage_scans
|
||||
- permission_state
|
||||
@@ -3147,9 +3563,7 @@ paths:
|
||||
- name
|
||||
- manage_users
|
||||
- manage_account
|
||||
- manage_billing
|
||||
- manage_providers
|
||||
- manage_integrations
|
||||
- manage_scans
|
||||
- permission_state
|
||||
- unlimited_visibility
|
||||
@@ -4825,8 +5239,8 @@ paths:
|
||||
description: ''
|
||||
delete:
|
||||
operationId: users_destroy
|
||||
description: Remove a user account from the system.
|
||||
summary: Delete a user account
|
||||
description: Remove the current user account from the system.
|
||||
summary: Delete the user account
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
@@ -5395,6 +5809,11 @@ components:
|
||||
type: string
|
||||
format: date-time
|
||||
readOnly: true
|
||||
first_seen_at:
|
||||
type: string
|
||||
format: date-time
|
||||
readOnly: true
|
||||
nullable: true
|
||||
required:
|
||||
- uid
|
||||
- status
|
||||
@@ -5458,6 +5877,89 @@ components:
|
||||
readOnly: true
|
||||
required:
|
||||
- scan
|
||||
FindingDynamicFilter:
|
||||
type: object
|
||||
required:
|
||||
- type
|
||||
- id
|
||||
additionalProperties: false
|
||||
properties:
|
||||
type:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/FindingDynamicFilterTypeEnum'
|
||||
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
|
||||
member is used to describe resource objects that share common attributes
|
||||
and relationships.
|
||||
id: {}
|
||||
attributes:
|
||||
type: object
|
||||
properties:
|
||||
services:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
regions:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
required:
|
||||
- services
|
||||
- regions
|
||||
FindingDynamicFilterResponse:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
$ref: '#/components/schemas/FindingDynamicFilter'
|
||||
required:
|
||||
- data
|
||||
FindingDynamicFilterTypeEnum:
|
||||
type: string
|
||||
enum:
|
||||
- finding-dynamic-filters
|
||||
FindingMetadata:
|
||||
type: object
|
||||
required:
|
||||
- type
|
||||
- id
|
||||
additionalProperties: false
|
||||
properties:
|
||||
type:
|
||||
allOf:
|
||||
- $ref: '#/components/schemas/FindingMetadataTypeEnum'
|
||||
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
|
||||
member is used to describe resource objects that share common attributes
|
||||
and relationships.
|
||||
id: {}
|
||||
attributes:
|
||||
type: object
|
||||
properties:
|
||||
services:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
regions:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
resource_types:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
required:
|
||||
- services
|
||||
- regions
|
||||
- resource_types
|
||||
FindingMetadataResponse:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
$ref: '#/components/schemas/FindingMetadata'
|
||||
required:
|
||||
- data
|
||||
FindingMetadataTypeEnum:
|
||||
type: string
|
||||
enum:
|
||||
- findings-metadata
|
||||
FindingResponse:
|
||||
type: object
|
||||
properties:
|
||||
@@ -5902,8 +6404,6 @@ components:
|
||||
- data
|
||||
description: A related resource object from type roles
|
||||
title: roles
|
||||
required:
|
||||
- roles
|
||||
InvitationUpdateResponse:
|
||||
type: object
|
||||
properties:
|
||||
@@ -5915,7 +6415,6 @@ components:
|
||||
type: object
|
||||
required:
|
||||
- type
|
||||
- id
|
||||
additionalProperties: false
|
||||
properties:
|
||||
type:
|
||||
@@ -5924,9 +6423,6 @@ components:
|
||||
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
|
||||
member is used to describe resource objects that share common attributes
|
||||
and relationships.
|
||||
id:
|
||||
type: string
|
||||
format: uuid
|
||||
attributes:
|
||||
type: object
|
||||
properties:
|
||||
@@ -6112,7 +6608,7 @@ components:
|
||||
type: integer
|
||||
fail:
|
||||
type: integer
|
||||
manual:
|
||||
muted:
|
||||
type: integer
|
||||
total:
|
||||
type: integer
|
||||
@@ -6437,8 +6933,6 @@ components:
|
||||
- data
|
||||
description: A related resource object from type roles
|
||||
title: roles
|
||||
required:
|
||||
- roles
|
||||
required:
|
||||
- data
|
||||
PatchedProviderGroupMembershipRequest:
|
||||
@@ -6644,6 +7138,9 @@ components:
|
||||
type: string
|
||||
description: The Amazon Resource Name (ARN) of the role to
|
||||
assume. Required for AWS role assumption.
|
||||
external_id:
|
||||
type: string
|
||||
description: An identifier to enhance security for role assumption.
|
||||
aws_access_key_id:
|
||||
type: string
|
||||
description: The AWS access key ID. Only required if the environment
|
||||
@@ -6662,10 +7159,6 @@ components:
|
||||
maximum: 43200
|
||||
default: 3600
|
||||
description: The duration (in seconds) for the role session.
|
||||
external_id:
|
||||
type: string
|
||||
description: An optional identifier to enhance security for
|
||||
role assumption; may be required by the role administrator.
|
||||
role_session_name:
|
||||
type: string
|
||||
description: |-
|
||||
@@ -6678,6 +7171,7 @@ components:
|
||||
pattern: ^[a-zA-Z0-9=,.@_-]+$
|
||||
required:
|
||||
- role_arn
|
||||
- external_id
|
||||
- type: object
|
||||
title: Azure Static Credentials
|
||||
properties:
|
||||
@@ -6850,12 +7344,8 @@ components:
|
||||
type: boolean
|
||||
manage_account:
|
||||
type: boolean
|
||||
manage_billing:
|
||||
type: boolean
|
||||
manage_providers:
|
||||
type: boolean
|
||||
manage_integrations:
|
||||
type: boolean
|
||||
manage_scans:
|
||||
type: boolean
|
||||
permission_state:
|
||||
@@ -7131,37 +7621,6 @@ components:
|
||||
required:
|
||||
- name
|
||||
- email
|
||||
relationships:
|
||||
type: object
|
||||
properties:
|
||||
roles:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
format: uuid
|
||||
title: Resource Identifier
|
||||
description: The identifier of the related object.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- roles
|
||||
title: Resource Type Name
|
||||
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
|
||||
member is used to describe resource objects that share
|
||||
common attributes and relationships.
|
||||
required:
|
||||
- id
|
||||
- type
|
||||
required:
|
||||
- data
|
||||
description: A related resource object from type roles
|
||||
title: roles
|
||||
required:
|
||||
- data
|
||||
Provider:
|
||||
@@ -7890,6 +8349,9 @@ components:
|
||||
type: string
|
||||
description: The Amazon Resource Name (ARN) of the role to assume.
|
||||
Required for AWS role assumption.
|
||||
external_id:
|
||||
type: string
|
||||
description: An identifier to enhance security for role assumption.
|
||||
aws_access_key_id:
|
||||
type: string
|
||||
description: The AWS access key ID. Only required if the environment
|
||||
@@ -7907,10 +8369,6 @@ components:
|
||||
maximum: 43200
|
||||
default: 3600
|
||||
description: The duration (in seconds) for the role session.
|
||||
external_id:
|
||||
type: string
|
||||
description: An optional identifier to enhance security for role
|
||||
assumption; may be required by the role administrator.
|
||||
role_session_name:
|
||||
type: string
|
||||
description: |-
|
||||
@@ -7923,6 +8381,7 @@ components:
|
||||
pattern: ^[a-zA-Z0-9=,.@_-]+$
|
||||
required:
|
||||
- role_arn
|
||||
- external_id
|
||||
- type: object
|
||||
title: Azure Static Credentials
|
||||
properties:
|
||||
@@ -8071,6 +8530,9 @@ components:
|
||||
type: string
|
||||
description: The Amazon Resource Name (ARN) of the role to
|
||||
assume. Required for AWS role assumption.
|
||||
external_id:
|
||||
type: string
|
||||
description: An identifier to enhance security for role assumption.
|
||||
aws_access_key_id:
|
||||
type: string
|
||||
description: The AWS access key ID. Only required if the environment
|
||||
@@ -8089,10 +8551,6 @@ components:
|
||||
maximum: 43200
|
||||
default: 3600
|
||||
description: The duration (in seconds) for the role session.
|
||||
external_id:
|
||||
type: string
|
||||
description: An optional identifier to enhance security for
|
||||
role assumption; may be required by the role administrator.
|
||||
role_session_name:
|
||||
type: string
|
||||
description: |-
|
||||
@@ -8105,6 +8563,7 @@ components:
|
||||
pattern: ^[a-zA-Z0-9=,.@_-]+$
|
||||
required:
|
||||
- role_arn
|
||||
- external_id
|
||||
- type: object
|
||||
title: Azure Static Credentials
|
||||
properties:
|
||||
@@ -8270,6 +8729,9 @@ components:
|
||||
type: string
|
||||
description: The Amazon Resource Name (ARN) of the role to assume.
|
||||
Required for AWS role assumption.
|
||||
external_id:
|
||||
type: string
|
||||
description: An identifier to enhance security for role assumption.
|
||||
aws_access_key_id:
|
||||
type: string
|
||||
description: The AWS access key ID. Only required if the environment
|
||||
@@ -8287,10 +8749,6 @@ components:
|
||||
maximum: 43200
|
||||
default: 3600
|
||||
description: The duration (in seconds) for the role session.
|
||||
external_id:
|
||||
type: string
|
||||
description: An optional identifier to enhance security for role
|
||||
assumption; may be required by the role administrator.
|
||||
role_session_name:
|
||||
type: string
|
||||
description: |-
|
||||
@@ -8303,6 +8761,7 @@ components:
|
||||
pattern: ^[a-zA-Z0-9=,.@_-]+$
|
||||
required:
|
||||
- role_arn
|
||||
- external_id
|
||||
- type: object
|
||||
title: Azure Static Credentials
|
||||
properties:
|
||||
@@ -8537,12 +8996,8 @@ components:
|
||||
type: boolean
|
||||
manage_account:
|
||||
type: boolean
|
||||
manage_billing:
|
||||
type: boolean
|
||||
manage_providers:
|
||||
type: boolean
|
||||
manage_integrations:
|
||||
type: boolean
|
||||
manage_scans:
|
||||
type: boolean
|
||||
permission_state:
|
||||
@@ -8670,12 +9125,8 @@ components:
|
||||
type: boolean
|
||||
manage_account:
|
||||
type: boolean
|
||||
manage_billing:
|
||||
type: boolean
|
||||
manage_providers:
|
||||
type: boolean
|
||||
manage_integrations:
|
||||
type: boolean
|
||||
manage_scans:
|
||||
type: boolean
|
||||
permission_state:
|
||||
@@ -8808,12 +9259,8 @@ components:
|
||||
type: boolean
|
||||
manage_account:
|
||||
type: boolean
|
||||
manage_billing:
|
||||
type: boolean
|
||||
manage_providers:
|
||||
type: boolean
|
||||
manage_integrations:
|
||||
type: boolean
|
||||
manage_scans:
|
||||
type: boolean
|
||||
permission_state:
|
||||
@@ -9877,37 +10324,6 @@ components:
|
||||
required:
|
||||
- name
|
||||
- email
|
||||
relationships:
|
||||
type: object
|
||||
properties:
|
||||
roles:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
format: uuid
|
||||
title: Resource Identifier
|
||||
description: The identifier of the related object.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- roles
|
||||
title: Resource Type Name
|
||||
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
|
||||
member is used to describe resource objects that share common
|
||||
attributes and relationships.
|
||||
required:
|
||||
- id
|
||||
- type
|
||||
required:
|
||||
- data
|
||||
description: A related resource object from type roles
|
||||
title: roles
|
||||
UserUpdateResponse:
|
||||
type: object
|
||||
properties:
|
||||
|
||||
@@ -261,6 +261,16 @@ class TestUserViewSet:
|
||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||
assert not User.objects.filter(id=create_test_user.id).exists()
|
||||
|
||||
def test_users_destroy_other_user(
|
||||
self, authenticated_client, create_test_user, users_fixture
|
||||
):
|
||||
user = users_fixture[2]
|
||||
response = authenticated_client.delete(
|
||||
reverse("user-detail", kwargs={"pk": str(user.id)})
|
||||
)
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert User.objects.filter(id=create_test_user.id).exists()
|
||||
|
||||
def test_users_destroy_invalid_user(self, authenticated_client, create_test_user):
|
||||
another_user = User.objects.create_user(
|
||||
password="otherpassword", email="other@example.com"
|
||||
@@ -268,7 +278,7 @@ class TestUserViewSet:
|
||||
response = authenticated_client.delete(
|
||||
reverse("user-detail", kwargs={"pk": another_user.id})
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
assert response.status_code == status.HTTP_400_BAD_REQUEST
|
||||
assert User.objects.filter(id=another_user.id).exists()
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -2444,6 +2454,16 @@ class TestFindingViewSet:
|
||||
("search", "ec2", 2),
|
||||
# full text search on finding tags
|
||||
("search", "value2", 2),
|
||||
# Temporary disabled until we implement tag filtering in the UI
|
||||
# ("resource_tag_key", "key", 2),
|
||||
# ("resource_tag_key__in", "key,key2", 2),
|
||||
# ("resource_tag_key__icontains", "key", 2),
|
||||
# ("resource_tag_value", "value", 2),
|
||||
# ("resource_tag_value__in", "value,value2", 2),
|
||||
# ("resource_tag_value__icontains", "value", 2),
|
||||
# ("resource_tags", "key:value", 2),
|
||||
# ("resource_tags", "not:exists", 0),
|
||||
# ("resource_tags", "not:exists,key:value", 2),
|
||||
]
|
||||
),
|
||||
)
|
||||
@@ -2582,30 +2602,35 @@ class TestFindingViewSet:
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
def test_findings_services_regions_retrieve(
|
||||
self, authenticated_client, findings_fixture
|
||||
):
|
||||
def test_findings_metadata_retrieve(self, authenticated_client, findings_fixture):
|
||||
finding_1, *_ = findings_fixture
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d")},
|
||||
)
|
||||
data = response.json()
|
||||
|
||||
expected_services = {"ec2", "s3"}
|
||||
expected_regions = {"eu-west-1", "us-east-1"}
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# expected_tags = {"key": ["value"], "key2": ["value2"]}
|
||||
expected_resource_types = {"prowler-test"}
|
||||
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert set(data["data"]["attributes"]["services"]) == expected_services
|
||||
assert set(data["data"]["attributes"]["regions"]) == expected_regions
|
||||
assert (
|
||||
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
|
||||
)
|
||||
# assert data["data"]["attributes"]["tags"] == expected_tags
|
||||
|
||||
def test_findings_services_regions_severity_retrieve(
|
||||
def test_findings_metadata_severity_retrieve(
|
||||
self, authenticated_client, findings_fixture
|
||||
):
|
||||
finding_1, *_ = findings_fixture
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{
|
||||
"filter[severity__in]": ["low", "medium"],
|
||||
"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d"),
|
||||
@@ -2615,26 +2640,36 @@ class TestFindingViewSet:
|
||||
|
||||
expected_services = {"s3"}
|
||||
expected_regions = {"eu-west-1"}
|
||||
# Temporary disabled until we implement tag filtering in the UI
|
||||
# expected_tags = {"key": ["value"], "key2": ["value2"]}
|
||||
expected_resource_types = {"prowler-test"}
|
||||
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert set(data["data"]["attributes"]["services"]) == expected_services
|
||||
assert set(data["data"]["attributes"]["regions"]) == expected_regions
|
||||
assert (
|
||||
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
|
||||
)
|
||||
# assert data["data"]["attributes"]["tags"] == expected_tags
|
||||
|
||||
def test_findings_services_regions_future_date(self, authenticated_client):
|
||||
def test_findings_metadata_future_date(self, authenticated_client):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": "2048-01-01"},
|
||||
)
|
||||
data = response.json()
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert data["data"]["attributes"]["services"] == []
|
||||
assert data["data"]["attributes"]["regions"] == []
|
||||
# Temporary disabled until we implement tag filtering in the UI
|
||||
# assert data["data"]["attributes"]["tags"] == {}
|
||||
assert data["data"]["attributes"]["resource_types"] == []
|
||||
|
||||
def test_findings_services_regions_invalid_date(self, authenticated_client):
|
||||
def test_findings_metadata_invalid_date(self, authenticated_client):
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": "2048-01-011"},
|
||||
)
|
||||
assert response.json() == {
|
||||
@@ -4249,18 +4284,15 @@ class TestOverviewViewSet:
|
||||
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
|
||||
|
||||
def test_overview_providers_list(
|
||||
self, authenticated_client, findings_fixture, resources_fixture
|
||||
self, authenticated_client, scan_summaries_fixture, resources_fixture
|
||||
):
|
||||
response = authenticated_client.get(reverse("overview-providers"))
|
||||
assert response.status_code == status.HTTP_200_OK
|
||||
# Only findings from one provider
|
||||
assert len(response.json()["data"]) == 1
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["total"] == len(
|
||||
findings_fixture
|
||||
)
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 0
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 2
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["manual"] == 0
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["total"] == 4
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 2
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 1
|
||||
assert response.json()["data"][0]["attributes"]["findings"]["muted"] == 1
|
||||
assert response.json()["data"][0]["attributes"]["resources"]["total"] == len(
|
||||
resources_fixture
|
||||
)
|
||||
|
||||
@@ -905,6 +905,7 @@ class FindingSerializer(RLSSerializer):
|
||||
"raw_result",
|
||||
"inserted_at",
|
||||
"updated_at",
|
||||
"first_seen_at",
|
||||
"url",
|
||||
# Relationships
|
||||
"scan",
|
||||
@@ -917,6 +918,7 @@ class FindingSerializer(RLSSerializer):
|
||||
}
|
||||
|
||||
|
||||
# To be removed when the related endpoint is removed as well
|
||||
class FindingDynamicFilterSerializer(serializers.Serializer):
|
||||
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
@@ -925,6 +927,19 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
|
||||
resource_name = "finding-dynamic-filters"
|
||||
|
||||
|
||||
class FindingMetadataSerializer(serializers.Serializer):
|
||||
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
|
||||
resource_types = serializers.ListField(
|
||||
child=serializers.CharField(), allow_empty=True
|
||||
)
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")
|
||||
|
||||
class Meta:
|
||||
resource_name = "findings-metadata"
|
||||
|
||||
|
||||
# Provider secrets
|
||||
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
|
||||
@staticmethod
|
||||
@@ -997,7 +1012,7 @@ class KubernetesProviderSecret(serializers.Serializer):
|
||||
|
||||
class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
role_arn = serializers.CharField()
|
||||
external_id = serializers.CharField(required=False)
|
||||
external_id = serializers.CharField()
|
||||
role_session_name = serializers.CharField(required=False)
|
||||
session_duration = serializers.IntegerField(
|
||||
required=False, min_value=900, max_value=43200
|
||||
@@ -1044,6 +1059,10 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
"description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
|
||||
"assumption.",
|
||||
},
|
||||
"external_id": {
|
||||
"type": "string",
|
||||
"description": "An identifier to enhance security for role assumption.",
|
||||
},
|
||||
"aws_access_key_id": {
|
||||
"type": "string",
|
||||
"description": "The AWS access key ID. Only required if the environment lacks pre-configured "
|
||||
@@ -1065,11 +1084,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
"default": 3600,
|
||||
"description": "The duration (in seconds) for the role session.",
|
||||
},
|
||||
"external_id": {
|
||||
"type": "string",
|
||||
"description": "An optional identifier to enhance security for role assumption; may be "
|
||||
"required by the role administrator.",
|
||||
},
|
||||
"role_session_name": {
|
||||
"type": "string",
|
||||
"description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
|
||||
@@ -1083,7 +1097,7 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
|
||||
"pattern": "^[a-zA-Z0-9=,.@_-]+$",
|
||||
},
|
||||
},
|
||||
"required": ["role_arn"],
|
||||
"required": ["role_arn", "external_id"],
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
@@ -1722,7 +1736,7 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
"properties": {
|
||||
"pass": {"type": "integer"},
|
||||
"fail": {"type": "integer"},
|
||||
"manual": {"type": "integer"},
|
||||
"muted": {"type": "integer"},
|
||||
"total": {"type": "integer"},
|
||||
},
|
||||
}
|
||||
@@ -1731,7 +1745,7 @@ class OverviewProviderSerializer(serializers.Serializer):
|
||||
return {
|
||||
"pass": obj["findings_passed"],
|
||||
"fail": obj["findings_failed"],
|
||||
"manual": obj["findings_manual"],
|
||||
"muted": obj["findings_muted"],
|
||||
"total": obj["total_findings"],
|
||||
}
|
||||
|
||||
|
||||
@@ -1,30 +1,31 @@
|
||||
from django.conf import settings
|
||||
from django.urls import include, path
|
||||
from drf_spectacular.views import SpectacularRedocView
|
||||
from rest_framework_nested import routers
|
||||
|
||||
from api.v1.views import (
|
||||
ComplianceOverviewViewSet,
|
||||
CustomTokenObtainView,
|
||||
CustomTokenRefreshView,
|
||||
FindingViewSet,
|
||||
MembershipViewSet,
|
||||
ProviderGroupViewSet,
|
||||
ProviderGroupProvidersRelationshipView,
|
||||
ProviderSecretViewSet,
|
||||
InvitationViewSet,
|
||||
InvitationAcceptViewSet,
|
||||
RoleViewSet,
|
||||
RoleProviderGroupRelationshipView,
|
||||
UserRoleRelationshipView,
|
||||
InvitationViewSet,
|
||||
MembershipViewSet,
|
||||
OverviewViewSet,
|
||||
ComplianceOverviewViewSet,
|
||||
ProviderGroupProvidersRelationshipView,
|
||||
ProviderGroupViewSet,
|
||||
ProviderSecretViewSet,
|
||||
ProviderViewSet,
|
||||
ResourceViewSet,
|
||||
RoleProviderGroupRelationshipView,
|
||||
RoleViewSet,
|
||||
ScanViewSet,
|
||||
ScheduleViewSet,
|
||||
SchemaView,
|
||||
TaskViewSet,
|
||||
TenantMembersViewSet,
|
||||
TenantViewSet,
|
||||
UserRoleRelationshipView,
|
||||
UserViewSet,
|
||||
)
|
||||
|
||||
@@ -112,3 +113,6 @@ urlpatterns = [
|
||||
path("schema", SchemaView.as_view(), name="schema"),
|
||||
path("docs", SpectacularRedocView.as_view(url_name="schema"), name="docs"),
|
||||
]
|
||||
|
||||
if settings.DEBUG:
|
||||
urlpatterns += [path("silk/", include("silk.urls", namespace="silk"))]
|
||||
|
||||
+106
-51
@@ -4,6 +4,7 @@ from django.contrib.postgres.aggregates import ArrayAgg
|
||||
from django.contrib.postgres.search import SearchQuery
|
||||
from django.db import transaction
|
||||
from django.db.models import Count, F, OuterRef, Prefetch, Q, Subquery, Sum
|
||||
from django.db.models.functions import Coalesce
|
||||
from django.urls import reverse
|
||||
from django.utils.decorators import method_decorator
|
||||
from django.views.decorators.cache import cache_control
|
||||
@@ -73,7 +74,6 @@ from api.models import (
|
||||
ScanSummary,
|
||||
SeverityChoices,
|
||||
StateChoices,
|
||||
StatusChoices,
|
||||
Task,
|
||||
User,
|
||||
UserRoleRelationship,
|
||||
@@ -87,6 +87,7 @@ from api.v1.serializers import (
|
||||
ComplianceOverviewFullSerializer,
|
||||
ComplianceOverviewSerializer,
|
||||
FindingDynamicFilterSerializer,
|
||||
FindingMetadataSerializer,
|
||||
FindingSerializer,
|
||||
InvitationAcceptSerializer,
|
||||
InvitationCreateSerializer,
|
||||
@@ -192,7 +193,7 @@ class SchemaView(SpectacularAPIView):
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
spectacular_settings.TITLE = "Prowler API"
|
||||
spectacular_settings.VERSION = "1.1.1"
|
||||
spectacular_settings.VERSION = "1.3.2"
|
||||
spectacular_settings.DESCRIPTION = (
|
||||
"Prowler API specification.\n\nThis file is auto-generated."
|
||||
)
|
||||
@@ -275,8 +276,8 @@ class SchemaView(SpectacularAPIView):
|
||||
),
|
||||
destroy=extend_schema(
|
||||
tags=["User"],
|
||||
summary="Delete a user account",
|
||||
description="Remove a user account from the system.",
|
||||
summary="Delete the user account",
|
||||
description="Remove the current user account from the system.",
|
||||
),
|
||||
me=extend_schema(
|
||||
tags=["User"],
|
||||
@@ -340,6 +341,12 @@ class UserViewSet(BaseUserViewset):
|
||||
status=status.HTTP_200_OK,
|
||||
)
|
||||
|
||||
def destroy(self, request, *args, **kwargs):
|
||||
if kwargs["pk"] != str(self.request.user.id):
|
||||
raise ValidationError("Only the current user can be deleted.")
|
||||
|
||||
return super().destroy(request, *args, **kwargs)
|
||||
|
||||
@extend_schema(
|
||||
parameters=[
|
||||
OpenApiParameter(
|
||||
@@ -1044,7 +1051,7 @@ class ScanViewSet(BaseRLSViewSet):
|
||||
"""
|
||||
if self.request.method in SAFE_METHODS:
|
||||
# No permissions required for GET requests
|
||||
self.required_permissions = [Permissions.MANAGE_PROVIDERS]
|
||||
self.required_permissions = []
|
||||
else:
|
||||
# Require permission for non-GET requests
|
||||
self.required_permissions = [Permissions.MANAGE_SCANS]
|
||||
@@ -1274,7 +1281,13 @@ class ResourceViewSet(BaseRLSViewSet):
|
||||
tags=["Finding"],
|
||||
summary="Retrieve the services and regions that are impacted by findings",
|
||||
description="Fetch services and regions affected in findings.",
|
||||
responses={201: OpenApiResponse(response=MembershipSerializer)},
|
||||
filters=True,
|
||||
deprecated=True,
|
||||
),
|
||||
metadata=extend_schema(
|
||||
tags=["Finding"],
|
||||
summary="Retrieve metadata values from findings",
|
||||
description="Fetch unique metadata values from a set of findings. This is useful for dynamic filtering.",
|
||||
filters=True,
|
||||
),
|
||||
)
|
||||
@@ -1292,9 +1305,8 @@ class FindingViewSet(BaseRLSViewSet):
|
||||
}
|
||||
http_method_names = ["get"]
|
||||
filterset_class = FindingFilter
|
||||
ordering = ["-id"]
|
||||
ordering = ["-inserted_at"]
|
||||
ordering_fields = [
|
||||
"id",
|
||||
"status",
|
||||
"severity",
|
||||
"check_id",
|
||||
@@ -1308,6 +1320,8 @@ class FindingViewSet(BaseRLSViewSet):
|
||||
def get_serializer_class(self):
|
||||
if self.action == "findings_services_regions":
|
||||
return FindingDynamicFilterSerializer
|
||||
elif self.action == "metadata":
|
||||
return FindingMetadataSerializer
|
||||
|
||||
return super().get_serializer_class()
|
||||
|
||||
@@ -1376,6 +1390,62 @@ class FindingViewSet(BaseRLSViewSet):
|
||||
|
||||
return Response(data=serializer.data, status=status.HTTP_200_OK)
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="metadata")
|
||||
def metadata(self, request):
|
||||
tenant_id = self.request.tenant_id
|
||||
queryset = self.get_queryset()
|
||||
filtered_queryset = self.filter_queryset(queryset)
|
||||
|
||||
relevant_resources = Resource.objects.filter(
|
||||
tenant_id=tenant_id, findings__in=filtered_queryset
|
||||
).distinct()
|
||||
|
||||
services = (
|
||||
relevant_resources.values_list("service", flat=True)
|
||||
.distinct()
|
||||
.order_by("service")
|
||||
)
|
||||
|
||||
regions = (
|
||||
relevant_resources.exclude(region="")
|
||||
.values_list("region", flat=True)
|
||||
.distinct()
|
||||
.order_by("region")
|
||||
)
|
||||
|
||||
resource_types = (
|
||||
relevant_resources.values_list("type", flat=True)
|
||||
.distinct()
|
||||
.order_by("type")
|
||||
)
|
||||
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# tag_data = (
|
||||
# relevant_resources
|
||||
# .filter(tags__key__isnull=False, tags__value__isnull=False)
|
||||
# .exclude(tags__key="")
|
||||
# .exclude(tags__value="")
|
||||
# .values("tags__key", "tags__value")
|
||||
# .distinct()
|
||||
# .order_by("tags__key", "tags__value")
|
||||
# )
|
||||
#
|
||||
# tags_dict = {}
|
||||
# for row in tag_data:
|
||||
# k, v = row["tags__key"], row["tags__value"]
|
||||
# tags_dict.setdefault(k, []).append(v)
|
||||
|
||||
result = {
|
||||
"services": list(services),
|
||||
"regions": list(regions),
|
||||
"resource_types": list(resource_types),
|
||||
# "tags": tags_dict
|
||||
}
|
||||
|
||||
serializer = self.get_serializer(data=result)
|
||||
serializer.is_valid(raise_exception=True)
|
||||
return Response(serializer.data, status=status.HTTP_200_OK)
|
||||
|
||||
|
||||
@extend_schema_view(
|
||||
list=extend_schema(
|
||||
@@ -1938,68 +2008,53 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
@action(detail=False, methods=["get"], url_name="providers")
|
||||
def providers(self, request):
|
||||
tenant_id = self.request.tenant_id
|
||||
# Subquery to get the most recent finding for each uid
|
||||
latest_finding_ids = (
|
||||
Finding.objects.filter(
|
||||
|
||||
latest_scan_ids = (
|
||||
Scan.objects.filter(
|
||||
tenant_id=tenant_id,
|
||||
uid=OuterRef("uid"),
|
||||
scan__provider=OuterRef("scan__provider"),
|
||||
state=StateChoices.COMPLETED,
|
||||
)
|
||||
.order_by("-id") # Most recent by id
|
||||
.values("id")[:1]
|
||||
.order_by("provider_id", "-inserted_at")
|
||||
.distinct("provider_id")
|
||||
.values_list("id", flat=True)
|
||||
)
|
||||
|
||||
# Filter findings to only include the most recent for each uid
|
||||
recent_findings = Finding.objects.filter(
|
||||
tenant_id=tenant_id, id__in=Subquery(latest_finding_ids)
|
||||
)
|
||||
|
||||
# Aggregate findings by provider
|
||||
findings_aggregated = (
|
||||
recent_findings.values("scan__provider__provider")
|
||||
ScanSummary.objects.filter(tenant_id=tenant_id, scan_id__in=latest_scan_ids)
|
||||
.values("scan__provider__provider")
|
||||
.annotate(
|
||||
findings_passed=Count("id", filter=Q(status=StatusChoices.PASS.value)),
|
||||
findings_failed=Count("id", filter=Q(status=StatusChoices.FAIL.value)),
|
||||
findings_manual=Count(
|
||||
"id", filter=Q(status=StatusChoices.MANUAL.value)
|
||||
),
|
||||
total_findings=Count("id"),
|
||||
findings_passed=Coalesce(Sum("_pass"), 0),
|
||||
findings_failed=Coalesce(Sum("fail"), 0),
|
||||
findings_muted=Coalesce(Sum("muted"), 0),
|
||||
total_findings=Coalesce(Sum("total"), 0),
|
||||
)
|
||||
.order_by("-findings_failed")
|
||||
)
|
||||
|
||||
# Aggregate total resources by provider
|
||||
resources_aggregated = (
|
||||
Resource.objects.filter(tenant_id=tenant_id)
|
||||
.values("provider__provider")
|
||||
.annotate(total_resources=Count("id"))
|
||||
)
|
||||
resources_dict = {
|
||||
row["provider__provider"]: row["total_resources"]
|
||||
for row in resources_aggregated
|
||||
}
|
||||
|
||||
# Combine findings and resources data
|
||||
overview = []
|
||||
for findings in findings_aggregated:
|
||||
provider = findings["scan__provider__provider"]
|
||||
total_resources = next(
|
||||
(
|
||||
res["total_resources"]
|
||||
for res in resources_aggregated
|
||||
if res["provider__provider"] == provider
|
||||
),
|
||||
0,
|
||||
)
|
||||
for row in findings_aggregated:
|
||||
provider_type = row["scan__provider__provider"]
|
||||
overview.append(
|
||||
{
|
||||
"provider": provider,
|
||||
"total_resources": total_resources,
|
||||
"total_findings": findings["total_findings"],
|
||||
"findings_passed": findings["findings_passed"],
|
||||
"findings_failed": findings["findings_failed"],
|
||||
"findings_manual": findings["findings_manual"],
|
||||
"provider": provider_type,
|
||||
"total_resources": resources_dict.get(provider_type, 0),
|
||||
"total_findings": row["total_findings"],
|
||||
"findings_passed": row["findings_passed"],
|
||||
"findings_failed": row["findings_failed"],
|
||||
"findings_muted": row["findings_muted"],
|
||||
}
|
||||
)
|
||||
|
||||
serializer = OverviewProviderSerializer(overview, many=True)
|
||||
|
||||
return Response(serializer.data, status=status.HTTP_200_OK)
|
||||
|
||||
@action(detail=False, methods=["get"], url_name="findings")
|
||||
@@ -2014,7 +2069,7 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
state=StateChoices.COMPLETED,
|
||||
provider_id=OuterRef("scan__provider_id"),
|
||||
)
|
||||
.order_by("-id")
|
||||
.order_by("-inserted_at")
|
||||
.values("id")[:1]
|
||||
)
|
||||
|
||||
@@ -2059,7 +2114,7 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
state=StateChoices.COMPLETED,
|
||||
provider_id=OuterRef("scan__provider_id"),
|
||||
)
|
||||
.order_by("-id")
|
||||
.order_by("-inserted_at")
|
||||
.values("id")[:1]
|
||||
)
|
||||
|
||||
@@ -2095,7 +2150,7 @@ class OverviewViewSet(BaseRLSViewSet):
|
||||
state=StateChoices.COMPLETED,
|
||||
provider_id=OuterRef("scan__provider_id"),
|
||||
)
|
||||
.order_by("-id")
|
||||
.order_by("-inserted_at")
|
||||
.values("id")[:1]
|
||||
)
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from config.django.base import * # noqa
|
||||
from config.env import env
|
||||
|
||||
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=True)
|
||||
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])
|
||||
|
||||
@@ -38,3 +37,9 @@ REST_FRAMEWORK["DEFAULT_FILTER_BACKENDS"] = tuple( # noqa: F405
|
||||
) + ("api.filters.CustomDjangoFilterBackend",)
|
||||
|
||||
SECRETS_ENCRYPTION_KEY = "ZMiYVo7m4Fbe2eXXPyrwxdJss2WSalXSv3xHBcJkPl0="
|
||||
|
||||
MIDDLEWARE += [ # noqa: F405
|
||||
"silk.middleware.SilkyMiddleware",
|
||||
]
|
||||
|
||||
INSTALLED_APPS += ["silk"] # noqa: F405
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from config.django.base import * # noqa
|
||||
from config.env import env
|
||||
|
||||
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=False)
|
||||
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from config.django.base import * # noqa
|
||||
from config.env import env
|
||||
|
||||
|
||||
DEBUG = env.bool("DJANGO_DEBUG", default=False)
|
||||
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
|
||||
|
||||
|
||||
@@ -626,6 +626,7 @@ def findings_fixture(scans_fixture, resources_fixture):
|
||||
"CheckId": "test_check_id",
|
||||
"Description": "test description apple sauce",
|
||||
},
|
||||
first_seen_at="2024-01-02T00:00:00Z",
|
||||
)
|
||||
|
||||
finding1.add_resources([resource1])
|
||||
@@ -651,6 +652,7 @@ def findings_fixture(scans_fixture, resources_fixture):
|
||||
"CheckId": "test_check_id",
|
||||
"Description": "test description orange juice",
|
||||
},
|
||||
first_seen_at="2024-01-02T00:00:00Z",
|
||||
)
|
||||
|
||||
finding2.add_resources([resource2])
|
||||
|
||||
@@ -152,6 +152,9 @@ def perform_prowler_scan(
|
||||
|
||||
for progress, findings in prowler_scan.scan():
|
||||
for finding in findings:
|
||||
if finding is None:
|
||||
logger.error(f"None finding detected on scan {scan_id}.")
|
||||
continue
|
||||
for attempt in range(CELERY_DEADLOCK_ATTEMPTS):
|
||||
try:
|
||||
with rls_transaction(tenant_id):
|
||||
@@ -176,7 +179,10 @@ def perform_prowler_scan(
|
||||
|
||||
# Update resource fields if necessary
|
||||
updated_fields = []
|
||||
if resource_instance.region != finding.region:
|
||||
if (
|
||||
finding.region
|
||||
and resource_instance.region != finding.region
|
||||
):
|
||||
resource_instance.region = finding.region
|
||||
updated_fields.append("region")
|
||||
if resource_instance.service != finding.service_name:
|
||||
@@ -219,24 +225,30 @@ def perform_prowler_scan(
|
||||
# Process finding
|
||||
with rls_transaction(tenant_id):
|
||||
finding_uid = finding.uid
|
||||
last_first_seen_at = None
|
||||
if finding_uid not in last_status_cache:
|
||||
most_recent_finding = (
|
||||
Finding.objects.filter(uid=finding_uid)
|
||||
.order_by("-id")
|
||||
.values("status")
|
||||
Finding.all_objects.filter(
|
||||
tenant_id=tenant_id, uid=finding_uid
|
||||
)
|
||||
.order_by("-inserted_at")
|
||||
.values("status", "first_seen_at")
|
||||
.first()
|
||||
)
|
||||
last_status = (
|
||||
most_recent_finding["status"]
|
||||
if most_recent_finding
|
||||
else None
|
||||
)
|
||||
last_status_cache[finding_uid] = last_status
|
||||
last_status = None
|
||||
if most_recent_finding:
|
||||
last_status = most_recent_finding["status"]
|
||||
last_first_seen_at = most_recent_finding["first_seen_at"]
|
||||
last_status_cache[finding_uid] = last_status, last_first_seen_at
|
||||
else:
|
||||
last_status = last_status_cache[finding_uid]
|
||||
last_status, last_first_seen_at = last_status_cache[finding_uid]
|
||||
|
||||
status = FindingStatus[finding.status]
|
||||
delta = _create_finding_delta(last_status, status)
|
||||
# For the findings prior to the change, when a first finding is found with delta!="new" it will be assigned a current date as first_seen_at and the successive findings with the same UID will always get the date of the previous finding.
|
||||
# For new findings, when a finding (delta="new") is found for the first time, the first_seen_at attribute will be assigned the current date, the following findings will get that date.
|
||||
if not last_first_seen_at:
|
||||
last_first_seen_at = datetime.now(tz=timezone.utc)
|
||||
|
||||
# Create the finding
|
||||
finding_instance = Finding.objects.create(
|
||||
@@ -251,6 +263,7 @@ def perform_prowler_scan(
|
||||
raw_result=finding.raw,
|
||||
check_id=finding.check_id,
|
||||
scan=scan_instance,
|
||||
first_seen_at=last_first_seen_at,
|
||||
)
|
||||
finding_instance.add_resources([resource_instance])
|
||||
|
||||
@@ -367,7 +380,7 @@ def aggregate_findings(tenant_id: str, scan_id: str):
|
||||
- muted_changed: Muted findings with a delta of 'changed'.
|
||||
"""
|
||||
with rls_transaction(tenant_id):
|
||||
findings = Finding.objects.filter(scan_id=scan_id)
|
||||
findings = Finding.objects.filter(tenant_id=tenant_id, scan_id=scan_id)
|
||||
|
||||
aggregation = findings.values(
|
||||
"check_id",
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
apiVersion: v2
|
||||
name: prowler-api
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "5.1.1"
|
||||
@@ -0,0 +1,22 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-api.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-api.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-api.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
||||
@@ -0,0 +1,62 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "prowler-api.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "prowler-api.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "prowler-api.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "prowler-api.labels" -}}
|
||||
helm.sh/chart: {{ include "prowler-api.chart" . }}
|
||||
{{ include "prowler-api.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "prowler-api.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "prowler-api.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "prowler-api.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "prowler-api.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}-config
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
data:
|
||||
config.yaml: |-
|
||||
{{- toYaml .Values.mainConfig | nindent 4 }}
|
||||
@@ -0,0 +1,85 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "prowler-api.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "prowler-api.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
{{- range $name,$config := .Values.containers }}
|
||||
{{- if $config.enabled }}
|
||||
- name: {{ $name }}
|
||||
securityContext:
|
||||
{{- toYaml $config.securityContext | nindent 12 }}
|
||||
image: "{{ $config.image.repository }}:{{ $config.image.tag | default $.Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ $config.image.pullPolicy }}
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "prowler-api.fullname" $ }}
|
||||
command:
|
||||
{{- toYaml $config.command | nindent 12 }}
|
||||
{{- if $config.ports }}
|
||||
ports:
|
||||
{{- toYaml $config.ports | nindent 12 }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
{{- toYaml $config.livenessProbe | nindent 12 }}
|
||||
readinessProbe:
|
||||
{{- toYaml $config.readinessProbe | nindent 12 }}
|
||||
resources:
|
||||
{{- toYaml $config.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: {{ include "prowler-api.fullname" $ }}-config
|
||||
mountPath: {{ $.Values.releaseConfigRoot }}{{ $.Values.releaseConfigPath }}
|
||||
subPath: config.yaml
|
||||
{{- with .volumeMounts }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: {{ include "prowler-api.fullname" . }}-config
|
||||
configMap:
|
||||
name: {{ include "prowler-api.fullname" . }}-config
|
||||
{{- with .Values.volumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,43 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.ingress.className }}
|
||||
ingressClassName: {{ . }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- with .pathType }}
|
||||
pathType: {{ . }}
|
||||
{{- end }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ include "prowler-api.fullname" $ }}
|
||||
port:
|
||||
number: {{ $.Values.service.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- range $k, $v := .Values.secrets }}
|
||||
{{ $k }}: {{ $v | toString | b64enc | quote }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,21 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "prowler-api.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
{{- range $name,$config := .Values.containers }}
|
||||
{{- if $config.ports }}
|
||||
{{- range $p := $config.ports }}
|
||||
- port: {{ $p.containerPort }}
|
||||
targetPort: {{ $p.containerPort }}
|
||||
protocol: TCP
|
||||
name: {{ $config.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "prowler-api.selectorLabels" . | nindent 4 }}
|
||||
@@ -0,0 +1,13 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "prowler-api.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "prowler-api.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,625 @@
|
||||
# Default values for prowler-api.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
|
||||
replicaCount: 1
|
||||
|
||||
# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
|
||||
containers:
|
||||
prowler-api:
|
||||
enabled: true
|
||||
image:
|
||||
repository: prowlercloud/prowler-api
|
||||
pullPolicy: IfNotPresent
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
protocol: TCP
|
||||
command: ["/home/prowler/docker-entrypoint.sh", "prod"]
|
||||
worker:
|
||||
enabled: true
|
||||
image:
|
||||
repository: prowlercloud/prowler-api
|
||||
pullPolicy: IfNotPresent
|
||||
command: ["/home/prowler/docker-entrypoint.sh", "worker"]
|
||||
worker-beat:
|
||||
enabled: true
|
||||
image:
|
||||
repository: prowlercloud/prowler-api
|
||||
pullPolicy: IfNotPresent
|
||||
command: ["../docker-entrypoint.sh", "beat"]
|
||||
|
||||
secrets:
|
||||
POSTGRES_HOST:
|
||||
POSTGRES_PORT: 5432
|
||||
POSTGRES_ADMIN_USER:
|
||||
POSTGRES_ADMIN_PASSWORD:
|
||||
POSTGRES_USER:
|
||||
POSTGRES_PASSWORD:
|
||||
POSTGRES_DB:
|
||||
# Valkey settings
|
||||
VALKEY_HOST: valkey-headless
|
||||
VALKEY_PORT: "6379"
|
||||
VALKEY_DB: "0"
|
||||
# Django settings
|
||||
DJANGO_ALLOWED_HOSTS: localhost,127.0.0.1,prowler-api
|
||||
DJANGO_BIND_ADDRESS: 0.0.0.0
|
||||
DJANGO_PORT: "8080"
|
||||
DJANGO_DEBUG: False
|
||||
DJANGO_SETTINGS_MODULE: config.django.production
|
||||
# Select one of [ndjson|human_readable]
|
||||
DJANGO_LOGGING_FORMATTER: human_readable
|
||||
# Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
|
||||
# Applies to both Django and Celery Workers
|
||||
DJANGO_LOGGING_LEVEL: INFO
|
||||
# Defaults to the maximum available based on CPU cores if not set.
|
||||
DJANGO_WORKERS: 2
|
||||
# Token lifetime is in minutes
|
||||
DJANGO_ACCESS_TOKEN_LIFETIME: "30"
|
||||
# Token lifetime is in minutes
|
||||
DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
|
||||
DJANGO_CACHE_MAX_AGE: "3600"
|
||||
DJANGO_STALE_WHILE_REVALIDATE: "60"
|
||||
DJANGO_MANAGE_DB_PARTITIONS: "False"
|
||||
# openssl genrsa -out private.pem 2048
|
||||
DJANGO_TOKEN_SIGNING_KEY:
|
||||
# openssl rsa -in private.pem -pubout -out public.pem
|
||||
DJANGO_TOKEN_VERIFYING_KEY:
|
||||
# openssl rand -base64 32
|
||||
DJANGO_SECRETS_ENCRYPTION_KEY:
|
||||
DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400
|
||||
|
||||
releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
|
||||
releaseConfigPath: prowler/config/config.yaml
|
||||
|
||||
mainConfig:
|
||||
# AWS Configuration
|
||||
aws:
|
||||
# AWS Global Configuration
|
||||
# aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
|
||||
mute_non_default_regions: False
|
||||
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
|
||||
# Mutelist:
|
||||
# Accounts:
|
||||
# "*":
|
||||
# Checks:
|
||||
# "*":
|
||||
# Regions:
|
||||
# - "ap-southeast-1"
|
||||
# - "ap-southeast-2"
|
||||
# Resources:
|
||||
# - "*"
|
||||
|
||||
# AWS IAM Configuration
|
||||
# aws.iam_user_accesskey_unused --> CIS recommends 45 days
|
||||
max_unused_access_keys_days: 45
|
||||
# aws.iam_user_console_access_unused --> CIS recommends 45 days
|
||||
max_console_access_days: 45
|
||||
|
||||
# AWS EC2 Configuration
|
||||
# aws.ec2_elastic_ip_shodan
|
||||
# TODO: create common config
|
||||
shodan_api_key: null
|
||||
# aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
|
||||
max_security_group_rules: 50
|
||||
# aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
|
||||
max_ec2_instance_age_in_days: 180
|
||||
# aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
|
||||
# allowed network interface types for security groups open to the Internet
|
||||
ec2_allowed_interface_types:
|
||||
[
|
||||
"api_gateway_managed",
|
||||
"vpc_endpoint",
|
||||
]
|
||||
# allowed network interface owners for security groups open to the Internet
|
||||
ec2_allowed_instance_owners:
|
||||
[
|
||||
"amazon-elb"
|
||||
]
|
||||
# aws.ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports
|
||||
ec2_high_risk_ports:
|
||||
[
|
||||
25,
|
||||
110,
|
||||
135,
|
||||
143,
|
||||
445,
|
||||
3000,
|
||||
4333,
|
||||
5000,
|
||||
5500,
|
||||
8080,
|
||||
8088,
|
||||
]
|
||||
|
||||
# AWS ECS Configuration
|
||||
# aws.ecs_service_fargate_latest_platform_version
|
||||
fargate_linux_latest_version: "1.4.0"
|
||||
fargate_windows_latest_version: "1.0.0"
|
||||
|
||||
# AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
|
||||
# AWS SSM Configuration (aws.ssm_documents_set_as_public)
|
||||
# Single account environment: No action required. The AWS account number will be automatically added by the checks.
|
||||
# Multi account environment: Any additional trusted account number should be added as a space separated list, e.g.
|
||||
# trusted_account_ids : ["123456789012", "098765432109", "678901234567"]
|
||||
trusted_account_ids: []
|
||||
|
||||
# AWS Cloudwatch Configuration
|
||||
# aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
|
||||
log_group_retention_days: 365
|
||||
|
||||
# AWS CloudFormation Configuration
|
||||
# cloudformation_stack_cdktoolkit_bootstrap_version --> by default is 21
|
||||
recommended_cdk_bootstrap_version: 21
|
||||
|
||||
# AWS AppStream Session Configuration
|
||||
# aws.appstream_fleet_session_idle_disconnect_timeout
|
||||
max_idle_disconnect_timeout_in_seconds: 600 # 10 Minutes
|
||||
# aws.appstream_fleet_session_disconnect_timeout
|
||||
max_disconnect_timeout_in_seconds: 300 # 5 Minutes
|
||||
# aws.appstream_fleet_maximum_session_duration
|
||||
max_session_duration_seconds: 36000 # 10 Hours
|
||||
|
||||
# AWS Lambda Configuration
|
||||
# aws.awslambda_function_using_supported_runtimes
|
||||
obsolete_lambda_runtimes:
|
||||
[
|
||||
"java8",
|
||||
"go1.x",
|
||||
"provided",
|
||||
"python3.6",
|
||||
"python2.7",
|
||||
"python3.7",
|
||||
"nodejs4.3",
|
||||
"nodejs4.3-edge",
|
||||
"nodejs6.10",
|
||||
"nodejs",
|
||||
"nodejs8.10",
|
||||
"nodejs10.x",
|
||||
"nodejs12.x",
|
||||
"nodejs14.x",
|
||||
"nodejs16.x",
|
||||
"dotnet5.0",
|
||||
"dotnet7",
|
||||
"dotnetcore1.0",
|
||||
"dotnetcore2.0",
|
||||
"dotnetcore2.1",
|
||||
"dotnetcore3.1",
|
||||
"ruby2.5",
|
||||
"ruby2.7",
|
||||
]
|
||||
# aws.awslambda_function_vpc_is_in_multi_azs
|
||||
lambda_min_azs: 2
|
||||
|
||||
# AWS Organizations
|
||||
# aws.organizations_scp_check_deny_regions
|
||||
# aws.organizations_enabled_regions: [
|
||||
# "eu-central-1",
|
||||
# "eu-west-1",
|
||||
# "us-east-1"
|
||||
# ]
|
||||
organizations_enabled_regions: []
|
||||
organizations_trusted_delegated_administrators: []
|
||||
|
||||
# AWS ECR
|
||||
# aws.ecr_repositories_scan_vulnerabilities_in_latest_image
|
||||
# CRITICAL
|
||||
# HIGH
|
||||
# MEDIUM
|
||||
ecr_repository_vulnerability_minimum_severity: "MEDIUM"
|
||||
|
||||
# AWS Trusted Advisor
|
||||
# aws.trustedadvisor_premium_support_plan_subscribed
|
||||
verify_premium_support_plans: True
|
||||
|
||||
# AWS CloudTrail Configuration
|
||||
# aws.cloudtrail_threat_detection_privilege_escalation
|
||||
threat_detection_privilege_escalation_threshold: 0.2 # Percentage of actions found to decide if it is an privilege_escalation attack event, by default is 0.2 (20%)
|
||||
threat_detection_privilege_escalation_minutes: 1440 # Past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
|
||||
threat_detection_privilege_escalation_actions:
|
||||
[
|
||||
"AddPermission",
|
||||
"AddRoleToInstanceProfile",
|
||||
"AddUserToGroup",
|
||||
"AssociateAccessPolicy",
|
||||
"AssumeRole",
|
||||
"AttachGroupPolicy",
|
||||
"AttachRolePolicy",
|
||||
"AttachUserPolicy",
|
||||
"ChangePassword",
|
||||
"CreateAccessEntry",
|
||||
"CreateAccessKey",
|
||||
"CreateDevEndpoint",
|
||||
"CreateEventSourceMapping",
|
||||
"CreateFunction",
|
||||
"CreateGroup",
|
||||
"CreateJob",
|
||||
"CreateKeyPair",
|
||||
"CreateLoginProfile",
|
||||
"CreatePipeline",
|
||||
"CreatePolicyVersion",
|
||||
"CreateRole",
|
||||
"CreateStack",
|
||||
"DeleteRolePermissionsBoundary",
|
||||
"DeleteRolePolicy",
|
||||
"DeleteUserPermissionsBoundary",
|
||||
"DeleteUserPolicy",
|
||||
"DetachRolePolicy",
|
||||
"DetachUserPolicy",
|
||||
"GetCredentialsForIdentity",
|
||||
"GetId",
|
||||
"GetPolicyVersion",
|
||||
"GetUserPolicy",
|
||||
"Invoke",
|
||||
"ModifyInstanceAttribute",
|
||||
"PassRole",
|
||||
"PutGroupPolicy",
|
||||
"PutPipelineDefinition",
|
||||
"PutRolePermissionsBoundary",
|
||||
"PutRolePolicy",
|
||||
"PutUserPermissionsBoundary",
|
||||
"PutUserPolicy",
|
||||
"ReplaceIamInstanceProfileAssociation",
|
||||
"RunInstances",
|
||||
"SetDefaultPolicyVersion",
|
||||
"UpdateAccessKey",
|
||||
"UpdateAssumeRolePolicy",
|
||||
"UpdateDevEndpoint",
|
||||
"UpdateEventSourceMapping",
|
||||
"UpdateFunctionCode",
|
||||
"UpdateJob",
|
||||
"UpdateLoginProfile",
|
||||
]
|
||||
# aws.cloudtrail_threat_detection_enumeration
|
||||
threat_detection_enumeration_threshold: 0.3 # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
|
||||
threat_detection_enumeration_minutes: 1440 # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
|
||||
threat_detection_enumeration_actions:
|
||||
[
|
||||
"DescribeAccessEntry",
|
||||
"DescribeAccountAttributes",
|
||||
"DescribeAvailabilityZones",
|
||||
"DescribeBundleTasks",
|
||||
"DescribeCarrierGateways",
|
||||
"DescribeClientVpnRoutes",
|
||||
"DescribeCluster",
|
||||
"DescribeDhcpOptions",
|
||||
"DescribeFlowLogs",
|
||||
"DescribeImages",
|
||||
"DescribeInstanceAttribute",
|
||||
"DescribeInstanceInformation",
|
||||
"DescribeInstanceTypes",
|
||||
"DescribeInstances",
|
||||
"DescribeInstances",
|
||||
"DescribeKeyPairs",
|
||||
"DescribeLogGroups",
|
||||
"DescribeLogStreams",
|
||||
"DescribeOrganization",
|
||||
"DescribeRegions",
|
||||
"DescribeSecurityGroups",
|
||||
"DescribeSnapshotAttribute",
|
||||
"DescribeSnapshotTierStatus",
|
||||
"DescribeSubscriptionFilters",
|
||||
"DescribeTransitGatewayMulticastDomains",
|
||||
"DescribeVolumes",
|
||||
"DescribeVolumesModifications",
|
||||
"DescribeVpcEndpointConnectionNotifications",
|
||||
"DescribeVpcs",
|
||||
"GetAccount",
|
||||
"GetAccountAuthorizationDetails",
|
||||
"GetAccountSendingEnabled",
|
||||
"GetBucketAcl",
|
||||
"GetBucketLogging",
|
||||
"GetBucketPolicy",
|
||||
"GetBucketReplication",
|
||||
"GetBucketVersioning",
|
||||
"GetCallerIdentity",
|
||||
"GetCertificate",
|
||||
"GetConsoleScreenshot",
|
||||
"GetCostAndUsage",
|
||||
"GetDetector",
|
||||
"GetEbsDefaultKmsKeyId",
|
||||
"GetEbsEncryptionByDefault",
|
||||
"GetFindings",
|
||||
"GetFlowLogsIntegrationTemplate",
|
||||
"GetIdentityVerificationAttributes",
|
||||
"GetInstances",
|
||||
"GetIntrospectionSchema",
|
||||
"GetLaunchTemplateData",
|
||||
"GetLaunchTemplateData",
|
||||
"GetLogRecord",
|
||||
"GetParameters",
|
||||
"GetPolicyVersion",
|
||||
"GetPublicAccessBlock",
|
||||
"GetQueryResults",
|
||||
"GetRegions",
|
||||
"GetSMSAttributes",
|
||||
"GetSMSSandboxAccountStatus",
|
||||
"GetSendQuota",
|
||||
"GetTransitGatewayRouteTableAssociations",
|
||||
"GetUserPolicy",
|
||||
"HeadObject",
|
||||
"ListAccessKeys",
|
||||
"ListAccounts",
|
||||
"ListAllMyBuckets",
|
||||
"ListAssociatedAccessPolicies",
|
||||
"ListAttachedUserPolicies",
|
||||
"ListClusters",
|
||||
"ListDetectors",
|
||||
"ListDomains",
|
||||
"ListFindings",
|
||||
"ListHostedZones",
|
||||
"ListIPSets",
|
||||
"ListIdentities",
|
||||
"ListInstanceProfiles",
|
||||
"ListObjects",
|
||||
"ListOrganizationalUnitsForParent",
|
||||
"ListOriginationNumbers",
|
||||
"ListPolicyVersions",
|
||||
"ListRoles",
|
||||
"ListRoles",
|
||||
"ListRules",
|
||||
"ListServiceQuotas",
|
||||
"ListSubscriptions",
|
||||
"ListTargetsByRule",
|
||||
"ListTopics",
|
||||
"ListUsers",
|
||||
"LookupEvents",
|
||||
"Search",
|
||||
]
|
||||
# aws.cloudtrail_threat_detection_llm_jacking
|
||||
threat_detection_llm_jacking_threshold: 0.4 # Percentage of actions found to decide if it is an LLM Jacking attack event, by default is 0.4 (40%)
|
||||
threat_detection_llm_jacking_minutes: 1440 # Past minutes to search from now for LLM Jacking attacks, by default is 1440 minutes (24 hours)
|
||||
threat_detection_llm_jacking_actions:
|
||||
[
|
||||
"PutUseCaseForModelAccess", # Submits a use case for model access, providing justification (Write).
|
||||
"PutFoundationModelEntitlement", # Grants entitlement for accessing a foundation model (Write).
|
||||
"PutModelInvocationLoggingConfiguration", # Configures logging for model invocations (Write).
|
||||
"CreateFoundationModelAgreement", # Creates a new agreement to use a foundation model (Write).
|
||||
"InvokeModel", # Invokes a specified Bedrock model for inference using provided prompt and parameters (Read).
|
||||
"InvokeModelWithResponseStream", # Invokes a Bedrock model for inference with real-time token streaming (Read).
|
||||
"GetUseCaseForModelAccess", # Retrieves an existing use case for model access (Read).
|
||||
"GetModelInvocationLoggingConfiguration", # Fetches the logging configuration for model invocations (Read).
|
||||
"GetFoundationModelAvailability", # Checks the availability of a foundation model for use (Read).
|
||||
"ListFoundationModelAgreementOffers", # Lists available agreement offers for accessing foundation models (List).
|
||||
"ListFoundationModels", # Lists the available foundation models in Bedrock (List).
|
||||
"ListProvisionedModelThroughputs", # Lists the provisioned throughput for previously created models (List).
|
||||
]
|
||||
|
||||
# AWS RDS Configuration
|
||||
# aws.rds_instance_backup_enabled
|
||||
# Whether to check RDS instance replicas or not
|
||||
check_rds_instance_replicas: False
|
||||
|
||||
# AWS ACM Configuration
|
||||
# aws.acm_certificates_expiration_check
|
||||
days_to_expire_threshold: 7
|
||||
# aws.acm_certificates_with_secure_key_algorithms
|
||||
insecure_key_algorithms:
|
||||
[
|
||||
"RSA-1024",
|
||||
"P-192",
|
||||
"SHA-1",
|
||||
]
|
||||
|
||||
# AWS EKS Configuration
|
||||
# aws.eks_control_plane_logging_all_types_enabled
|
||||
# EKS control plane logging types that must be enabled
|
||||
eks_required_log_types:
|
||||
[
|
||||
"api",
|
||||
"audit",
|
||||
"authenticator",
|
||||
"controllerManager",
|
||||
"scheduler",
|
||||
]
|
||||
|
||||
# aws.eks_cluster_uses_a_supported_version
|
||||
# EKS clusters must be version 1.28 or higher
|
||||
eks_cluster_oldest_version_supported: "1.28"
|
||||
|
||||
# AWS CodeBuild Configuration
|
||||
# aws.codebuild_project_no_secrets_in_variables
|
||||
# CodeBuild sensitive variables that are excluded from the check
|
||||
excluded_sensitive_environment_variables:
|
||||
[
|
||||
|
||||
]
|
||||
|
||||
# AWS ELB Configuration
|
||||
# aws.elb_is_in_multiple_az
|
||||
# Minimum number of Availability Zones that a CLB must be in
|
||||
elb_min_azs: 2
|
||||
|
||||
# AWS ELBv2 Configuration
|
||||
# aws.elbv2_is_in_multiple_az
|
||||
# Minimum number of Availability Zones that an ELBv2 must be in
|
||||
elbv2_min_azs: 2
|
||||
|
||||
|
||||
# AWS Secrets Configuration
|
||||
# Patterns to ignore in the secrets checks
|
||||
secrets_ignore_patterns: []
|
||||
|
||||
# AWS Secrets Manager Configuration
|
||||
# aws.secretsmanager_secret_unused
|
||||
# Maximum number of days a secret can be unused
|
||||
max_days_secret_unused: 90
|
||||
|
||||
# aws.secretsmanager_secret_rotated_periodically
|
||||
# Maximum number of days a secret should be rotated
|
||||
max_days_secret_unrotated: 90
|
||||
|
||||
# AWS Kinesis Configuration
|
||||
# Minimum retention period in hours for Kinesis streams
|
||||
min_kinesis_stream_retention_hours: 168 # 7 days
|
||||
|
||||
|
||||
# Azure Configuration
|
||||
azure:
|
||||
# Azure Network Configuration
|
||||
# azure.network_public_ip_shodan
|
||||
# TODO: create common config
|
||||
shodan_api_key: null
|
||||
|
||||
# Azure App Service
|
||||
# azure.app_ensure_php_version_is_latest
|
||||
php_latest_version: "8.2"
|
||||
# azure.app_ensure_python_version_is_latest
|
||||
python_latest_version: "3.12"
|
||||
# azure.app_ensure_java_version_is_latest
|
||||
java_latest_version: "17"
|
||||
|
||||
# Azure SQL Server
|
||||
# azure.sqlserver_minimal_tls_version
|
||||
recommended_minimal_tls_versions:
|
||||
[
|
||||
"1.2",
|
||||
"1.3",
|
||||
]
|
||||
|
||||
# GCP Configuration
|
||||
gcp:
|
||||
# GCP Compute Configuration
|
||||
# gcp.compute_public_address_shodan
|
||||
shodan_api_key: null
|
||||
|
||||
# Kubernetes Configuration
|
||||
kubernetes:
|
||||
# Kubernetes API Server
|
||||
# kubernetes.apiserver_audit_log_maxbackup_set
|
||||
audit_log_maxbackup: 10
|
||||
# kubernetes.apiserver_audit_log_maxsize_set
|
||||
audit_log_maxsize: 100
|
||||
# kubernetes.apiserver_audit_log_maxage_set
|
||||
audit_log_maxage: 30
|
||||
# kubernetes.apiserver_strong_ciphers_only
|
||||
apiserver_strong_ciphers:
|
||||
[
|
||||
"TLS_AES_128_GCM_SHA256",
|
||||
"TLS_AES_256_GCM_SHA384",
|
||||
"TLS_CHACHA20_POLY1305_SHA256",
|
||||
]
|
||||
# Kubelet
|
||||
# kubernetes.kubelet_strong_ciphers_only
|
||||
kubelet_strong_ciphers:
|
||||
[
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
]
|
||||
|
||||
|
||||
# This is for the secrets used for pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# This is to override the chart name.
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# This is for setting Kubernetes Annotations to a Pod.
|
||||
# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
podAnnotations: {}
|
||||
# This is for setting Kubernetes Labels to a Pod.
|
||||
# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
||||
podLabels: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
|
||||
service:
|
||||
# This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
|
||||
type: ClusterIP
|
||||
# This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
|
||||
port: 80
|
||||
|
||||
# This block is for setting up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: chart-example.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
|
||||
# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 100
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volumes: []
|
||||
# - name: foo
|
||||
# secret:
|
||||
# secretName: mysecret
|
||||
# optional: false
|
||||
|
||||
# Additional volumeMounts on the output Deployment definition.
|
||||
volumeMounts: []
|
||||
# - name: foo
|
||||
# mountPath: "/etc/foo"
|
||||
# readOnly: true
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
@@ -39,4 +39,3 @@ spec:
|
||||
path: {{ $value }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
@@ -0,0 +1,6 @@
|
||||
apiVersion: v2
|
||||
name: prowler-ui
|
||||
description: A Helm chart for Kubernetes
|
||||
type: application
|
||||
version: 0.1.0
|
||||
appVersion: "5.1.1"
|
||||
@@ -0,0 +1,22 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-ui.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-ui.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
||||
@@ -0,0 +1,62 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "prowler-ui.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "prowler-ui.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "prowler-ui.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "prowler-ui.labels" -}}
|
||||
helm.sh/chart: {{ include "prowler-ui.chart" . }}
|
||||
{{ include "prowler-ui.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "prowler-ui.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "prowler-ui.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "prowler-ui.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "prowler-ui.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,72 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "prowler-ui.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-ui.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "prowler-ui.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "prowler-ui.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "prowler-ui.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "prowler-ui.fullname" $ }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: {{ .Values.service.port }}
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 12 }}
|
||||
readinessProbe:
|
||||
{{- toYaml .Values.readinessProbe | nindent 12 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.volumeMounts }}
|
||||
volumeMounts:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.volumes }}
|
||||
volumes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,43 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "prowler-ui.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-ui.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.ingress.className }}
|
||||
ingressClassName: {{ . }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- with .pathType }}
|
||||
pathType: {{ . }}
|
||||
{{- end }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ include "prowler-ui.fullname" $ }}
|
||||
port:
|
||||
number: {{ $.Values.service.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "prowler-ui.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-ui.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- range $k, $v := .Values.secrets }}
|
||||
{{ $k }}: {{ $v | toString | b64enc | quote }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "prowler-ui.fullname" . }}
|
||||
labels:
|
||||
{{- include "prowler-ui.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
{{- include "prowler-ui.selectorLabels" . | nindent 4 }}
|
||||
@@ -0,0 +1,13 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "prowler-ui.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "prowler-ui.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,132 @@
|
||||
# Default values for prowler-ui.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
|
||||
replicaCount: 1
|
||||
|
||||
# This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
|
||||
image:
|
||||
repository: prowlercloud/prowler-ui
|
||||
# This sets the pull policy for images.
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
|
||||
# This is for the secretes for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
imagePullSecrets: []
|
||||
# This is to override the chart name.
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
secrets:
|
||||
SITE_URL: http://localhost:3000
|
||||
API_BASE_URL: http://prowler-api:8080/api/v1
|
||||
NEXT_PUBLIC_API_DOCS_URL: http://prowler-api:8080/api/v1/docs
|
||||
AUTH_TRUST_HOST: True
|
||||
UI_PORT: 3000
|
||||
# openssl rand -base64 32
|
||||
AUTH_SECRET:
|
||||
|
||||
#This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# This is for setting Kubernetes Annotations to a Pod.
|
||||
# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
podAnnotations: {}
|
||||
# This is for setting Kubernetes Labels to a Pod.
|
||||
# For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
|
||||
podLabels: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
|
||||
service:
|
||||
# This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
|
||||
type: ClusterIP
|
||||
# This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
|
||||
port: 3000
|
||||
|
||||
# This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: chart-example.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
|
||||
#This section is for setting up autoscaling more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 100
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volumes: []
|
||||
# - name: foo
|
||||
# secret:
|
||||
# secretName: mysecret
|
||||
# optional: false
|
||||
|
||||
# Additional volumeMounts on the output Deployment definition.
|
||||
volumeMounts: []
|
||||
# - name: foo
|
||||
# mountPath: "/etc/foo"
|
||||
# readOnly: true
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -0,0 +1,24 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_cis
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"REQUIREMENTS_ATTRIBUTES_SECTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
].copy()
|
||||
|
||||
return get_section_containers_cis(
|
||||
aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
|
||||
)
|
||||
@@ -0,0 +1,25 @@
|
||||
import warnings
|
||||
|
||||
from dashboard.common_methods import get_section_containers_cis
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
|
||||
def get_table(data):
|
||||
|
||||
aux = data[
|
||||
[
|
||||
"REQUIREMENTS_ID",
|
||||
"REQUIREMENTS_DESCRIPTION",
|
||||
"REQUIREMENTS_ATTRIBUTES_SECTION",
|
||||
"CHECKID",
|
||||
"STATUS",
|
||||
"REGION",
|
||||
"ACCOUNTID",
|
||||
"RESOURCEID",
|
||||
]
|
||||
].copy()
|
||||
|
||||
return get_section_containers_cis(
|
||||
aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
|
||||
)
|
||||
@@ -532,8 +532,8 @@ def get_bar_graph(df, column_name):
|
||||
|
||||
# Cut the text if it is too long
|
||||
for i in range(len(colums)):
|
||||
if len(colums[i]) > 15:
|
||||
colums[i] = colums[i][:15] + "..."
|
||||
if len(colums[i]) > 43:
|
||||
colums[i] = colums[i][:43] + "..."
|
||||
|
||||
fig = px.bar(
|
||||
df,
|
||||
|
||||
@@ -56,7 +56,6 @@ from prowler.providers.<provider>.lib.service.service import ServiceParentClass
|
||||
|
||||
|
||||
# Create a class for the Service
|
||||
################## <Service>
|
||||
class <Service>(ServiceParentClass):
|
||||
def __init__(self, provider):
|
||||
# Call Service Parent Class __init__
|
||||
|
||||
@@ -669,8 +669,9 @@ class Test_app_ensure_http_is_redirected_to_https:
|
||||
# Create the custom App object to be tested
|
||||
app_client.apps = {
|
||||
AZURE_SUBSCRIPTION_ID: {
|
||||
"app_id-1": WebApp(
|
||||
resource_id: WebApp(
|
||||
resource_id=resource_id,
|
||||
name="app_id-1",
|
||||
auth_enabled=True,
|
||||
configurations=mock.MagicMock(),
|
||||
client_cert_mode="Ignore",
|
||||
@@ -716,8 +717,9 @@ class Test_app_ensure_http_is_redirected_to_https:
|
||||
|
||||
app_client.apps = {
|
||||
AZURE_SUBSCRIPTION_ID: {
|
||||
"app_id-1": WebApp(
|
||||
resource_id: WebApp(
|
||||
resource_id=resource_id,
|
||||
name="app_id-1",
|
||||
auth_enabled=True,
|
||||
configurations=mock.MagicMock(),
|
||||
client_cert_mode="Ignore",
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
],
|
||||
"Resource": "*",
|
||||
"Effect": "Allow",
|
||||
"Sid": "AllowMoreReadForProwler"
|
||||
"Sid": "AllowMoreReadOnly"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
@@ -60,9 +60,10 @@
|
||||
"apigateway:GET"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:apigateway:*::/restapis/*",
|
||||
"arn:aws:apigateway:*::/apis/*"
|
||||
]
|
||||
"arn:*:apigateway:*::/restapis/*",
|
||||
"arn:*:apigateway:*::/apis/*"
|
||||
],
|
||||
"Sid": "AllowAPIGatewayReadOnly"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"securityhub:BatchImportFindings",
|
||||
"securityhub:GetFindings"
|
||||
],
|
||||
"Effect": "Allow",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,127 @@
|
||||
AWSTemplateFormatVersion: "2010-09-09"
|
||||
|
||||
# You can invoke CloudFormation and pass the principal ARN from a command line like this:
|
||||
# aws cloudformation create-stack \
|
||||
# --capabilities CAPABILITY_IAM --capabilities CAPABILITY_NAMED_IAM \
|
||||
# --template-body "file://prowler-pro-saas-scan-role.yaml" \
|
||||
# --stack-name "ProwlerProSaaSScanRole" \
|
||||
# --parameters "ParameterKey=ExternalId,ParameterValue=ProvidedExternalID"
|
||||
|
||||
Description: |
|
||||
This template creates the ProwlerScan IAM Role in this account with
|
||||
all read-only permissions to scan your account for security issues.
|
||||
Contains two AWS managed policies (SecurityAudit and ViewOnlyAccess) and an inline policy.
|
||||
It sets the trust policy on that IAM Role to permit Prowler to assume that role.
|
||||
Parameters:
|
||||
ExternalId:
|
||||
Description: |
|
||||
This is the External ID that Prowler will use to assume the role ProwlerScan IAM Role.
|
||||
Type: String
|
||||
MinLength: 1
|
||||
AllowedPattern: ".+"
|
||||
ConstraintDescription: "ExternalId must not be empty."
|
||||
AccountId:
|
||||
Description: |
|
||||
AWS Account ID that will assume the role created, if you are deploying this template to be used in Prowler Cloud please do not edit this.
|
||||
Type: String
|
||||
Default: "232136659152"
|
||||
MinLength: 12
|
||||
MaxLength: 12
|
||||
AllowedPattern: "[0-9]{12}"
|
||||
ConstraintDescription: "AccountId must be a valid AWS Account ID."
|
||||
IAMPrincipal:
|
||||
Description: |
|
||||
The IAM principal type and name that will be allowed to assume the role created, leave an * for all the IAM principals in your AWS account. If you are deploying this template to be used in Prowler Cloud please do not edit this.
|
||||
Type: String
|
||||
Default: role/prowler*
|
||||
|
||||
Resources:
|
||||
ProwlerScan:
|
||||
Type: AWS::IAM::Role
|
||||
Properties:
|
||||
RoleName: ProwlerScan
|
||||
AssumeRolePolicyDocument:
|
||||
Version: "2012-10-17"
|
||||
Statement:
|
||||
- Effect: Allow
|
||||
Principal:
|
||||
AWS: !Sub "arn:${AWS::Partition}:iam::${AccountId}:root"
|
||||
Action: "sts:AssumeRole"
|
||||
Condition:
|
||||
StringEquals:
|
||||
"sts:ExternalId": !Sub ${ExternalId}
|
||||
StringLike:
|
||||
"aws:PrincipalArn": !Sub "arn:${AWS::Partition}:iam::${AccountId}:${IAMPrincipal}"
|
||||
MaxSessionDuration: 3600
|
||||
ManagedPolicyArns:
|
||||
- "arn:aws:iam::aws:policy/SecurityAudit"
|
||||
- "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"
|
||||
Policies:
|
||||
- PolicyName: ProwlerScan
|
||||
PolicyDocument:
|
||||
Version: "2012-10-17"
|
||||
Statement:
|
||||
- Sid: AllowMoreReadOnly
|
||||
Effect: Allow
|
||||
Action:
|
||||
- "account:Get*"
|
||||
- "appstream:Describe*"
|
||||
- "appstream:List*"
|
||||
- "backup:List*"
|
||||
- "bedrock:List*"
|
||||
- "bedrock:Get*"
|
||||
- "cloudtrail:GetInsightSelectors"
|
||||
- "codeartifact:List*"
|
||||
- "codebuild:BatchGet*"
|
||||
- "codebuild:ListReportGroups"
|
||||
- "cognito-idp:GetUserPoolMfaConfig"
|
||||
- "dlm:Get*"
|
||||
- "drs:Describe*"
|
||||
- "ds:Get*"
|
||||
- "ds:Describe*"
|
||||
- "ds:List*"
|
||||
- "dynamodb:GetResourcePolicy"
|
||||
- "ec2:GetEbsEncryptionByDefault"
|
||||
- "ec2:GetSnapshotBlockPublicAccessState"
|
||||
- "ec2:GetInstanceMetadataDefaults"
|
||||
- "ecr:Describe*"
|
||||
- "ecr:GetRegistryScanningConfiguration"
|
||||
- "elasticfilesystem:DescribeBackupPolicy"
|
||||
- "glue:GetConnections"
|
||||
- "glue:GetSecurityConfiguration*"
|
||||
- "glue:SearchTables"
|
||||
- "lambda:GetFunction*"
|
||||
- "logs:FilterLogEvents"
|
||||
- "lightsail:GetRelationalDatabases"
|
||||
- "macie2:GetMacieSession"
|
||||
- "macie2:GetAutomatedDiscoveryConfiguration"
|
||||
- "s3:GetAccountPublicAccessBlock"
|
||||
- "shield:DescribeProtection"
|
||||
- "shield:GetSubscriptionState"
|
||||
- "securityhub:BatchImportFindings"
|
||||
- "securityhub:GetFindings"
|
||||
- "servicecatalog:Describe*"
|
||||
- "servicecatalog:List*"
|
||||
- "ssm:GetDocument"
|
||||
- "ssm-incidents:List*"
|
||||
- "states:ListTagsForResource"
|
||||
- "support:Describe*"
|
||||
- "tag:GetTagKeys"
|
||||
- "wellarchitected:List*"
|
||||
Resource: "*"
|
||||
- Sid: AllowAPIGatewayReadOnly
|
||||
Effect: Allow
|
||||
Action:
|
||||
- "apigateway:GET"
|
||||
Resource:
|
||||
- "arn:*:apigateway:*::/restapis/*"
|
||||
- "arn:*:apigateway:*::/apis/*"
|
||||
Tags:
|
||||
- Key: "Service"
|
||||
Value: "https://prowler.com"
|
||||
- Key: "Support"
|
||||
Value: "support@prowler.com"
|
||||
- Key: "CloudFormation"
|
||||
Value: "true"
|
||||
- Key: "Name"
|
||||
Value: "ProwlerScan"
|
||||
@@ -0,0 +1,10 @@
|
||||
## Deployment using Terraform
|
||||
|
||||
To deploy the Prowler Scan Role in order to allow to scan you AWS account from Prowler, please run the following commands in your terminal:
|
||||
1. `terraform init`
|
||||
2. `terraform plan`
|
||||
3. `terraform apply`
|
||||
|
||||
During the `terraform plan` and `terraform apply` steps you will be asked for an External ID to be configured in the `ProwlerScan` IAM role.
|
||||
|
||||
> Note that Terraform will use the AWS credentials of your default profile.
|
||||
@@ -0,0 +1,111 @@
|
||||
# Variables
|
||||
###################################
|
||||
variable "external_id" {
|
||||
type = string
|
||||
description = "This is the External ID that Prowler will use to assume the role ProwlerScan IAM Role."
|
||||
|
||||
validation {
|
||||
condition = length(var.external_id) > 0
|
||||
error_message = "ExternalId must not be empty."
|
||||
}
|
||||
}
|
||||
|
||||
variable "account_id" {
|
||||
type = string
|
||||
description = "AWS Account ID that will assume the role created, if you are deploying this template to be used in Prowler Cloud please do not edit this."
|
||||
default = "232136659152"
|
||||
|
||||
validation {
|
||||
condition = length(var.account_id) == 12
|
||||
error_message = "AccountId must be a valid AWS Account ID."
|
||||
}
|
||||
}
|
||||
|
||||
variable "iam_principal" {
|
||||
type = string
|
||||
description = "The IAM principal type and name that will be allowed to assume the role created, leave an * for all the IAM principals in your AWS account. If you are deploying this template to be used in Prowler Cloud please do not edit this."
|
||||
default = "role/prowler*"
|
||||
}
|
||||
|
||||
##### PLEASE, DO NOT EDIT BELOW THIS LINE #####
|
||||
|
||||
|
||||
# Terraform Provider Configuration
|
||||
###################################
|
||||
terraform {
|
||||
required_version = ">= 1.5"
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = "~> 5.83"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
region = "us-east-1"
|
||||
default_tags {
|
||||
tags = {
|
||||
"Name" = "ProwlerScan",
|
||||
"Terraform" = "true",
|
||||
"Service" = "https://prowler.com",
|
||||
"Support" = "support@prowler.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_partition" "current" {}
|
||||
|
||||
|
||||
# IAM Role
|
||||
###################################
|
||||
data "aws_iam_policy_document" "prowler_assume_role_policy" {
|
||||
statement {
|
||||
actions = ["sts:AssumeRole"]
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = ["arn:${data.aws_partition.current.partition}:iam::${var.account_id}:root"]
|
||||
}
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "sts:ExternalId"
|
||||
values = [
|
||||
var.external_id,
|
||||
]
|
||||
}
|
||||
condition {
|
||||
test = "StringLike"
|
||||
variable = "aws:PrincipalArn"
|
||||
values = [
|
||||
"arn:${data.aws_partition.current.partition}:iam::${var.account_id}:${var.iam_principal}",
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "prowler_scan" {
|
||||
name = "ProwlerScan"
|
||||
assume_role_policy = data.aws_iam_policy_document.prowler_assume_role_policy.json
|
||||
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "prowler_scan_policy" {
|
||||
name = "ProwlerScan"
|
||||
description = "Prowler Scan Policy"
|
||||
policy = file("../../prowler-additions-policy.json")
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "prowler_scan_policy_attachment" {
|
||||
role = aws_iam_role.prowler_scan.name
|
||||
policy_arn = aws_iam_policy.prowler_scan_policy.arn
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "prowler_scan_securityaudit_policy_attachment" {
|
||||
role = aws_iam_role.prowler_scan.name
|
||||
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/SecurityAudit"
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "prowler_scan_viewonly_policy_attachment" {
|
||||
role = aws_iam_role.prowler_scan.name
|
||||
policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/job-function/ViewOnlyAccess"
|
||||
}
|
||||
Generated
+942
-919
File diff suppressed because it is too large
Load Diff
+9
-6
@@ -327,18 +327,21 @@ def prowler():
|
||||
# Outputs
|
||||
# TODO: this part is needed since the checks generates a Check_Report_XXX and the output uses Finding
|
||||
# This will be refactored for the outputs generate directly the Finding
|
||||
finding_outputs = [
|
||||
Finding.generate_output(global_provider, finding, output_options)
|
||||
for finding in findings
|
||||
]
|
||||
finding_outputs = []
|
||||
for finding in findings:
|
||||
try:
|
||||
finding_outputs.append(
|
||||
Finding.generate_output(global_provider, finding, output_options)
|
||||
)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
generated_outputs = {"regular": [], "compliance": []}
|
||||
|
||||
if args.output_formats:
|
||||
for mode in args.output_formats:
|
||||
filename = (
|
||||
f"{output_options.output_directory}/"
|
||||
f"{output_options.output_filename}"
|
||||
f"{output_options.output_directory}/{output_options.output_filename}"
|
||||
)
|
||||
if mode == "csv":
|
||||
csv_output = CSV(
|
||||
|
||||
@@ -28,7 +28,9 @@
|
||||
"Service": "ebs"
|
||||
}
|
||||
],
|
||||
"Checks": []
|
||||
"Checks": [
|
||||
"ec2_ebs_volume_snapshots_exists"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Id": "1.0.3",
|
||||
@@ -42,7 +44,8 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"ec2_ebs_default_encryption"
|
||||
"ec2_ebs_default_encryption",
|
||||
"ec2_ebs_volume_encryption"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -87,7 +90,9 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_user_mfa_enabled_console_access"
|
||||
"iam_user_mfa_enabled_console_access",
|
||||
"iam_user_hardware_mfa_enabled",
|
||||
"iam_root_mfa_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -102,7 +107,9 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_user_mfa_enabled_console_access"
|
||||
"iam_user_mfa_enabled_console_access",
|
||||
"iam_user_hardware_mfa_enabled",
|
||||
"iam_root_mfa_enabled"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -117,7 +124,9 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"iam_root_mfa_enabled"
|
||||
"iam_root_mfa_enabled",
|
||||
"iam_root_hardware_mfa_enabled",
|
||||
"iam_user_mfa_enabled_console_access"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -162,7 +171,10 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"rds_instance_no_public_access"
|
||||
"rds_instance_no_public_access",
|
||||
"s3_bucket_public_access",
|
||||
"s3_bucket_public_list_acl",
|
||||
"s3_account_level_public_access_blocks"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -192,7 +204,8 @@
|
||||
}
|
||||
],
|
||||
"Checks": [
|
||||
"rds_instance_storage_encrypted"
|
||||
"rds_instance_storage_encrypted",
|
||||
"rds_instance_transport_encrypted"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -455,7 +455,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
|
||||
@@ -476,7 +477,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
|
||||
@@ -497,7 +499,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
|
||||
@@ -518,7 +521,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
|
||||
@@ -540,7 +544,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
|
||||
@@ -561,7 +566,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
|
||||
@@ -578,11 +584,13 @@
|
||||
"Id": "2.3.1",
|
||||
"Description": "Ensure that encryption is enabled for RDS Instances",
|
||||
"Checks": [
|
||||
"rds_instance_storage_encrypted"
|
||||
"rds_instance_storage_encrypted",
|
||||
"rds_instance_transport_encrypted"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
|
||||
|
||||
@@ -455,7 +455,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
|
||||
@@ -476,7 +477,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
|
||||
@@ -497,7 +499,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
|
||||
@@ -518,7 +521,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
|
||||
@@ -540,7 +544,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
|
||||
@@ -561,7 +566,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
|
||||
@@ -578,11 +584,13 @@
|
||||
"Id": "2.3.1",
|
||||
"Description": "Ensure that encryption is enabled for RDS Instances",
|
||||
"Checks": [
|
||||
"rds_instance_storage_encrypted"
|
||||
"rds_instance_storage_encrypted",
|
||||
"rds_instance_transport_encrypted"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
|
||||
@@ -603,7 +611,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
|
||||
@@ -624,7 +633,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
|
||||
@@ -645,7 +655,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.4 Elastic File System (EFS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.4 Elastic File System (EFS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
|
||||
|
||||
@@ -303,7 +303,9 @@
|
||||
{
|
||||
"Id": "1.22",
|
||||
"Description": "Ensure access to AWSCloudShellFullAccess is restricted",
|
||||
"Checks": [],
|
||||
"Checks": [
|
||||
"iam_policy_cloudshell_admin_not_attached"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1. Identity and Access Management",
|
||||
@@ -474,7 +476,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
|
||||
@@ -491,11 +494,13 @@
|
||||
"Id": "2.1.2",
|
||||
"Description": "Ensure MFA Delete is enabled on S3 buckets",
|
||||
"Checks": [
|
||||
"s3_bucket_no_mfa_delete"
|
||||
"s3_bucket_no_mfa_delete",
|
||||
"cloudtrail_bucket_requires_mfa_delete"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
|
||||
@@ -516,7 +521,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
|
||||
@@ -538,7 +544,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
|
||||
@@ -559,7 +566,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
|
||||
@@ -576,11 +584,13 @@
|
||||
"Id": "2.3.1",
|
||||
"Description": "Ensure that encryption is enabled for RDS Instances",
|
||||
"Checks": [
|
||||
"rds_instance_storage_encrypted"
|
||||
"rds_instance_storage_encrypted",
|
||||
"rds_instance_transport_encrypted"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
|
||||
@@ -601,7 +611,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
|
||||
@@ -622,7 +633,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
|
||||
@@ -643,7 +655,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.4 Elastic File System (EFS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.4 Elastic File System (EFS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
|
||||
@@ -1338,7 +1351,8 @@
|
||||
"Id": "5.6",
|
||||
"Description": "Ensure that EC2 Metadata Service only allows IMDSv2",
|
||||
"Checks": [
|
||||
"ec2_instance_imdsv2_enabled"
|
||||
"ec2_instance_imdsv2_enabled",
|
||||
"ec2_instance_account_imdsv2_enabled"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
|
||||
@@ -474,7 +474,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
|
||||
@@ -495,7 +496,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
|
||||
@@ -516,7 +518,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
|
||||
@@ -538,7 +541,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1. Simple Storage Service (S3)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.1. Simple Storage Service (S3)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
|
||||
@@ -559,7 +563,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
|
||||
@@ -580,7 +585,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
|
||||
@@ -601,7 +607,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
|
||||
@@ -622,7 +629,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.3. Relational Database Service (RDS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.3. Relational Database Service (RDS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to anypublicly accessible RDS database instance, you must disable the database PubliclyAccessible flag and update the VPC security group associated with the instance",
|
||||
@@ -643,7 +651,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.4 Elastic File System (EFS)",
|
||||
"Section": "2. Storage",
|
||||
"SubSection": "2.4 Elastic File System (EFS)",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
|
||||
|
||||
@@ -12,7 +12,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Security defaults in Azure Active Directory (Azure AD) make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security",
|
||||
@@ -34,7 +35,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; • Service Co-Administrators • Subscription Owners • Contributors",
|
||||
@@ -56,7 +58,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable multi-factor authentication for all non-privileged users.",
|
||||
@@ -76,7 +79,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Do not allow users to remember multi-factor authentication on devices.",
|
||||
@@ -98,7 +102,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Azure Active Directory Conditional Access allows an organization to configure Named locations and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
|
||||
@@ -118,7 +123,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "CAUTION: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
|
||||
@@ -138,7 +144,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
|
||||
@@ -158,7 +165,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
|
||||
@@ -178,7 +186,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
|
||||
@@ -198,7 +207,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
|
||||
@@ -220,7 +230,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Require administrators or appropriately delegated users to create new tenants.",
|
||||
@@ -240,7 +250,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This recommendation extends guest access review by utilizing the Azure AD Privileged Identity Management feature provided in Azure AD Premium P2. Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources a a guest user.",
|
||||
@@ -260,7 +270,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user. Guest users in every subscription should be review on a regular basis to ensure that inactive and unneeded accounts are removed.",
|
||||
@@ -280,7 +290,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensures that two alternate forms of identification are provided before allowing a password reset.",
|
||||
@@ -300,7 +310,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Azure provides a Global Banned Password policy that applies to Azure administrative and normal user accounts. This is not applied to user accounts that are synced from an on-premise Active Directory unless Azure AD Connect is used and you enable EnforceCloudPasswordPolicyForPasswordSyncedUsers. Please see the list in default values on the specifics of this policy. To further password security, it is recommended to further define a custom banned password policy.",
|
||||
@@ -320,7 +330,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that the number of days before users are asked to re-confirm their authentication information is not set to 0.",
|
||||
@@ -340,7 +350,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that users are notified on their primary and secondary emails on password resets.",
|
||||
@@ -360,7 +370,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that all Global Administrators are notified if any other administrator resets their password.",
|
||||
@@ -382,7 +392,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Require administrators to provide consent for applications before use.",
|
||||
@@ -404,7 +414,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Allow users to provide consent for selected permissions when a request is coming from a verified publisher.",
|
||||
@@ -424,7 +434,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Require administrators to provide consent for the apps before use.",
|
||||
@@ -446,7 +456,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Require administrators or appropriately delegated users to register third-party applications.",
|
||||
@@ -468,7 +478,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Limit guest user permissions.",
|
||||
@@ -490,7 +500,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Restrict invitations to users with specific administrative roles only.",
|
||||
@@ -510,7 +520,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Restrict access to the Azure AD administration portal to administrators only. NOTE: This only affects access to the Azure AD administrator's web portal. This setting does not prohibit privileged users from using other methods such as Rest API or Powershell to obtain sensitive information from Azure AD.",
|
||||
@@ -530,7 +540,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Restricts group creation to administrators with permissions only.",
|
||||
@@ -552,7 +562,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Restrict security group creation to administrators only.",
|
||||
@@ -572,7 +582,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Restrict security group management to administrators only.",
|
||||
@@ -594,7 +604,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Restrict Microsoft 365 group creation to administrators only.",
|
||||
@@ -614,7 +624,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Joining or registering devices to the active directory should require Multi-factor authentication.",
|
||||
@@ -636,7 +646,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The principle of least privilege should be followed and only necessary privileges should be assigned instead of allowing full administrative access.",
|
||||
@@ -658,7 +668,7 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Resource locking is a powerful protection mechanism that can prevent inadvertent modification/deletion of resources within Azure subscriptions/Resource Groups and is a recommended NIST configuration.",
|
||||
@@ -678,7 +688,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1. Identity and Access Management",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Users who are set as subscription owners are able to make administrative changes to the subscriptions and move them into and out of Azure Active Directories.",
|
||||
@@ -700,7 +710,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -722,7 +733,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -744,7 +756,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Databases enables threat detection for the instances running your database software. This provides threat intelligence, anomaly detection, and behavior analytics in the Azure Microsoft Defender for Cloud. Instead of being enabled on services like Platform as a Service (PaaS), this implementation will run within your instances as Infrastructure as a Service (IaaS) on the Operating Systems hosting your databases.",
|
||||
@@ -766,7 +779,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Azure SQL database servers, providing threat intelligence, anomaly detection, andbehavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -788,7 +802,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -810,7 +825,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -832,7 +848,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -854,7 +871,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -876,7 +894,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
|
||||
@@ -898,7 +917,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -920,7 +940,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
|
||||
@@ -942,7 +963,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
|
||||
@@ -964,7 +986,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
|
||||
@@ -986,7 +1009,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "None of the settings offered by ASC Default policy should be set to effect Disabled.",
|
||||
@@ -1008,7 +1032,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
|
||||
@@ -1030,7 +1055,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
|
||||
@@ -1050,7 +1076,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
|
||||
@@ -1072,7 +1099,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable security alert emails to subscription owners.",
|
||||
@@ -1094,7 +1122,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
|
||||
@@ -1116,7 +1145,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
|
||||
@@ -1138,7 +1168,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
|
||||
@@ -1160,7 +1191,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. IMPORTANT: When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 2. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
|
||||
@@ -1182,7 +1214,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2 Microsoft Defender for IoT",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.2 Microsoft Defender for IoT",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
|
||||
@@ -1524,7 +1557,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable auditing on SQL Servers.",
|
||||
@@ -1546,7 +1580,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
|
||||
@@ -1568,7 +1603,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
|
||||
@@ -1590,7 +1626,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Use Azure Active Directory Authentication for authentication with SQL Database to manage credentials in a single place.",
|
||||
@@ -1612,7 +1649,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Transparent Data Encryption on every SQL server.",
|
||||
@@ -1634,7 +1672,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
|
||||
@@ -1656,7 +1695,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable 'Microsoft Defender for SQL' on critical SQL Servers.",
|
||||
@@ -1678,7 +1718,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
|
||||
@@ -1700,7 +1741,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
|
||||
@@ -1722,7 +1764,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Configure 'Send scan reports to' with email addresses of concerned data owners/stakeholders for a critical SQL servers",
|
||||
@@ -1744,7 +1787,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Vulnerability Assessment (VA) setting 'Also send email notifications to admins and subscription owners'.",
|
||||
@@ -1766,7 +1810,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable SSL connection on PostgreSQL Servers.",
|
||||
@@ -1788,7 +1833,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable log_checkpoints on PostgreSQL Servers.",
|
||||
@@ -1810,7 +1856,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable log_connections on PostgreSQL Servers.",
|
||||
@@ -1832,7 +1879,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable log_disconnections on PostgreSQL Servers.",
|
||||
@@ -1854,7 +1902,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable connection_throttling on PostgreSQL Servers.",
|
||||
@@ -1876,7 +1925,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure log_retention_days on PostgreSQL Servers is set to an appropriate value.",
|
||||
@@ -1898,7 +1948,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
|
||||
@@ -1918,7 +1969,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
|
||||
@@ -1940,7 +1992,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable SSL connection on MYSQL Servers.",
|
||||
@@ -1962,7 +2015,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure TLS version on MySQL flexible servers is set to the default value.",
|
||||
@@ -1984,7 +2038,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable audit_log_enabled on MySQL Servers.",
|
||||
@@ -2006,7 +2061,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Set audit_log_enabled to include CONNECTION on MySQL Servers.",
|
||||
@@ -2028,7 +2084,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
|
||||
@@ -2050,7 +2107,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Private endpoints limit network traffic to approved sources.",
|
||||
@@ -2072,7 +2130,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Cosmos DB can use tokens or AAD for client authentication which in turn will use Azure RBAC for authorization. Using AAD is significantly more secure because AAD handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
|
||||
@@ -2094,7 +2153,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnos tic settings are available for each individual resource within a subscription. Settings should be configured for allappropriate resources for your environment.",
|
||||
@@ -2116,7 +2176,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Prerequisite: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: 'Ensure that a 'Diagnostic Setting' exists.' The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
|
||||
@@ -2138,7 +2199,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The storage account container containing the activity log export should not be publicly accessible.",
|
||||
@@ -2160,7 +2222,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
|
||||
@@ -2182,7 +2245,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
|
||||
@@ -2204,7 +2268,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
|
||||
@@ -2226,7 +2291,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
|
||||
@@ -2248,7 +2314,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create Policy Assignment event.",
|
||||
@@ -2270,7 +2337,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
|
||||
@@ -2292,7 +2360,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
|
||||
@@ -2314,7 +2383,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Network Security Group event.",
|
||||
@@ -2336,7 +2406,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
|
||||
@@ -2358,7 +2429,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Security Solution event.",
|
||||
@@ -2380,7 +2452,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
|
||||
@@ -2402,7 +2475,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
|
||||
@@ -2424,7 +2498,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
|
||||
@@ -2446,7 +2521,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
|
||||
@@ -2466,7 +2542,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Resource Logs capture activity to the data access plane while the Activity log is a subscription-level log for the control plane. Resource-level diagnostic logs provide insight into operations that were performed within that resource itself; for example, reading or updating a secret from a Key Vault. Currently, 95 Azure resources support Azure Monitoring (See the more information section for a complete list), including Network Security Groups, Load Balancers, Key Vault, AD, Logic Apps, and CosmosDB. The content of these logs varies by resource type. A number of back-end services were not configured to log and store Resource Logs for certain activities or for a sufficient length. It is crucial that monitoring is correctly configured to log all relevant activities and retain those logs for a sufficient length of time. Given that the mean time to detection in an enterprise is 240 days, a minimum retention period of two years is recommended.",
|
||||
@@ -2486,7 +2562,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The use of Basic or Free SKUs in Azure whilst cost effective have significant limitations in terms of what can be monitored and what support can be realized from Microsoft. Typically, these SKU’s do not have a service SLA and Microsoft will usually refuse to provide support for them. Consequently Basic/Free SKUs should never be used for production workloads.",
|
||||
@@ -2508,7 +2584,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.3 Configuring Application Insights",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
|
||||
|
||||
@@ -494,7 +494,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults Security Defaults",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults Security Defaults",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Security defaults in Microsoft Entra ID make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security enabled at no extra cost. You may turn on security defaults in the Azure portal.",
|
||||
@@ -516,7 +517,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults Security Defaults",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults Security Defaults",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; - Service Co-Administrators - Subscription Owners - Contributors",
|
||||
@@ -538,7 +540,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults Security Defaults",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable multi-factor authentication for all non-privileged users.",
|
||||
@@ -558,7 +561,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.1 Security Defaults",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.1 Security Defaults Security Defaults",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Do not allow users to remember multi-factor authentication on devices.",
|
||||
@@ -580,7 +584,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Entra ID Conditional Access allows an organization to configure `Named locations` and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
|
||||
@@ -600,7 +605,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "**CAUTION**: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
|
||||
@@ -620,7 +626,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
|
||||
@@ -640,7 +647,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
|
||||
@@ -660,7 +668,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
|
||||
@@ -682,7 +691,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This recommendation ensures that users accessing the Windows Azure Service Management API (i.e. Azure Powershell, Azure CLI, Azure Resource Manager API, etc.) are required to use multifactor authentication (MFA) credentials when accessing resources through the Windows Azure Service Management API.",
|
||||
@@ -702,7 +712,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "1.2 Conditional Access",
|
||||
"Section": "1.Identity and Access Management",
|
||||
"SubSection": "1.2 Conditional Access",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This recommendation ensures that users accessing Microsoft Admin Portals (i.e. Microsoft 365 Admin, Microsoft 365 Defender, Exchange Admin Center, Azure Portal, etc.) are required to use multifactor authentication (MFA) credentials when logging into an Admin Portal.",
|
||||
@@ -724,7 +735,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -746,7 +758,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -768,7 +781,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Managed Instance Azure SQL databases, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
|
||||
@@ -790,7 +804,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
|
||||
@@ -812,7 +827,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -834,7 +850,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
|
||||
@@ -856,7 +873,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -878,7 +896,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud. The following services will be enabled for container instances: - Defender agent in Azure - Azure Policy for Kubernetes - Agentless discovery for Kubernetes - Agentless container vulnerability assessment",
|
||||
@@ -900,7 +919,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -922,7 +942,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "[**NOTE:** As of August 1, customers with an existing subscription to Defender for DNS can continue to use the service, but new subscribers will receive alerts about suspicious DNS activity as part of Defender for Servers P2.] Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
|
||||
@@ -944,7 +965,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
|
||||
@@ -966,7 +988,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
|
||||
@@ -988,7 +1011,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "The Microsoft Cloud Security Benchmark (or MCSB) is an Azure Policy Initiative containing many security policies to evaluate resource configuration against best practice recommendations. If a policy in the MCSB is set with effect type `Disabled`, it is not evaluated and may prevent administrators from being informed of valuable security recommendations.",
|
||||
@@ -1010,7 +1034,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
|
||||
@@ -1032,7 +1057,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
|
||||
@@ -1052,7 +1078,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
|
||||
@@ -1074,7 +1101,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable security alert emails to subscription owners.",
|
||||
@@ -1096,7 +1124,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
|
||||
@@ -1118,7 +1147,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
|
||||
@@ -1140,7 +1170,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
|
||||
@@ -1162,7 +1193,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. **IMPORTANT:** When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 1. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
|
||||
@@ -1182,7 +1214,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "An organization's attack surface is the collection of assets with a public network identifier or URI that an external threat actor can see or access from outside your cloud. It is the set of points on the boundary of a system, a system element, system component, or an environment where an attacker can try to enter, cause an effect on, or extract data from, that system, system element, system component, or environment. The larger the attack surface, the harder it is to protect. This tool can be configured to scan your organization's online infrastructure such as specified domains, hosts, CIDR blocks, and SSL certificates, and store them in an Inventory. Inventory items can be added, reviewed, approved, and removed, and may contain enrichments (insights) and additional information collected from the tool's different scan engines and open-source intelligence sources. A Defender EASM workspace will generate an Inventory of publicly exposed assets by crawling and scanning the internet using _Seeds_ you provide when setting up the tool. Seeds can be FQDNs, IP CIDR blocks, and WHOIS records. Defender EASM will generate Insights within 24-48 hours after Seeds are provided, and these insights include vulnerability data (CVEs), ports and protocols, and weak or expired SSL certificates that could be used by an attacker for reconnaisance or exploitation. Results are classified High/Medium/Low and some of them include proposed mitigations.",
|
||||
@@ -1204,7 +1237,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2 Microsoft Defender for IoT",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.2 Microsoft Defender for IoT",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
|
||||
@@ -1586,7 +1620,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable auditing on SQL Servers.",
|
||||
@@ -1608,7 +1643,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
|
||||
@@ -1630,7 +1666,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
|
||||
@@ -1652,7 +1689,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Use Microsoft Entra authentication for authentication with SQL Database to manage credentials in a single place.",
|
||||
@@ -1674,7 +1712,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Transparent Data Encryption on every SQL server.",
|
||||
@@ -1696,7 +1735,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
|
||||
@@ -1718,7 +1758,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable `SSL connection` on `PostgreSQL` Servers.",
|
||||
@@ -1740,7 +1781,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable `log_checkpoints` on `PostgreSQL Servers`.",
|
||||
@@ -1762,7 +1804,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable `log_connections` on `PostgreSQL Servers`.",
|
||||
@@ -1784,7 +1827,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable `log_disconnections` on `PostgreSQL Servers`.",
|
||||
@@ -1806,7 +1850,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable `connection_throttling` on `PostgreSQL Servers`.",
|
||||
@@ -1828,7 +1873,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure `log_retention_days` on `PostgreSQL Servers` is set to an appropriate value.",
|
||||
@@ -1850,7 +1896,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
|
||||
@@ -1870,7 +1917,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
|
||||
@@ -1892,7 +1940,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable `SSL connection` on `MYSQL` Servers.",
|
||||
@@ -1914,7 +1963,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure `TLS version` on `MySQL flexible` servers is set to use TLS version 1.2 or higher.",
|
||||
@@ -1936,7 +1986,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable audit_log_enabled on MySQL Servers.",
|
||||
@@ -1958,7 +2009,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Set `audit_log_enabled` to include CONNECTION on MySQL Servers.",
|
||||
@@ -1980,7 +2032,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
|
||||
@@ -2002,7 +2055,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Private endpoints limit network traffic to approved sources.",
|
||||
@@ -2024,7 +2078,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Cosmos DB can use tokens or Entra ID for client authentication which in turn will use Azure RBAC for authorization. Using Entra ID is significantly more secure because Entra ID handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
|
||||
@@ -2086,7 +2141,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
|
||||
@@ -2108,7 +2164,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "**Prerequisite**: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: Ensure that a 'Diagnostic Setting' exists. The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
|
||||
@@ -2130,7 +2187,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
|
||||
@@ -2152,7 +2210,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
|
||||
@@ -2174,7 +2233,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
|
||||
@@ -2196,7 +2256,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
|
||||
@@ -2218,7 +2279,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create Policy Assignment event.",
|
||||
@@ -2240,7 +2302,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
|
||||
@@ -2262,7 +2325,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
|
||||
@@ -2284,7 +2348,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Network Security Group event.",
|
||||
@@ -2306,7 +2371,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
|
||||
@@ -2328,7 +2394,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Security Solution event.",
|
||||
@@ -2350,7 +2417,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
|
||||
@@ -2372,7 +2440,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete SQL Server Firewall Rule.",
|
||||
@@ -2394,7 +2463,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
|
||||
@@ -2416,7 +2486,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
|
||||
@@ -2438,7 +2509,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights. Storage Accounts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.3 Configuring Application Insights. Storage Accounts",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
|
||||
@@ -3043,7 +3115,9 @@
|
||||
{
|
||||
"Id": "9.4",
|
||||
"Description": "Ensure that Register with Entra ID is enabled on App Service",
|
||||
"Checks": [],
|
||||
"Checks": [
|
||||
"app_register_with_identity"
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "9. AppService",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1292,7 +1292,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.1. MySQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.1. MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances. This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
|
||||
@@ -1313,7 +1314,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.1. MySQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.1. MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
|
||||
@@ -1334,7 +1336,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.1. MySQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.1. MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
|
||||
@@ -1355,7 +1358,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are: - `TERSE` - `DEFAULT` - `VERBOSE` `TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information. `VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error. Ensure an appropriate value is set to 'DEFAULT' or stricter.",
|
||||
@@ -1376,7 +1380,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
|
||||
@@ -1397,7 +1402,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are: - `none` - `ddl` - `mod` - `all` The value `ddl` logs all data definition statements. The value `mod` logs all ddl statements, plus data-modifying statements. The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included. A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
|
||||
@@ -1418,7 +1424,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet. In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC). Limiting network access to your database will limit potential attacks.",
|
||||
@@ -1439,7 +1446,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
|
||||
@@ -1460,7 +1468,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
|
||||
@@ -1481,7 +1490,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
|
||||
@@ -1502,7 +1512,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
|
||||
@@ -1523,7 +1534,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
|
||||
@@ -1544,7 +1556,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
|
||||
@@ -1565,7 +1578,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
|
||||
@@ -1586,7 +1600,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
|
||||
@@ -1607,7 +1622,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
|
||||
@@ -1628,7 +1644,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
|
||||
@@ -1649,7 +1666,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.",
|
||||
@@ -1670,7 +1688,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.",
|
||||
|
||||
@@ -1330,7 +1330,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.1. MySQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.1. MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances.This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
|
||||
@@ -1352,7 +1353,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.1. MySQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.1. MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
|
||||
@@ -1374,7 +1376,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.1. MySQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.1. MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
|
||||
@@ -1396,7 +1399,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are:- `TERSE`- `DEFAULT`- `VERBOSE``TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information.`VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error.Ensure an appropriate value is set to 'DEFAULT' or stricter.",
|
||||
@@ -1418,7 +1422,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
|
||||
@@ -1440,7 +1445,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
|
||||
@@ -1462,7 +1468,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are:- `none`- `ddl`- `mod`- `all`The value `ddl` logs all data definition statements.The value `mod` logs all ddl statements, plus data-modifying statements.The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included.A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
|
||||
@@ -1484,7 +1491,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
|
||||
@@ -1506,7 +1514,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
|
||||
@@ -1528,7 +1537,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
|
||||
@@ -1550,7 +1560,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.2. PostgreSQL Database",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.2. PostgreSQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
|
||||
@@ -1572,7 +1583,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
|
||||
@@ -1594,7 +1606,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.This flag is deprecated for all SQL Server versions in CGP. Going forward, you can't set its value to on. However, if you have this flag enabled, we strongly recommend that you either remove the flag from your database or set it to off. For cross-database access, use the [Microsoft tutorial for signing stored procedures with a certificate](https://learn.microsoft.com/en-us/sql/relational-databases/tutorial-signing-stored-procedures-with-a-certificate?view=sql-server-ver16).",
|
||||
@@ -1616,7 +1629,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
|
||||
@@ -1638,7 +1652,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
|
||||
@@ -1660,7 +1675,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
|
||||
@@ -1682,7 +1698,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
|
||||
@@ -1704,7 +1721,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "6.3. SQL Server",
|
||||
"Section": "6. Cloud SQL Database Services",
|
||||
"SubSection": "6.3. SQL Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "It is recommended not to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `on`.",
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -12,7 +12,7 @@ from prowler.lib.logger import logger
|
||||
|
||||
timestamp = datetime.today()
|
||||
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
|
||||
prowler_version = "5.1.0"
|
||||
prowler_version = "5.2.4"
|
||||
html_logo_url = "https://github.com/prowler-cloud/prowler/"
|
||||
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
|
||||
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
|
||||
|
||||
@@ -378,6 +378,37 @@ aws:
|
||||
# Minimum retention period in hours for Kinesis streams
|
||||
min_kinesis_stream_retention_hours: 168 # 7 days
|
||||
|
||||
# Detect Secrets plugin configuration
|
||||
detect_secrets_plugins: [
|
||||
{"name": "ArtifactoryDetector"},
|
||||
{"name": "AWSKeyDetector"},
|
||||
{"name": "AzureStorageKeyDetector"},
|
||||
{"name": "BasicAuthDetector"},
|
||||
{"name": "CloudantDetector"},
|
||||
{"name": "DiscordBotTokenDetector"},
|
||||
{"name": "GitHubTokenDetector"},
|
||||
{"name": "GitLabTokenDetector"},
|
||||
{"name": "Base64HighEntropyString", "limit": 6.0},
|
||||
{"name": "HexHighEntropyString", "limit": 3.0},
|
||||
{"name": "IbmCloudIamDetector"},
|
||||
{"name": "IbmCosHmacDetector"},
|
||||
# {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
|
||||
{"name": "JwtTokenDetector"},
|
||||
{"name": "KeywordDetector"},
|
||||
{"name": "MailchimpDetector"},
|
||||
{"name": "NpmDetector"},
|
||||
{"name": "OpenAIDetector"},
|
||||
{"name": "PrivateKeyDetector"},
|
||||
{"name": "PypiTokenDetector"},
|
||||
{"name": "SendGridDetector"},
|
||||
{"name": "SlackDetector"},
|
||||
{"name": "SoftlayerDetector"},
|
||||
{"name": "SquareOAuthDetector"},
|
||||
{"name": "StripeDetector"},
|
||||
# {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
|
||||
{"name": "TwilioKeyDetector"},
|
||||
]
|
||||
|
||||
|
||||
# Azure Configuration
|
||||
azure:
|
||||
|
||||
@@ -83,6 +83,7 @@ class CIS_Requirement_Attribute(BaseModel):
|
||||
"""CIS Requirement Attribute"""
|
||||
|
||||
Section: str
|
||||
SubSection: Optional[str]
|
||||
Profile: CIS_Requirement_Attribute_Profile
|
||||
AssessmentStatus: CIS_Requirement_Attribute_AssessmentStatus
|
||||
Description: str
|
||||
|
||||
+76
-35
@@ -3,9 +3,9 @@ import os
|
||||
import re
|
||||
import sys
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import asdict, dataclass, is_dataclass
|
||||
from enum import Enum
|
||||
from typing import Set
|
||||
from typing import Any, Dict, Set
|
||||
|
||||
from pydantic import BaseModel, ValidationError, validator
|
||||
|
||||
@@ -405,15 +405,34 @@ class Check_Report:
|
||||
status: str
|
||||
status_extended: str
|
||||
check_metadata: CheckMetadata
|
||||
resource_metadata: dict
|
||||
resource: dict
|
||||
resource_details: str
|
||||
resource_tags: list
|
||||
muted: bool
|
||||
|
||||
def __init__(self, metadata, resource=None):
|
||||
def __init__(self, metadata: Dict, resource: Any) -> None:
|
||||
"""Initialize the Check's finding information.
|
||||
|
||||
Args:
|
||||
metadata: The metadata of the check.
|
||||
resource: Basic information about the resource. Defaults to None.
|
||||
Only accepted dict, list, BaseModels (dict attribute), custom models (with to_dict attribute) and dataclasses.
|
||||
"""
|
||||
self.status = ""
|
||||
self.check_metadata = CheckMetadata.parse_raw(metadata)
|
||||
self.resource_metadata = resource.dict() if resource else {}
|
||||
if isinstance(resource, dict):
|
||||
self.resource = resource
|
||||
elif hasattr(resource, "dict"):
|
||||
self.resource = resource.dict()
|
||||
elif hasattr(resource, "to_dict"):
|
||||
self.resource = resource.to_dict()
|
||||
elif is_dataclass(resource):
|
||||
self.resource = asdict(resource)
|
||||
else:
|
||||
logger.error(
|
||||
f"Resource metadata {type(resource)} in {self.check_metadata.CheckID} could not be converted to dict"
|
||||
)
|
||||
self.resource = {}
|
||||
self.status_extended = ""
|
||||
self.resource_details = ""
|
||||
self.resource_tags = getattr(resource, "tags", []) if resource else []
|
||||
@@ -428,20 +447,13 @@ class Check_Report_AWS(Check_Report):
|
||||
resource_arn: str
|
||||
region: str
|
||||
|
||||
def __init__(self, metadata, resource_metadata=None):
|
||||
super().__init__(metadata, resource_metadata)
|
||||
if resource_metadata:
|
||||
self.resource_id = (
|
||||
getattr(resource_metadata, "id", None)
|
||||
or getattr(resource_metadata, "name", None)
|
||||
or ""
|
||||
)
|
||||
self.resource_arn = getattr(resource_metadata, "arn", "")
|
||||
self.region = getattr(resource_metadata, "region", "")
|
||||
else:
|
||||
self.resource_id = ""
|
||||
self.resource_arn = ""
|
||||
self.region = ""
|
||||
def __init__(self, metadata: Dict, resource: Any) -> None:
|
||||
super().__init__(metadata, resource)
|
||||
self.resource_id = (
|
||||
getattr(resource, "id", None) or getattr(resource, "name", None) or ""
|
||||
)
|
||||
self.resource_arn = getattr(resource, "arn", "")
|
||||
self.region = getattr(resource, "region", "")
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -453,12 +465,20 @@ class Check_Report_Azure(Check_Report):
|
||||
subscription: str
|
||||
location: str
|
||||
|
||||
def __init__(self, metadata):
|
||||
super().__init__(metadata)
|
||||
self.resource_name = ""
|
||||
self.resource_id = ""
|
||||
def __init__(self, metadata: Dict, resource: Any) -> None:
|
||||
"""Initialize the Azure Check's finding information.
|
||||
|
||||
Args:
|
||||
metadata: The metadata of the check.
|
||||
resource: Basic information about the resource. Defaults to None.
|
||||
"""
|
||||
super().__init__(metadata, resource)
|
||||
self.resource_name = getattr(
|
||||
resource, "name", getattr(resource, "resource_name", "")
|
||||
)
|
||||
self.resource_id = getattr(resource, "id", getattr(resource, "resource_id", ""))
|
||||
self.subscription = ""
|
||||
self.location = "global"
|
||||
self.location = getattr(resource, "location", "global")
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -470,12 +490,29 @@ class Check_Report_GCP(Check_Report):
|
||||
project_id: str
|
||||
location: str
|
||||
|
||||
def __init__(self, metadata):
|
||||
super().__init__(metadata)
|
||||
self.resource_name = ""
|
||||
self.resource_id = ""
|
||||
self.project_id = ""
|
||||
self.location = ""
|
||||
def __init__(
|
||||
self,
|
||||
metadata: Dict,
|
||||
resource: Any,
|
||||
location=None,
|
||||
resource_name=None,
|
||||
resource_id=None,
|
||||
project_id=None,
|
||||
) -> None:
|
||||
super().__init__(metadata, resource)
|
||||
self.resource_id = (
|
||||
resource_id
|
||||
or getattr(resource, "id", None)
|
||||
or getattr(resource, "name", None)
|
||||
or ""
|
||||
)
|
||||
self.resource_name = resource_name or getattr(resource, "name", "")
|
||||
self.project_id = project_id or getattr(resource, "project_id", "")
|
||||
self.location = (
|
||||
location
|
||||
or getattr(resource, "location", "")
|
||||
or getattr(resource, "region", "")
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -487,11 +524,15 @@ class Check_Report_Kubernetes(Check_Report):
|
||||
resource_id: str
|
||||
namespace: str
|
||||
|
||||
def __init__(self, metadata):
|
||||
super().__init__(metadata)
|
||||
self.resource_name = ""
|
||||
self.resource_id = ""
|
||||
self.namespace = ""
|
||||
def __init__(self, metadata: Dict, resource: Any) -> None:
|
||||
super().__init__(metadata, resource)
|
||||
self.resource_id = (
|
||||
getattr(resource, "uid", None) or getattr(resource, "name", None) or ""
|
||||
)
|
||||
self.resource_name = getattr(resource, "name", "")
|
||||
self.namespace = getattr(resource, "namespace", "cluster-wide")
|
||||
if not self.namespace:
|
||||
self.namespace = "cluster-wide"
|
||||
|
||||
|
||||
# Testing Pending
|
||||
|
||||
@@ -14,7 +14,7 @@ def fill_common_finding_data(finding: dict, unix_timestamp: bool) -> dict:
|
||||
"status_extended": finding.status_extended,
|
||||
"muted": finding.muted,
|
||||
"resource_details": finding.resource_details,
|
||||
# "resource_metadata": finding.resource_metadata, TODO: add resource_metadata to the finding
|
||||
"resource": finding.resource,
|
||||
"resource_tags": unroll_tags(finding.resource_tags),
|
||||
}
|
||||
return finding_data
|
||||
|
||||
@@ -48,6 +48,7 @@ class AWSCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
@@ -78,6 +79,7 @@ class AWSCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
|
||||
@@ -48,6 +48,7 @@ class AzureCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
@@ -79,6 +80,7 @@ class AzureCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
|
||||
@@ -48,6 +48,7 @@ class GCPCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
@@ -78,6 +79,7 @@ class GCPCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
|
||||
@@ -50,6 +50,7 @@ class KubernetesCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
@@ -81,6 +82,7 @@ class KubernetesCIS(ComplianceOutput):
|
||||
Requirements_Id=requirement.Id,
|
||||
Requirements_Description=requirement.Description,
|
||||
Requirements_Attributes_Section=attribute.Section,
|
||||
Requirements_Attributes_SubSection=attribute.SubSection,
|
||||
Requirements_Attributes_Profile=attribute.Profile,
|
||||
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
|
||||
Requirements_Attributes_Description=attribute.Description,
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
@@ -14,6 +16,7 @@ class AWSCISModel(BaseModel):
|
||||
Requirements_Id: str
|
||||
Requirements_Description: str
|
||||
Requirements_Attributes_Section: str
|
||||
Requirements_Attributes_SubSection: Optional[str]
|
||||
Requirements_Attributes_Profile: str
|
||||
Requirements_Attributes_AssessmentStatus: str
|
||||
Requirements_Attributes_Description: str
|
||||
@@ -44,6 +47,7 @@ class AzureCISModel(BaseModel):
|
||||
Requirements_Id: str
|
||||
Requirements_Description: str
|
||||
Requirements_Attributes_Section: str
|
||||
Requirements_Attributes_SubSection: Optional[str]
|
||||
Requirements_Attributes_Profile: str
|
||||
Requirements_Attributes_AssessmentStatus: str
|
||||
Requirements_Attributes_Description: str
|
||||
@@ -75,6 +79,7 @@ class GCPCISModel(BaseModel):
|
||||
Requirements_Id: str
|
||||
Requirements_Description: str
|
||||
Requirements_Attributes_Section: str
|
||||
Requirements_Attributes_SubSection: Optional[str]
|
||||
Requirements_Attributes_Profile: str
|
||||
Requirements_Attributes_AssessmentStatus: str
|
||||
Requirements_Attributes_Description: str
|
||||
@@ -105,6 +110,7 @@ class KubernetesCISModel(BaseModel):
|
||||
Requirements_Id: str
|
||||
Requirements_Description: str
|
||||
Requirements_Attributes_Section: str
|
||||
Requirements_Attributes_SubSection: Optional[str]
|
||||
Requirements_Attributes_Profile: str
|
||||
Requirements_Attributes_AssessmentStatus: str
|
||||
Requirements_Attributes_Description: str
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from datetime import datetime
|
||||
from typing import Optional, Union
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, ValidationError
|
||||
|
||||
from prowler.config.config import prowler_version
|
||||
from prowler.lib.check.models import Check_Report, CheckMetadata
|
||||
@@ -35,7 +35,7 @@ class Finding(BaseModel):
|
||||
status_extended: str
|
||||
muted: bool = False
|
||||
resource_uid: str
|
||||
# resource_metadata: dict = Field(default_factory=dict) TODO: add resource_metadata to the finding
|
||||
resource_metadata: dict = Field(default_factory=dict)
|
||||
resource_name: str
|
||||
resource_details: str
|
||||
resource_tags: dict = Field(default_factory=dict)
|
||||
@@ -121,6 +121,7 @@ class Finding(BaseModel):
|
||||
)
|
||||
try:
|
||||
output_data["provider"] = provider.type
|
||||
output_data["resource_metadata"] = check_output.resource
|
||||
|
||||
if provider.type == "aws":
|
||||
output_data["account_uid"] = get_nested_attribute(
|
||||
@@ -242,7 +243,13 @@ class Finding(BaseModel):
|
||||
)
|
||||
|
||||
return cls(**output_data)
|
||||
except ValidationError as validation_error:
|
||||
logger.error(
|
||||
f"{validation_error.__class__.__name__}[{validation_error.__traceback__.tb_lineno}]: {validation_error} - {output_data}"
|
||||
)
|
||||
raise validation_error
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
)
|
||||
raise error
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import List
|
||||
|
||||
from py_ocsf_models.events.base_event import SeverityID, StatusID
|
||||
@@ -68,7 +69,11 @@ class OCSF(Output):
|
||||
activity_name=finding_activity.name,
|
||||
finding_info=FindingInformation(
|
||||
created_time_dt=finding.timestamp,
|
||||
created_time=int(finding.timestamp.timestamp()),
|
||||
created_time=(
|
||||
int(finding.timestamp.timestamp())
|
||||
if isinstance(finding.timestamp, datetime)
|
||||
else finding.timestamp
|
||||
),
|
||||
desc=finding.metadata.Description,
|
||||
title=finding.metadata.CheckTitle,
|
||||
uid=finding.uid,
|
||||
@@ -77,7 +82,11 @@ class OCSF(Output):
|
||||
types=finding.metadata.CheckType,
|
||||
),
|
||||
time_dt=finding.timestamp,
|
||||
time=int(finding.timestamp.timestamp()),
|
||||
time=(
|
||||
int(finding.timestamp.timestamp())
|
||||
if isinstance(finding.timestamp, datetime)
|
||||
else finding.timestamp
|
||||
),
|
||||
remediation=Remediation(
|
||||
desc=finding.metadata.Remediation.Recommendation.Text,
|
||||
references=list(
|
||||
@@ -113,7 +122,7 @@ class OCSF(Output):
|
||||
region=finding.region,
|
||||
data={
|
||||
"details": finding.resource_details,
|
||||
# "metadata": finding.resource_metadata, TODO: add the resource_metadata to the finding
|
||||
"metadata": finding.resource_metadata,
|
||||
},
|
||||
)
|
||||
]
|
||||
@@ -127,7 +136,7 @@ class OCSF(Output):
|
||||
type=finding.metadata.ResourceType,
|
||||
data={
|
||||
"details": finding.resource_details,
|
||||
# "metadata": finding.resource_metadata, TODO: add the resource_metadata to the finding
|
||||
"metadata": finding.resource_metadata,
|
||||
},
|
||||
namespace=finding.region.replace("namespace: ", ""),
|
||||
)
|
||||
@@ -193,10 +202,15 @@ class OCSF(Output):
|
||||
):
|
||||
self._file_descriptor.write("[")
|
||||
for finding in self._data:
|
||||
self._file_descriptor.write(
|
||||
finding.json(exclude_none=True, indent=4)
|
||||
)
|
||||
self._file_descriptor.write(",")
|
||||
try:
|
||||
self._file_descriptor.write(
|
||||
finding.json(exclude_none=True, indent=4)
|
||||
)
|
||||
self._file_descriptor.write(",")
|
||||
except Exception as error:
|
||||
logger.error(
|
||||
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
|
||||
)
|
||||
if self._file_descriptor.tell() > 0:
|
||||
if self._file_descriptor.tell() != 1:
|
||||
self._file_descriptor.seek(
|
||||
|
||||
@@ -81,7 +81,9 @@ def set_report_color(status: str, muted: bool = False) -> str:
|
||||
elif status == "MANUAL":
|
||||
color = Fore.YELLOW
|
||||
else:
|
||||
raise Exception("Invalid Report Status. Must be PASS, FAIL or MANUAL.")
|
||||
raise Exception(
|
||||
f"Invalid Report Status: {status}. Must be PASS, FAIL or MANUAL."
|
||||
)
|
||||
return color
|
||||
|
||||
|
||||
|
||||
+10
-14
@@ -36,7 +36,6 @@ class Scan:
|
||||
_service_checks_to_execute: dict[str, set[str]]
|
||||
_service_checks_completed: dict[str, set[str]]
|
||||
_progress: float = 0.0
|
||||
_findings: list = []
|
||||
_duration: int = 0
|
||||
_status: list[str] = None
|
||||
|
||||
@@ -216,10 +215,6 @@ class Scan:
|
||||
def duration(self) -> int:
|
||||
return self._duration
|
||||
|
||||
@property
|
||||
def findings(self) -> list:
|
||||
return self._findings
|
||||
|
||||
def scan(
|
||||
self,
|
||||
custom_checks_metadata: dict = {},
|
||||
@@ -282,9 +277,6 @@ class Scan:
|
||||
if finding.status not in self._status:
|
||||
check_findings.remove(finding)
|
||||
|
||||
# Store findings
|
||||
self._findings.extend(check_findings)
|
||||
|
||||
# Remove the executed check
|
||||
self._service_checks_to_execute[service].remove(check_name)
|
||||
if len(self._service_checks_to_execute[service]) == 0:
|
||||
@@ -304,12 +296,16 @@ class Scan:
|
||||
self.get_completed_checks(),
|
||||
)
|
||||
|
||||
findings = [
|
||||
Finding.generate_output(
|
||||
self._provider, finding, output_options=None
|
||||
)
|
||||
for finding in check_findings
|
||||
]
|
||||
findings = []
|
||||
for finding in check_findings:
|
||||
try:
|
||||
findings.append(
|
||||
Finding.generate_output(
|
||||
self._provider, finding, output_options=None
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
yield self.progress, findings
|
||||
# If check does not exists in the provider or is from another provider
|
||||
|
||||
+40
-30
@@ -26,6 +26,36 @@ from detect_secrets.settings import transient_settings
|
||||
from prowler.config.config import encoding_format_utf_8
|
||||
from prowler.lib.logger import logger
|
||||
|
||||
default_detect_secrets_plugins = [
|
||||
{"name": "ArtifactoryDetector"},
|
||||
{"name": "AWSKeyDetector"},
|
||||
{"name": "AzureStorageKeyDetector"},
|
||||
{"name": "BasicAuthDetector"},
|
||||
{"name": "CloudantDetector"},
|
||||
{"name": "DiscordBotTokenDetector"},
|
||||
{"name": "GitHubTokenDetector"},
|
||||
{"name": "GitLabTokenDetector"},
|
||||
{"name": "Base64HighEntropyString", "limit": 6.0},
|
||||
{"name": "HexHighEntropyString", "limit": 3.0},
|
||||
{"name": "IbmCloudIamDetector"},
|
||||
{"name": "IbmCosHmacDetector"},
|
||||
# {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
|
||||
{"name": "JwtTokenDetector"},
|
||||
{"name": "KeywordDetector"},
|
||||
{"name": "MailchimpDetector"},
|
||||
{"name": "NpmDetector"},
|
||||
{"name": "OpenAIDetector"},
|
||||
{"name": "PrivateKeyDetector"},
|
||||
{"name": "PypiTokenDetector"},
|
||||
{"name": "SendGridDetector"},
|
||||
{"name": "SlackDetector"},
|
||||
{"name": "SoftlayerDetector"},
|
||||
{"name": "SquareOAuthDetector"},
|
||||
{"name": "StripeDetector"},
|
||||
# {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
|
||||
{"name": "TwilioKeyDetector"},
|
||||
]
|
||||
|
||||
|
||||
def open_file(input_file: str, mode: str = "r") -> TextIOWrapper:
|
||||
"""open_file returns a handler to the file using the specified mode."""
|
||||
@@ -82,13 +112,17 @@ def hash_sha512(string: str) -> str:
|
||||
|
||||
|
||||
def detect_secrets_scan(
|
||||
data: str = None, file=None, excluded_secrets: list[str] = None
|
||||
data: str = None,
|
||||
file=None,
|
||||
excluded_secrets: list[str] = None,
|
||||
detect_secrets_plugins: dict = None,
|
||||
) -> list[dict[str, str]]:
|
||||
"""detect_secrets_scan scans the data or file for secrets using the detect-secrets library.
|
||||
Args:
|
||||
data (str): The data to scan for secrets.
|
||||
file (str): The file to scan for secrets.
|
||||
excluded_secrets (list): A list of regex patterns to exclude from the scan.
|
||||
detect_secrets_plugins (dict): The settings to use for the scan.
|
||||
Returns:
|
||||
dict: The secrets found in the
|
||||
Raises:
|
||||
@@ -107,36 +141,11 @@ def detect_secrets_scan(
|
||||
|
||||
secrets = SecretsCollection()
|
||||
|
||||
if not detect_secrets_plugins:
|
||||
detect_secrets_plugins = default_detect_secrets_plugins
|
||||
|
||||
settings = {
|
||||
"plugins_used": [
|
||||
{"name": "ArtifactoryDetector"},
|
||||
{"name": "AWSKeyDetector"},
|
||||
{"name": "AzureStorageKeyDetector"},
|
||||
{"name": "BasicAuthDetector"},
|
||||
{"name": "CloudantDetector"},
|
||||
{"name": "DiscordBotTokenDetector"},
|
||||
{"name": "GitHubTokenDetector"},
|
||||
{"name": "GitLabTokenDetector"},
|
||||
{"name": "Base64HighEntropyString", "limit": 6.0},
|
||||
{"name": "HexHighEntropyString", "limit": 3.0},
|
||||
{"name": "IbmCloudIamDetector"},
|
||||
{"name": "IbmCosHmacDetector"},
|
||||
# {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
|
||||
{"name": "JwtTokenDetector"},
|
||||
{"name": "KeywordDetector"},
|
||||
{"name": "MailchimpDetector"},
|
||||
{"name": "NpmDetector"},
|
||||
{"name": "OpenAIDetector"},
|
||||
{"name": "PrivateKeyDetector"},
|
||||
{"name": "PypiTokenDetector"},
|
||||
{"name": "SendGridDetector"},
|
||||
{"name": "SlackDetector"},
|
||||
{"name": "SoftlayerDetector"},
|
||||
{"name": "SquareOAuthDetector"},
|
||||
{"name": "StripeDetector"},
|
||||
# {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
|
||||
{"name": "TwilioKeyDetector"},
|
||||
],
|
||||
"plugins_used": detect_secrets_plugins,
|
||||
"filters_used": [
|
||||
{"path": "detect_secrets.filters.common.is_invalid_file"},
|
||||
{"path": "detect_secrets.filters.common.is_known_false_positive"},
|
||||
@@ -144,6 +153,7 @@ def detect_secrets_scan(
|
||||
{"path": "detect_secrets.filters.heuristic.is_potential_secret"},
|
||||
],
|
||||
}
|
||||
|
||||
if excluded_secrets and len(excluded_secrets) > 0:
|
||||
settings["filters_used"].append(
|
||||
{
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -112,6 +113,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -351,6 +353,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -472,6 +475,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -517,6 +521,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -844,6 +849,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -889,6 +895,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1015,6 +1022,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1060,6 +1068,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1116,6 +1125,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1158,6 +1168,7 @@
|
||||
"ap-southeast-3",
|
||||
"ap-southeast-4",
|
||||
"ap-southeast-5",
|
||||
"ap-southeast-7",
|
||||
"ca-central-1",
|
||||
"ca-west-1",
|
||||
"eu-central-1",
|
||||
@@ -1171,6 +1182,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1248,6 +1260,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1807,6 +1820,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1852,6 +1866,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -1984,6 +1999,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -2052,6 +2068,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -2611,6 +2628,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -2941,6 +2959,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3132,6 +3151,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3192,6 +3212,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3237,6 +3258,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3399,6 +3421,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3444,6 +3467,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3489,6 +3513,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3534,6 +3559,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3579,6 +3605,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3634,6 +3661,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3679,6 +3707,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3724,6 +3753,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3769,6 +3799,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3782,20 +3813,6 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"elastic-inference": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
"ap-northeast-1",
|
||||
"ap-northeast-2",
|
||||
"eu-west-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
"us-west-2"
|
||||
],
|
||||
"aws-cn": [],
|
||||
"aws-us-gov": []
|
||||
}
|
||||
},
|
||||
"elasticache": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
@@ -3825,6 +3842,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3923,6 +3941,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -3968,6 +3987,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -4013,6 +4033,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -4155,6 +4176,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -4200,6 +4222,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -4245,6 +4268,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -4307,6 +4331,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -5016,6 +5041,7 @@
|
||||
"ap-southeast-2",
|
||||
"ap-southeast-3",
|
||||
"ap-southeast-4",
|
||||
"ap-southeast-5",
|
||||
"ca-central-1",
|
||||
"ca-west-1",
|
||||
"eu-central-1",
|
||||
@@ -5089,6 +5115,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -5221,6 +5248,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -5663,6 +5691,13 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
"iotthingsgraph": {
|
||||
"regions": {
|
||||
"aws": [],
|
||||
"aws-cn": [],
|
||||
"aws-us-gov": []
|
||||
}
|
||||
},
|
||||
"iottwinmaker": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
@@ -5878,6 +5913,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -5996,6 +6032,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -6085,6 +6122,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -6422,6 +6460,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -6694,6 +6733,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -7558,6 +7598,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -7635,6 +7676,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -7654,6 +7696,7 @@
|
||||
"opensearchserverless": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
"ap-east-1",
|
||||
"ap-northeast-1",
|
||||
"ap-northeast-2",
|
||||
"ap-south-1",
|
||||
@@ -7662,6 +7705,7 @@
|
||||
"ca-central-1",
|
||||
"eu-central-1",
|
||||
"eu-central-2",
|
||||
"eu-north-1",
|
||||
"eu-west-1",
|
||||
"eu-west-2",
|
||||
"eu-west-3",
|
||||
@@ -7774,6 +7818,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -7922,6 +7967,23 @@
|
||||
"aws-us-gov": []
|
||||
}
|
||||
},
|
||||
"pcs": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
"ap-northeast-1",
|
||||
"ap-southeast-1",
|
||||
"ap-southeast-2",
|
||||
"eu-central-1",
|
||||
"eu-north-1",
|
||||
"eu-west-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
"us-west-2"
|
||||
],
|
||||
"aws-cn": [],
|
||||
"aws-us-gov": []
|
||||
}
|
||||
},
|
||||
"personalize": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
@@ -7972,6 +8034,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8188,6 +8251,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8361,6 +8425,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8406,6 +8471,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8451,6 +8517,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8527,6 +8594,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8558,6 +8626,7 @@
|
||||
"ap-southeast-3",
|
||||
"ap-southeast-4",
|
||||
"ap-southeast-5",
|
||||
"ap-southeast-7",
|
||||
"ca-central-1",
|
||||
"ca-west-1",
|
||||
"eu-central-1",
|
||||
@@ -8571,6 +8640,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8760,6 +8830,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8805,6 +8876,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8852,6 +8924,8 @@
|
||||
"ap-southeast-2",
|
||||
"ap-southeast-3",
|
||||
"ap-southeast-4",
|
||||
"ap-southeast-5",
|
||||
"ap-southeast-7",
|
||||
"ca-central-1",
|
||||
"ca-west-1",
|
||||
"eu-central-1",
|
||||
@@ -8865,6 +8939,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8948,6 +9023,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -8990,6 +9066,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -9093,6 +9170,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -9172,6 +9250,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -9597,6 +9676,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -9766,6 +9846,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -9893,6 +9974,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10207,6 +10289,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10268,6 +10351,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10313,6 +10397,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10509,6 +10594,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10598,6 +10684,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10643,6 +10730,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10699,6 +10787,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10744,6 +10833,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -10997,6 +11087,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -11068,6 +11159,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -11233,6 +11325,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -11314,6 +11407,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
@@ -11662,6 +11756,7 @@
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
|
||||
+1
-5
@@ -8,11 +8,7 @@ class accessanalyzer_enabled(Check):
|
||||
def execute(self):
|
||||
findings = []
|
||||
for analyzer in accessanalyzer_client.analyzers:
|
||||
report = Check_Report_AWS(self.metadata())
|
||||
report.region = analyzer.region
|
||||
report.resource_id = analyzer.name
|
||||
report.resource_arn = analyzer.arn
|
||||
report.resource_tags = analyzer.tags
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=analyzer)
|
||||
if analyzer.status == "ACTIVE":
|
||||
report.status = "PASS"
|
||||
report.status_extended = (
|
||||
|
||||
+3
-8
@@ -8,14 +8,11 @@ class accessanalyzer_enabled_without_findings(Check):
|
||||
def execute(self):
|
||||
findings = []
|
||||
for analyzer in accessanalyzer_client.analyzers:
|
||||
report = Check_Report_AWS(self.metadata())
|
||||
report.region = analyzer.region
|
||||
report = Check_Report_AWS(metadata=self.metadata(), resource=analyzer)
|
||||
if analyzer.status == "ACTIVE":
|
||||
report.status = "PASS"
|
||||
report.status_extended = f"IAM Access Analyzer {analyzer.name} does not have active findings."
|
||||
report.resource_id = analyzer.name
|
||||
report.resource_arn = analyzer.arn
|
||||
report.resource_tags = analyzer.tags
|
||||
|
||||
if len(analyzer.findings) != 0:
|
||||
active_finding_counter = 0
|
||||
for finding in analyzer.findings:
|
||||
@@ -25,9 +22,7 @@ class accessanalyzer_enabled_without_findings(Check):
|
||||
if active_finding_counter > 0:
|
||||
report.status = "FAIL"
|
||||
report.status_extended = f"IAM Access Analyzer {analyzer.name} has {active_finding_counter} active findings."
|
||||
report.resource_id = analyzer.name
|
||||
report.resource_arn = analyzer.arn
|
||||
report.resource_tags = analyzer.tags
|
||||
|
||||
findings.append(report)
|
||||
|
||||
return findings
|
||||
|
||||
@@ -8,7 +8,6 @@ from prowler.lib.scan_filters.scan_filters import is_resource_filtered
|
||||
from prowler.providers.aws.lib.service.service import AWSService
|
||||
|
||||
|
||||
################## AccessAnalyzer
|
||||
class AccessAnalyzer(AWSService):
|
||||
def __init__(self, provider):
|
||||
# Call AWSService's __init__
|
||||
|
||||
+3
-1
@@ -6,7 +6,9 @@ from prowler.providers.aws.services.account.account_client import account_client
|
||||
|
||||
class account_maintain_current_contact_details(Check):
|
||||
def execute(self):
|
||||
report = Check_Report_AWS(self.metadata())
|
||||
report = Check_Report_AWS(
|
||||
metadata=self.metadata(), resource=account_client.contact_base
|
||||
)
|
||||
report.region = account_client.region
|
||||
report.resource_id = account_client.audited_account
|
||||
report.resource_arn = account_client.audited_account_arn
|
||||
|
||||
+4
-2
@@ -8,10 +8,12 @@ class account_maintain_different_contact_details_to_security_billing_and_operati
|
||||
def execute(self):
|
||||
findings = []
|
||||
if account_client.contact_base:
|
||||
report = Check_Report_AWS(self.metadata())
|
||||
report.region = account_client.region
|
||||
report = Check_Report_AWS(
|
||||
metadata=self.metadata(), resource=account_client.contact_base
|
||||
)
|
||||
report.resource_id = account_client.audited_account
|
||||
report.resource_arn = account_client.audited_account_arn
|
||||
report.region = account_client.region
|
||||
|
||||
if (
|
||||
len(account_client.contact_phone_numbers)
|
||||
|
||||
+3
-1
@@ -6,7 +6,9 @@ from prowler.providers.aws.services.account.account_client import account_client
|
||||
|
||||
class account_security_contact_information_is_registered(Check):
|
||||
def execute(self):
|
||||
report = Check_Report_AWS(self.metadata())
|
||||
report = Check_Report_AWS(
|
||||
metadata=self.metadata(), resource=account_client.contact_base
|
||||
)
|
||||
report.region = account_client.region
|
||||
report.resource_id = account_client.audited_account
|
||||
report.resource_arn = account_client.audited_account_arn
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user