Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-03-30 03:49:48 +00:00)

Compare commits: v5.6...PRWLR-5956 (147 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4a14e21553 | |
| | 5f54377407 | |
| | 552aa64741 | |
| | d64f611f51 | |
| | a96cc92d77 | |
| | 3858cccc41 | |
| | 072828512a | |
| | a73ffe5642 | |
| | 8e784a5b6d | |
| | 1b6f9332f1 | |
| | db8b472729 | |
| | 867b371522 | |
| | c0d7c9fc7d | |
| | bb4685cf90 | |
| | 6a95426749 | |
| | ef6af8e84d | |
| | 763130f253 | |
| | 1256c040e9 | |
| | 0995c7a845 | |
| | 18b7b48a99 | |
| | 627c11503f | |
| | 712ba84f06 | |
| | 5186e029b3 | |
| | 5bfaedf903 | |
| | 5061da6897 | |
| | 63f8186bd6 | |
| | c159a28016 | |
| | d70c71c903 | |
| | 53c571c289 | |
| | a3c7846cd9 | |
| | 82a1b1c921 | |
| | bf2210d0f4 | |
| | 8f0772cb94 | |
| | 5b57079ecd | |
| | 350d759517 | |
| | edd793c9f5 | |
| | 545c2dc685 | |
| | 84955c066c | |
| | 06dd03b170 | |
| | 47bc2ed2dc | |
| | 44281afc54 | |
| | 4d2859d145 | |
| | 45d44a1669 | |
| | ddd83b340e | |
| | ccdb54d7c3 | |
| | bcc246d950 | |
| | 62139e252a | |
| | 86950c3a0a | |
| | f4865ef68d | |
| | ea7209e7ae | |
| | 998c551cf3 | |
| | e6f29b0116 | |
| | eb90bb39dc | |
| | ad189b35ad | |
| | 7d2989a233 | |
| | 862137ae7d | |
| | c86e082d9a | |
| | 80fe048f97 | |
| | f2bffb3ce7 | |
| | cbe2f9eef8 | |
| | 688f41f570 | |
| | a29197637e | |
| | 7a2712a37f | |
| | 189f5cfd8c | |
| | e509480892 | |
| | 7f7955351a | |
| | 46f1db21a8 | |
| | fbe7bc6951 | |
| | f658507847 | |
| | 374078683b | |
| | 114c4e0886 | |
| | 67c62766d4 | |
| | 3f2947158d | |
| | 278a7cb356 | |
| | 890158a79c | |
| | 4dc1602b77 | |
| | bbba0abac9 | |
| | d04fd807c6 | |
| | 3456df4cf1 | |
| | f56aaa791e | |
| | 465a758770 | |
| | 0f7c0c1b2c | |
| | bf8d10b6f6 | |
| | 20d04553d6 | |
| | b56d62e3c4 | |
| | 9a332dcba1 | |
| | 166d9f8823 | |
| | 42f5eed75f | |
| | 01a7db18dd | |
| | d4507465a3 | |
| | 3ac92ed10a | |
| | 43c76ca85c | |
| | 54d87fa96a | |
| | f041f17268 | |
| | 31c80a6967 | |
| | 783ce136f4 | |
| | f829145781 | |
| | 389337f8cd | |
| | a0713c2d66 | |
| | f94d3cbce4 | |
| | 8d8994b468 | |
| | 784a9097a5 | |
| | b9601626e3 | |
| | dc80b011f2 | |
| | ee7d32d460 | |
| | 43fd9ee94e | |
| | 8821a91f3f | |
| | 98d9256f92 | |
| | b35495eaa7 | |
| | 74d6b614b3 | |
| | dd63c16a74 | |
| | 4280266a96 | |
| | b1f02098ff | |
| | 95189b574a | |
| | c5d23503bf | |
| | 77950f6069 | |
| | ec5f2b3753 | |
| | 9e7104fb7f | |
| | 6b3b6ca45e | |
| | 20b8b0b24e | |
| | 4e11540458 | |
| | ee87f2676d | |
| | 74a90aab98 | |
| | 48ff9a5100 | |
| | 3dfd578ee5 | |
| | 0db46cdc81 | |
| | fdac58d031 | |
| | df9d4ce856 | |
| | e6ae4e97e8 | |
| | 10a4c28922 | |
| | 8a828c6e51 | |
| | d7b40905ff | |
| | f9a3b5f3cd | |
| | b73b89242f | |
| | 23a0f6e8de | |
| | 87967abc3f | |
| | ce60c286dc | |
| | 90fd9b0eb8 | |
| | ca262a6797 | |
| | c056d39775 | |
| | 1c4426ea4b | |
| | 36520bd7a1 | |
| | badf0ace76 | |
| | f1f61249e0 | |
| | b371cac18c | |
| | 1846535d8d | |
| | d7d9118b9b | |
.env (17 changes)

@@ -30,6 +30,23 @@ VALKEY_HOST=valkey
 VALKEY_PORT=6379
 VALKEY_DB=0

+# API scan settings
+# The AWS access key to be used when uploading scan artifacts to an S3 bucket
+# If left empty, default AWS credentials resolution behavior will be used
+ARTIFACTS_AWS_ACCESS_KEY_ID=""
+
+# The AWS secret key to be used when uploading scan artifacts to an S3 bucket
+ARTIFACTS_AWS_SECRET_ACCESS_KEY=""
+
+# An optional AWS session token
+ARTIFACTS_AWS_SESSION_TOKEN=""
+
+# The AWS region where your S3 bucket is located (e.g., "us-east-1")
+ARTIFACTS_AWS_DEFAULT_REGION=""
+
+# The name of the S3 bucket where scan artifacts should be stored
+ARTIFACTS_AWS_S3_OUTPUT_BUCKET=""
+
 # Django settings
 DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1,prowler-api
 DJANGO_BIND_ADDRESS=0.0.0.0
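The new ARTIFACTS_* variables configure where the API uploads scan artifacts. Below is a minimal sketch of how such settings are typically consumed with boto3; the helper name and wiring are illustrative assumptions, not code from this diff.

```python
import os

import boto3  # assumed dependency; the API uploads scan artifacts to S3


def build_artifacts_s3_client():
    """Build an S3 client from the ARTIFACTS_* environment variables.

    Falls back to boto3's default credentials resolution chain when the
    variables are left empty, as the .env comments describe.
    """
    access_key = os.getenv("ARTIFACTS_AWS_ACCESS_KEY_ID", "")
    secret_key = os.getenv("ARTIFACTS_AWS_SECRET_ACCESS_KEY", "")
    session_token = os.getenv("ARTIFACTS_AWS_SESSION_TOKEN", "")
    region = os.getenv("ARTIFACTS_AWS_DEFAULT_REGION", "") or None

    if access_key and secret_key:
        return boto3.client(
            "s3",
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            aws_session_token=session_token or None,
            region_name=region,
        )
    # Empty values: let boto3 resolve credentials (env vars, profile, instance role)
    return boto3.client("s3", region_name=region)


# Hypothetical usage: upload a report to the configured bucket
# client = build_artifacts_s3_client()
# client.upload_file("report.zip", os.environ["ARTIFACTS_AWS_S3_OUTPUT_BUCKET"], "scans/report.zip")
```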
.github/pull_request_template.md (vendored, 3 changes)

@@ -15,7 +15,8 @@ Please include a summary of the change and which issue is fixed. List any depend
- [ ] Review if the code is being covered by tests.
- [ ] Review if code is being documented following this specification https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
- [ ] Review if backport is needed.
- [ ] Review if is needed to change the [Readme.md](https://github.com/prowler-cloud/prowler/blob/master/README.md)

### License

By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
.github/workflows/find-secrets.yml (vendored, 2 changes)

@@ -11,7 +11,7 @@ jobs:
        with:
          fetch-depth: 0
      - name: TruffleHog OSS
-       uses: trufflesecurity/trufflehog@v3.88.2
+       uses: trufflesecurity/trufflehog@v3.88.4
        with:
          path: ./
          base: ${{ github.event.repository.default_branch }}
.gitignore (vendored, 1 change)

@@ -45,6 +45,7 @@ junit-reports/
 # Terraform
 .terraform*
 *.tfstate
 *.tfstate.*

 # .env
+ui/.env*
.pre-commit-config.yaml

@@ -27,6 +27,7 @@ repos:
     hooks:
       - id: shellcheck
         exclude: contrib

 ## PYTHON
   - repo: https://github.com/myint/autoflake
     rev: v2.3.1

@@ -61,8 +62,25 @@
     rev: 1.8.0
     hooks:
       - id: poetry-check
+        name: API - poetry-check
+        args: ["--directory=./api"]
         pass_filenames: false

       - id: poetry-lock
-        args: ["--no-update"]
+        name: API - poetry-lock
+        args: ["--no-update", "--directory=./api"]
         pass_filenames: false

+      - id: poetry-check
+        name: SDK - poetry-check
+        args: ["--directory=./"]
+        pass_filenames: false
+
+      - id: poetry-lock
+        name: SDK - poetry-lock
+        args: ["--no-update", "--directory=./"]
+        pass_filenames: false

   - repo: https://github.com/hadolint/hadolint
     rev: v2.13.0-beta
README.md (10 changes)

@@ -71,10 +71,12 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe

 | Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/misc/#categories) |
 |---|---|---|---|---|
-| AWS | 561 | 81 -> `prowler aws --list-services` | 30 -> `prowler aws --list-compliance` | 9 -> `prowler aws --list-categories` |
-| GCP | 77 | 13 -> `prowler gcp --list-services` | 4 -> `prowler gcp --list-compliance` | 2 -> `prowler gcp --list-categories`|
-| Azure | 139 | 18 -> `prowler azure --list-services` | 4 -> `prowler azure --list-compliance` | 2 -> `prowler azure --list-categories` |
-| Kubernetes | 83 | 7 -> `prowler kubernetes --list-services` | 1 -> `prowler kubernetes --list-compliance` | 7 -> `prowler kubernetes --list-categories` |
+| AWS | 564 | 82 | 30 | 10 |
+| GCP | 77 | 13 | 4 | 3 |
+| Azure | 140 | 18 | 5 | 3 |
+| Kubernetes | 83 | 7 | 2 | 7 |

+> You can list the checks, services, compliance frameworks and categories with `prowler <provider> --list-checks`, `prowler <provider> --list-services`, `prowler <provider> --list-compliance` and `prowler <provider> --list-categories`.

 # 💻 Installation
@@ -28,7 +28,7 @@ start_prod_server() {

 start_worker() {
   echo "Starting the worker..."
-  poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E
+  poetry run python -m celery -A config.celery worker -l "${DJANGO_LOGGING_LEVEL:-info}" -Q celery,scans -E --max-tasks-per-child 1
 }

 start_worker_beat() {
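The added `--max-tasks-per-child 1` flag makes Celery replace each worker process after a single task, so memory held by a long-running scan is returned to the OS. A hedged sketch of the equivalent configuration-file setting follows; `worker_max_tasks_per_child` is a real Celery option, while the app name here is an assumption.

```python
from celery import Celery

app = Celery("config")  # app name assumed, mirroring `-A config.celery`

# Equivalent to the `--max-tasks-per-child 1` CLI flag above: each worker
# process is recycled after executing one task.
app.conf.worker_max_tasks_per_child = 1
```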
api/poetry.lock (generated, 845 changes): file diff suppressed because it is too large.
api/pyproject.toml

@@ -8,11 +8,11 @@ description = "Prowler's API (Django/DRF)"
 license = "Apache-2.0"
 name = "prowler-api"
 package-mode = false
-version = "1.1.0"
+version = "1.4.0"

 [tool.poetry.dependencies]
 celery = {extras = ["pytest"], version = "^5.4.0"}
-django = "5.1.4"
+django = "5.1.5"
 django-celery-beat = "^2.7.0"
 django-celery-results = "^2.5.1"
 django-cors-headers = "4.4.0"

@@ -27,7 +27,7 @@ drf-nested-routers = "^0.94.1"
 drf-spectacular = "0.27.2"
 drf-spectacular-jsonapi = "0.5.1"
 gunicorn = "23.0.0"
-prowler = "^5.0"
+prowler = {git = "https://github.com/prowler-cloud/prowler.git", branch = "master"}
 psycopg2-binary = "2.9.9"
 pytest-celery = {extras = ["redis"], version = "^1.0.1"}
 # Needed for prowler compatibility

@@ -37,6 +37,7 @@ uuid6 = "2024.7.10"

 [tool.poetry.group.dev.dependencies]
 bandit = "1.7.9"
 coverage = "7.5.4"
+django-silk = "5.3.2"
 docker = "7.1.0"
 freezegun = "1.5.1"
 mypy = "1.10.1"
@@ -4,13 +4,17 @@ class MainRouter:

     def db_for_read(self, model, **hints):  # noqa: F841
         model_table_name = model._meta.db_table
-        if model_table_name.startswith("django_"):
+        if model_table_name.startswith("django_") or model_table_name.startswith(
+            "silk_"
+        ):
             return self.admin_db
         return None

     def db_for_write(self, model, **hints):  # noqa: F841
         model_table_name = model._meta.db_table
-        if model_table_name.startswith("django_"):
+        if model_table_name.startswith("django_") or model_table_name.startswith(
+            "silk_"
+        ):
             return self.admin_db
         return None
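With this change the router also pins django-silk's `silk_*` tables to the admin database, matching the new django-silk dev dependency. A sketch of how such a router is wired up in Django settings follows; the database aliases and module path are assumptions, not taken from the diff.

```python
# settings.py (sketch)
DATABASES = {
    "default": {"ENGINE": "django.db.backends.postgresql", "NAME": "prowler_db"},
    "admin": {"ENGINE": "django.db.backends.postgresql", "NAME": "prowler_admin_db"},
}

# Django consults each router in order for every read and write; returning
# None from the router falls through to the default database.
DATABASE_ROUTERS = ["config.db_router.MainRouter"]  # module path assumed
```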
@@ -319,6 +319,28 @@ class FindingFilter(FilterSet):
         field_name="resources__type", lookup_expr="icontains"
     )

+    # Temporarily disabled until we implement tag filtering in the UI
+    # resource_tag_key = CharFilter(field_name="resources__tags__key")
+    # resource_tag_key__in = CharInFilter(
+    #     field_name="resources__tags__key", lookup_expr="in"
+    # )
+    # resource_tag_key__icontains = CharFilter(
+    #     field_name="resources__tags__key", lookup_expr="icontains"
+    # )
+    # resource_tag_value = CharFilter(field_name="resources__tags__value")
+    # resource_tag_value__in = CharInFilter(
+    #     field_name="resources__tags__value", lookup_expr="in"
+    # )
+    # resource_tag_value__icontains = CharFilter(
+    #     field_name="resources__tags__value", lookup_expr="icontains"
+    # )
+    # resource_tags = CharInFilter(
+    #     method="filter_resource_tag",
+    #     lookup_expr="in",
+    #     help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
+    #     "separated by commas.",
+    # )

     scan = UUIDFilter(method="filter_scan_id")
     scan__in = UUIDInFilter(method="filter_scan_id_in")

@@ -426,6 +448,16 @@ class FindingFilter(FilterSet):

         return queryset.filter(id__lte=end).filter(inserted_at__lte=value)

+    def filter_resource_tag(self, queryset, name, value):
+        overall_query = Q()
+        for key_value_pair in value:
+            tag_key, tag_value = key_value_pair.split(":", 1)
+            overall_query |= Q(
+                resources__tags__key__icontains=tag_key,
+                resources__tags__value__icontains=tag_value,
+            )
+        return queryset.filter(overall_query).distinct()

     @staticmethod
     def maybe_date_to_datetime(value):
         dt = value
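The new `filter_resource_tag` method OR-composes one Q object per `key:value` pair, so a finding matches if any pair matches. A standalone sketch of that composition, with field paths mirroring the diff:

```python
from django.db.models import Q


def build_tag_query(pairs: list[str]) -> Q:
    """Mirror the filter's OR-composition: each "key:value" item widens the
    match, and icontains makes both halves case-insensitive substrings."""
    query = Q()
    for pair in pairs:
        key, value = pair.split(":", 1)  # split only on the first colon
        query |= Q(
            resources__tags__key__icontains=key,
            resources__tags__value__icontains=value,
        )
    return query


# A finding matches if ANY pair matches; .distinct() in the filter collapses
# duplicate rows introduced by the join through resources__tags, e.g.:
# Finding.objects.filter(build_tag_query(["env:prod", "team:sec"])).distinct()
```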
@@ -6,6 +6,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.823Z",
     "updated_at": "2024-10-18T10:46:04.841Z",
+    "first_seen_at": "2024-10-18T10:46:04.823Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -61,6 +62,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.855Z",
     "updated_at": "2024-10-18T10:46:04.858Z",
+    "first_seen_at": "2024-10-18T10:46:04.855Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -116,6 +118,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.869Z",
     "updated_at": "2024-10-18T10:46:04.876Z",
+    "first_seen_at": "2024-10-18T10:46:04.869Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -171,6 +174,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.888Z",
     "updated_at": "2024-10-18T10:46:04.892Z",
+    "first_seen_at": "2024-10-18T10:46:04.888Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -226,6 +230,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.901Z",
     "updated_at": "2024-10-18T10:46:04.905Z",
+    "first_seen_at": "2024-10-18T10:46:04.901Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -281,6 +286,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.915Z",
     "updated_at": "2024-10-18T10:46:04.919Z",
+    "first_seen_at": "2024-10-18T10:46:04.915Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -336,6 +342,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.929Z",
     "updated_at": "2024-10-18T10:46:04.934Z",
+    "first_seen_at": "2024-10-18T10:46:04.929Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -391,6 +398,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.944Z",
     "updated_at": "2024-10-18T10:46:04.947Z",
+    "first_seen_at": "2024-10-18T10:46:04.944Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -446,6 +454,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.957Z",
     "updated_at": "2024-10-18T10:46:04.962Z",
+    "first_seen_at": "2024-10-18T10:46:04.957Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
     "delta": "new",
     "status": "PASS",

@@ -501,6 +510,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.971Z",
     "updated_at": "2024-10-18T10:46:04.975Z",
+    "first_seen_at": "2024-10-18T10:46:04.971Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -556,6 +566,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.984Z",
     "updated_at": "2024-10-18T10:46:04.989Z",
+    "first_seen_at": "2024-10-18T10:46:04.984Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -611,6 +622,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:04.999Z",
     "updated_at": "2024-10-18T10:46:05.003Z",
+    "first_seen_at": "2024-10-18T10:46:04.999Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -666,6 +678,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.013Z",
     "updated_at": "2024-10-18T10:46:05.018Z",
+    "first_seen_at": "2024-10-18T10:46:05.013Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -721,6 +734,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.029Z",
     "updated_at": "2024-10-18T10:46:05.033Z",
+    "first_seen_at": "2024-10-18T10:46:05.029Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -776,6 +790,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.045Z",
     "updated_at": "2024-10-18T10:46:05.050Z",
+    "first_seen_at": "2024-10-18T10:46:05.045Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -831,6 +846,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.061Z",
     "updated_at": "2024-10-18T10:46:05.065Z",
+    "first_seen_at": "2024-10-18T10:46:05.061Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -886,6 +902,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.080Z",
     "updated_at": "2024-10-18T10:46:05.085Z",
+    "first_seen_at": "2024-10-18T10:46:05.080Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -941,6 +958,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.099Z",
     "updated_at": "2024-10-18T10:46:05.104Z",
+    "first_seen_at": "2024-10-18T10:46:05.099Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -996,6 +1014,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T10:46:05.115Z",
     "updated_at": "2024-10-18T10:46:05.121Z",
+    "first_seen_at": "2024-10-18T10:46:05.115Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
     "delta": "new",
     "status": "FAIL",

@@ -1051,6 +1070,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.489Z",
     "updated_at": "2024-10-18T11:16:24.506Z",
+    "first_seen_at": "2024-10-18T10:46:04.823Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-south-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1106,6 +1126,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.518Z",
     "updated_at": "2024-10-18T11:16:24.521Z",
+    "first_seen_at": "2024-10-18T10:46:04.855Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-3-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1161,6 +1182,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.526Z",
     "updated_at": "2024-10-18T11:16:24.529Z",
+    "first_seen_at": "2024-10-18T10:46:04.869Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1216,6 +1238,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.535Z",
     "updated_at": "2024-10-18T11:16:24.538Z",
+    "first_seen_at": "2024-10-18T10:46:04.888Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1271,6 +1294,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.544Z",
     "updated_at": "2024-10-18T11:16:24.546Z",
+    "first_seen_at": "2024-10-18T10:46:04.901Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1326,6 +1350,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.551Z",
     "updated_at": "2024-10-18T11:16:24.554Z",
+    "first_seen_at": "2024-10-18T10:46:04.915Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-south-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1381,6 +1406,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.560Z",
     "updated_at": "2024-10-18T11:16:24.562Z",
+    "first_seen_at": "2024-10-18T10:46:04.929Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1436,6 +1462,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.567Z",
     "updated_at": "2024-10-18T11:16:24.569Z",
+    "first_seen_at": "2024-10-18T10:46:04.944Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ca-central-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1491,6 +1518,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.573Z",
     "updated_at": "2024-10-18T11:16:24.575Z",
+    "first_seen_at": "2024-10-18T10:46:04.957Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-east-1-ConsoleAnalyzer-83b66ad7-d024-454e-b851-52d11cc1cf7c",
     "delta": null,
     "status": "PASS",

@@ -1546,6 +1574,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.580Z",
     "updated_at": "2024-10-18T11:16:24.582Z",
+    "first_seen_at": "2024-10-18T10:46:04.971Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-west-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1601,6 +1630,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.587Z",
     "updated_at": "2024-10-18T11:16:24.589Z",
+    "first_seen_at": "2024-10-18T10:46:04.984Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-sa-east-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1656,6 +1686,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.595Z",
     "updated_at": "2024-10-18T11:16:24.597Z",
+    "first_seen_at": "2024-10-18T10:46:04.999Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-north-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1711,6 +1742,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.602Z",
     "updated_at": "2024-10-18T11:16:24.604Z",
+    "first_seen_at": "2024-10-18T10:46:05.013Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-us-west-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1766,6 +1798,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.610Z",
     "updated_at": "2024-10-18T11:16:24.612Z",
+    "first_seen_at": "2024-10-18T10:46:05.029Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1821,6 +1854,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.617Z",
     "updated_at": "2024-10-18T11:16:24.620Z",
+    "first_seen_at": "2024-10-18T10:46:05.045Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-eu-central-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1876,6 +1910,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.625Z",
     "updated_at": "2024-10-18T11:16:24.627Z",
+    "first_seen_at": "2024-10-18T10:46:05.061Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-1-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1931,6 +1966,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.632Z",
     "updated_at": "2024-10-18T11:16:24.634Z",
+    "first_seen_at": "2024-10-18T10:46:05.080Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-southeast-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -1986,6 +2022,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.639Z",
     "updated_at": "2024-10-18T11:16:24.642Z",
+    "first_seen_at": "2024-10-18T10:46:05.099Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-2-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -2041,6 +2078,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:24.646Z",
     "updated_at": "2024-10-18T11:16:24.648Z",
+    "first_seen_at": "2024-10-18T10:46:05.115Z",
     "uid": "prowler-aws-accessanalyzer_enabled-112233445566-ap-northeast-3-112233445566",
     "delta": null,
     "status": "FAIL",

@@ -2096,6 +2134,7 @@
     "tenant": "12646005-9067-4d2a-a098-8bb378604362",
     "inserted_at": "2024-10-18T11:16:26.033Z",
     "updated_at": "2024-10-18T11:16:26.045Z",
+    "first_seen_at": "2024-10-18T11:16:26.033Z",
     "uid": "prowler-aws-account_security_contact_information_is_registered-112233445566-us-east-1-112233445566",
     "delta": "new",
     "status": "MANUAL",
api/src/backend/api/migrations/0006_findings_first_seen.py (new file, 15 lines)

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0005_rbac_missing_admin_roles"),
    ]

    operations = [
        migrations.AddField(
            model_name="finding",
            name="first_seen_at",
            field=models.DateTimeField(editable=False, null=True),
        ),
    ]
api/src/backend/api/migrations/0007_scan_and_scan_summaries_indexes.py (new file, 25 lines; name inferred from the dependency declared in the following migration)

# Generated by Django 5.1.5 on 2025-01-28 15:03

from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [
        ("api", "0006_findings_first_seen"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="scan",
            index=models.Index(
                fields=["tenant_id", "provider_id", "state", "inserted_at"],
                name="scans_prov_state_insert_idx",
            ),
        ),
        migrations.AddIndex(
            model_name="scansummary",
            index=models.Index(
                fields=["tenant_id", "scan_id"], name="scan_summaries_tenant_scan_idx"
            ),
        ),
    ]
(new migration file, 64 lines)

import json
from datetime import datetime, timedelta, timezone

import django.db.models.deletion
from django.db import migrations, models
from django_celery_beat.models import PeriodicTask

from api.db_utils import rls_transaction
from api.models import Scan, StateChoices


def migrate_daily_scheduled_scan_tasks(apps, schema_editor):
    for daily_scheduled_scan_task in PeriodicTask.objects.filter(
        task="scan-perform-scheduled"
    ):
        task_kwargs = json.loads(daily_scheduled_scan_task.kwargs)
        tenant_id = task_kwargs["tenant_id"]
        provider_id = task_kwargs["provider_id"]

        current_time = datetime.now(timezone.utc)
        scheduled_time_today = datetime.combine(
            current_time.date(),
            daily_scheduled_scan_task.start_time.time(),
            tzinfo=timezone.utc,
        )

        if current_time < scheduled_time_today:
            next_scan_date = scheduled_time_today
        else:
            next_scan_date = scheduled_time_today + timedelta(days=1)

        with rls_transaction(tenant_id):
            Scan.objects.create(
                tenant_id=tenant_id,
                name="Daily scheduled scan",
                provider_id=provider_id,
                trigger=Scan.TriggerChoices.SCHEDULED,
                state=StateChoices.SCHEDULED,
                scheduled_at=next_scan_date,
                scheduler_task_id=daily_scheduled_scan_task.id,
            )


class Migration(migrations.Migration):
    atomic = False

    dependencies = [
        ("api", "0007_scan_and_scan_summaries_indexes"),
        ("django_celery_beat", "0019_alter_periodictasks_options"),
    ]

    operations = [
        migrations.AddField(
            model_name="scan",
            name="scheduler_task",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="django_celery_beat.periodictask",
            ),
        ),
        migrations.RunPython(migrate_daily_scheduled_scan_tasks),
    ]
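The data migration above re-seeds each daily scheduled scan: if today's slot has not passed yet it is reused, otherwise the scan moves to tomorrow's slot. A worked example of that rule:

```python
from datetime import datetime, time, timedelta, timezone

# A daily task whose start_time is 09:00 UTC, migrated at 14:30 UTC,
# is scheduled for tomorrow's 09:00 slot; migrated before 09:00, it
# would keep today's slot.
now = datetime(2025, 1, 28, 14, 30, tzinfo=timezone.utc)
slot_today = datetime.combine(now.date(), time(9, 0), tzinfo=timezone.utc)
next_run = slot_today if now < slot_today else slot_today + timedelta(days=1)
assert next_run == datetime(2025, 1, 29, 9, 0, tzinfo=timezone.utc)
```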
@@ -11,6 +11,7 @@ from django.core.validators import MinLengthValidator
 from django.db import models
 from django.db.models import Q
 from django.utils.translation import gettext_lazy as _
+from django_celery_beat.models import PeriodicTask
 from django_celery_results.models import TaskResult
 from psqlextra.manager import PostgresManager
 from psqlextra.models import PostgresPartitionedModel

@@ -410,6 +411,9 @@ class Scan(RowLevelSecurityProtectedModel):
     started_at = models.DateTimeField(null=True, blank=True)
     completed_at = models.DateTimeField(null=True, blank=True)
     next_scan_at = models.DateTimeField(null=True, blank=True)
+    scheduler_task = models.ForeignKey(
+        PeriodicTask, on_delete=models.CASCADE, null=True, blank=True
+    )
     # TODO: mutelist foreign key

     class Meta(RowLevelSecurityProtectedModel.Meta):

@@ -428,6 +432,10 @@ class Scan(RowLevelSecurityProtectedModel):
             fields=["provider", "state", "trigger", "scheduled_at"],
             name="scans_prov_state_trig_sche_idx",
         ),
+        models.Index(
+            fields=["tenant_id", "provider_id", "state", "inserted_at"],
+            name="scans_prov_state_insert_idx",
+        ),
     ]

     class JSONAPIMeta:

@@ -615,6 +623,7 @@ class Finding(PostgresPartitionedModel, RowLevelSecurityProtectedModel):
     id = models.UUIDField(primary_key=True, default=uuid7, editable=False)
     inserted_at = models.DateTimeField(auto_now_add=True, editable=False)
     updated_at = models.DateTimeField(auto_now=True, editable=False)
+    first_seen_at = models.DateTimeField(editable=False, null=True)

     uid = models.CharField(max_length=300)
     delta = FindingDeltaEnumField(

@@ -1099,6 +1108,12 @@ class ScanSummary(RowLevelSecurityProtectedModel):
             statements=["SELECT", "INSERT", "UPDATE", "DELETE"],
         ),
     ]
+    indexes = [
+        models.Index(
+            fields=["tenant_id", "scan_id"],
+            name="scan_summaries_tenant_scan_idx",
+        )
+    ]

     class JSONAPIMeta:
         resource_name = "scan-summaries"
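The new `scans_prov_state_insert_idx` composite index matches queries that narrow by tenant, provider, and state and then order by insertion time. A sketch of the query shape it serves; the variables and the `StateChoices` value used here are assumptions, not code from the diff.

```python
# Sketch of a query the composite index can satisfy without a full table walk:
latest_completed = Scan.objects.filter(
    tenant_id=tenant_id,        # placeholder variable
    provider_id=provider_id,    # placeholder variable
    state=StateChoices.COMPLETED,  # assumed state value
).order_by("-inserted_at")[:10]
```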
@@ -1,7 +1,7 @@
 openapi: 3.0.3
 info:
   title: Prowler API
-  version: 1.1.0
+  version: 1.4.0
   description: |-
     Prowler API specification.

@@ -257,6 +257,7 @@ paths:
            - raw_result
            - inserted_at
            - updated_at
+           - first_seen_at
            - url
            - scan
            - resources

@@ -668,8 +669,6 @@ paths:
          items:
            type: string
            enum:
-           - id
-           - -id
            - status
            - -status
            - severity

@@ -715,6 +714,7 @@ paths:
            - raw_result
            - inserted_at
            - updated_at
+           - first_seen_at
            - url
            - scan
            - resources
@@ -1150,8 +1150,430 @@ paths:
          items:
            type: string
            enum:
            - id
            - -id
            - status
            - -status
            - severity
            - -severity
            - check_id
            - -check_id
            - inserted_at
            - -inserted_at
            - updated_at
            - -updated_at
        explode: false
      tags:
      - Finding
      security:
      - jwtAuth: []
      deprecated: true
      responses:
        '200':
          content:
            application/vnd.api+json:
              schema:
                $ref: '#/components/schemas/FindingDynamicFilterResponse'
          description: ''
  /api/v1/findings/metadata:
    get:
      operationId: findings_metadata_retrieve
      description: Fetch unique metadata values from a set of findings. This is useful
        for dynamic filtering.
      summary: Retrieve metadata values from findings
      parameters:
      - in: query
        name: fields[findings-metadata]
        schema:
          type: array
          items:
            type: string
            enum:
            - services
            - regions
            - resource_types
        description: endpoint return only specific fields in the response on a per-type
          basis by including a fields[TYPE] query parameter.
        explode: false
      - in: query
        name: filter[check_id]
        schema:
          type: string
      - in: query
        name: filter[check_id__icontains]
        schema:
          type: string
      - in: query
        name: filter[check_id__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[delta]
        schema:
          type: string
          nullable: true
          enum:
          - changed
          - new
        description: |-
          * `new` - New
          * `changed` - Changed
      - in: query
        name: filter[delta__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[id]
        schema:
          type: string
          format: uuid
      - in: query
        name: filter[id__in]
        schema:
          type: array
          items:
            type: string
            format: uuid
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[impact]
        schema:
          type: string
          enum:
          - critical
          - high
          - informational
          - low
          - medium
        description: |-
          * `critical` - Critical
          * `high` - High
          * `medium` - Medium
          * `low` - Low
          * `informational` - Informational
      - in: query
        name: filter[impact__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[inserted_at]
        schema:
          type: string
          format: date
      - in: query
        name: filter[inserted_at__date]
        schema:
          type: string
          format: date
      - in: query
        name: filter[inserted_at__gte]
        schema:
          type: string
          format: date
      - in: query
        name: filter[inserted_at__lte]
        schema:
          type: string
          format: date
      - in: query
        name: filter[provider]
        schema:
          type: string
          format: uuid
      - in: query
        name: filter[provider__in]
        schema:
          type: array
          items:
            type: string
            format: uuid
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[provider_alias]
        schema:
          type: string
      - in: query
        name: filter[provider_alias__icontains]
        schema:
          type: string
      - in: query
        name: filter[provider_alias__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[provider_type]
        schema:
          type: string
          enum:
          - aws
          - azure
          - gcp
          - kubernetes
        description: |-
          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
      - in: query
        name: filter[provider_type__in]
        schema:
          type: array
          items:
            type: string
            enum:
            - aws
            - azure
            - gcp
            - kubernetes
        description: |-
          Multiple values may be separated by commas.

          * `aws` - AWS
          * `azure` - Azure
          * `gcp` - GCP
          * `kubernetes` - Kubernetes
        explode: false
        style: form
      - in: query
        name: filter[provider_uid]
        schema:
          type: string
      - in: query
        name: filter[provider_uid__icontains]
        schema:
          type: string
      - in: query
        name: filter[provider_uid__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[region]
        schema:
          type: string
      - in: query
        name: filter[region__icontains]
        schema:
          type: string
      - in: query
        name: filter[region__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[resource_name]
        schema:
          type: string
      - in: query
        name: filter[resource_name__icontains]
        schema:
          type: string
      - in: query
        name: filter[resource_name__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[resource_type]
        schema:
          type: string
      - in: query
        name: filter[resource_type__icontains]
        schema:
          type: string
      - in: query
        name: filter[resource_type__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[resource_uid]
        schema:
          type: string
      - in: query
        name: filter[resource_uid__icontains]
        schema:
          type: string
      - in: query
        name: filter[resource_uid__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[resources]
        schema:
          type: array
          items:
            type: string
            format: uuid
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[scan]
        schema:
          type: string
          format: uuid
      - in: query
        name: filter[scan__in]
        schema:
          type: array
          items:
            type: string
            format: uuid
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - name: filter[search]
        required: false
        in: query
        description: A search term.
        schema:
          type: string
      - in: query
        name: filter[service]
        schema:
          type: string
      - in: query
        name: filter[service__icontains]
        schema:
          type: string
      - in: query
        name: filter[service__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[severity]
        schema:
          type: string
          enum:
          - critical
          - high
          - informational
          - low
          - medium
        description: |-
          * `critical` - Critical
          * `high` - High
          * `medium` - Medium
          * `low` - Low
          * `informational` - Informational
      - in: query
        name: filter[severity__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[status]
        schema:
          type: string
          enum:
          - FAIL
          - MANUAL
          - MUTED
          - PASS
        description: |-
          * `FAIL` - Fail
          * `PASS` - Pass
          * `MANUAL` - Manual
          * `MUTED` - Muted
      - in: query
        name: filter[status__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[uid]
        schema:
          type: string
      - in: query
        name: filter[uid__in]
        schema:
          type: array
          items:
            type: string
        description: Multiple values may be separated by commas.
        explode: false
        style: form
      - in: query
        name: filter[updated_at]
        schema:
          type: string
          format: date
      - in: query
        name: filter[updated_at__gte]
        schema:
          type: string
          format: date-time
      - in: query
        name: filter[updated_at__lte]
        schema:
          type: string
          format: date-time
      - name: sort
        required: false
        in: query
        description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
        schema:
          type: array
          items:
            type: string
            enum:
            - status
            - -status
            - severity
@@ -1168,11 +1590,11 @@ paths:
      security:
      - jwtAuth: []
      responses:
-       '201':
+       '200':
          content:
            application/vnd.api+json:
              schema:
-               $ref: '#/components/schemas/OpenApiResponseResponse'
+               $ref: '#/components/schemas/FindingMetadataResponse'
          description: ''
  /api/v1/invitations/accept:
    post:
@@ -2948,9 +3370,7 @@
            - name
            - manage_users
            - manage_account
-           - manage_billing
            - manage_providers
-           - manage_integrations
            - manage_scans
            - permission_state
            - unlimited_visibility

@@ -3068,12 +3488,8 @@
            - -manage_users
            - manage_account
            - -manage_account
-           - manage_billing
-           - -manage_billing
            - manage_providers
            - -manage_providers
-           - manage_integrations
-           - -manage_integrations
            - manage_scans
            - -manage_scans
            - permission_state

@@ -3147,9 +3563,7 @@
            - name
            - manage_users
            - manage_account
-           - manage_billing
            - manage_providers
-           - manage_integrations
            - manage_scans
            - permission_state
            - unlimited_visibility

@@ -3679,6 +4093,38 @@
              schema:
                $ref: '#/components/schemas/ScanUpdateResponse'
          description: ''
+  /api/v1/scans/{id}/report:
+    get:
+      operationId: scans_report_retrieve
+      description: Returns a ZIP file containing the requested report
+      summary: Download ZIP report
+      parameters:
+      - in: query
+        name: fields[scans]
+        schema:
+          type: array
+          items:
+            type: string
+            enum: []
+        description: endpoint return only specific fields in the response on a per-type
+          basis by including a fields[TYPE] query parameter.
+        explode: false
+      - in: path
+        name: id
+        schema:
+          type: string
+          format: uuid
+        description: A UUID string identifying this scan.
+        required: true
+      tags:
+      - Scan
+      security:
+      - jwtAuth: []
+      responses:
+        '200':
+          description: Report obtanined successfully
+        '404':
+          description: Report not found
  /api/v1/schedules/daily:
    post:
      operationId: schedules_daily_create
@@ -4825,8 +5271,8 @@ paths:
          description: ''
    delete:
      operationId: users_destroy
-     description: Remove a user account from the system.
-     summary: Delete a user account
+     description: Remove the current user account from the system.
+     summary: Delete the user account
      parameters:
      - in: path
        name: id

@@ -5395,6 +5841,11 @@ components:
          type: string
          format: date-time
          readOnly: true
+       first_seen_at:
+         type: string
+         format: date-time
+         readOnly: true
+         nullable: true
      required:
      - uid
      - status

@@ -5458,6 +5909,89 @@ components:
          readOnly: true
      required:
      - scan
+   FindingDynamicFilter:
+     type: object
+     required:
+     - type
+     - id
+     additionalProperties: false
+     properties:
+       type:
+         allOf:
+         - $ref: '#/components/schemas/FindingDynamicFilterTypeEnum'
+         description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+           member is used to describe resource objects that share common attributes
+           and relationships.
+       id: {}
+       attributes:
+         type: object
+         properties:
+           services:
+             type: array
+             items:
+               type: string
+           regions:
+             type: array
+             items:
+               type: string
+         required:
+         - services
+         - regions
+   FindingDynamicFilterResponse:
+     type: object
+     properties:
+       data:
+         $ref: '#/components/schemas/FindingDynamicFilter'
+     required:
+     - data
+   FindingDynamicFilterTypeEnum:
+     type: string
+     enum:
+     - finding-dynamic-filters
+   FindingMetadata:
+     type: object
+     required:
+     - type
+     - id
+     additionalProperties: false
+     properties:
+       type:
+         allOf:
+         - $ref: '#/components/schemas/FindingMetadataTypeEnum'
+         description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
+           member is used to describe resource objects that share common attributes
+           and relationships.
+       id: {}
+       attributes:
+         type: object
+         properties:
+           services:
+             type: array
+             items:
+               type: string
+           regions:
+             type: array
+             items:
+               type: string
+           resource_types:
+             type: array
+             items:
+               type: string
+         required:
+         - services
+         - regions
+         - resource_types
+   FindingMetadataResponse:
+     type: object
+     properties:
+       data:
+         $ref: '#/components/schemas/FindingMetadata'
+     required:
+     - data
+   FindingMetadataTypeEnum:
+     type: string
+     enum:
+     - findings-metadata
    FindingResponse:
      type: object
      properties:
@@ -5902,8 +6436,6 @@ components:
        - data
        description: A related resource object from type roles
        title: roles
-       required:
-       - roles
    InvitationUpdateResponse:
      type: object
      properties:

@@ -5915,7 +6447,6 @@
      type: object
      required:
      - type
-     - id
      additionalProperties: false
      properties:
        type:

@@ -5924,9 +6455,6 @@
          description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
            member is used to describe resource objects that share common attributes
            and relationships.
-       id:
-         type: string
-         format: uuid
        attributes:
          type: object
          properties:

@@ -6112,7 +6640,7 @@ components:
          type: integer
        fail:
          type: integer
-       manual:
+       muted:
          type: integer
        total:
          type: integer

@@ -6437,8 +6965,6 @@ components:
        - data
        description: A related resource object from type roles
        title: roles
-       required:
-       - roles
      required:
      - data
    PatchedProviderGroupMembershipRequest:

@@ -6644,6 +7170,9 @@ components:
                  type: string
                  description: The Amazon Resource Name (ARN) of the role to
                    assume. Required for AWS role assumption.
+               external_id:
+                 type: string
+                 description: An identifier to enhance security for role assumption.
                aws_access_key_id:
                  type: string
                  description: The AWS access key ID. Only required if the environment

@@ -6662,10 +7191,6 @@ components:
                  maximum: 43200
                  default: 3600
                  description: The duration (in seconds) for the role session.
-               external_id:
-                 type: string
-                 description: An optional identifier to enhance security for
-                   role assumption; may be required by the role administrator.
                role_session_name:
                  type: string
                  description: |-

@@ -6678,6 +7203,7 @@ components:
              pattern: ^[a-zA-Z0-9=,.@_-]+$
            required:
            - role_arn
+           - external_id
          - type: object
            title: Azure Static Credentials
            properties:

@@ -6850,12 +7376,8 @@ components:
              type: boolean
            manage_account:
              type: boolean
-           manage_billing:
-             type: boolean
            manage_providers:
              type: boolean
-           manage_integrations:
-             type: boolean
            manage_scans:
              type: boolean
            permission_state:

@@ -7131,37 +7653,6 @@ components:
        required:
        - name
        - email
-     relationships:
-       type: object
-       properties:
-         roles:
-           type: object
-           properties:
-             data:
-               type: array
-               items:
-                 type: object
-                 properties:
-                   id:
-                     type: string
-                     format: uuid
-                     title: Resource Identifier
-                     description: The identifier of the related object.
-                   type:
-                     type: string
-                     enum:
-                     - roles
-                     title: Resource Type Name
-                     description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
-                       member is used to describe resource objects that share
-                       common attributes and relationships.
-                 required:
-                 - id
-                 - type
-             required:
-             - data
-           description: A related resource object from type roles
-           title: roles
      required:
      - data
    Provider:

@@ -7890,6 +8381,9 @@ components:
            type: string
            description: The Amazon Resource Name (ARN) of the role to assume.
              Required for AWS role assumption.
+         external_id:
+           type: string
+           description: An identifier to enhance security for role assumption.
          aws_access_key_id:
            type: string
            description: The AWS access key ID. Only required if the environment

@@ -7907,10 +8401,6 @@ components:
            maximum: 43200
            default: 3600
            description: The duration (in seconds) for the role session.
-         external_id:
-           type: string
-           description: An optional identifier to enhance security for role
-             assumption; may be required by the role administrator.
          role_session_name:
            type: string
            description: |-

@@ -7923,6 +8413,7 @@ components:
        pattern: ^[a-zA-Z0-9=,.@_-]+$
      required:
      - role_arn
+     - external_id
    - type: object
      title: Azure Static Credentials
      properties:

@@ -8071,6 +8562,9 @@ components:
                  type: string
                  description: The Amazon Resource Name (ARN) of the role to
                    assume. Required for AWS role assumption.
+               external_id:
+                 type: string
+                 description: An identifier to enhance security for role assumption.
                aws_access_key_id:
                  type: string
                  description: The AWS access key ID. Only required if the environment

@@ -8089,10 +8583,6 @@ components:
                  maximum: 43200
                  default: 3600
                  description: The duration (in seconds) for the role session.
-               external_id:
-                 type: string
-                 description: An optional identifier to enhance security for
-                   role assumption; may be required by the role administrator.
                role_session_name:
                  type: string
                  description: |-

@@ -8105,6 +8595,7 @@ components:
              pattern: ^[a-zA-Z0-9=,.@_-]+$
            required:
            - role_arn
+           - external_id
          - type: object
            title: Azure Static Credentials
            properties:

@@ -8270,6 +8761,9 @@ components:
            type: string
            description: The Amazon Resource Name (ARN) of the role to assume.
              Required for AWS role assumption.
+         external_id:
+           type: string
+           description: An identifier to enhance security for role assumption.
          aws_access_key_id:
            type: string
            description: The AWS access key ID. Only required if the environment

@@ -8287,10 +8781,6 @@ components:
            maximum: 43200
            default: 3600
            description: The duration (in seconds) for the role session.
-         external_id:
-           type: string
-           description: An optional identifier to enhance security for role
-             assumption; may be required by the role administrator.
          role_session_name:
            type: string
            description: |-

@@ -8303,6 +8793,7 @@ components:
        pattern: ^[a-zA-Z0-9=,.@_-]+$
      required:
      - role_arn
+     - external_id
    - type: object
      title: Azure Static Credentials
      properties:

@@ -8537,12 +9028,8 @@ components:
              type: boolean
            manage_account:
              type: boolean
-           manage_billing:
-             type: boolean
            manage_providers:
              type: boolean
-           manage_integrations:
-             type: boolean
            manage_scans:
              type: boolean
            permission_state:

@@ -8670,12 +9157,8 @@ components:
              type: boolean
            manage_account:
              type: boolean
-           manage_billing:
-             type: boolean
            manage_providers:
              type: boolean
-           manage_integrations:
-             type: boolean
            manage_scans:
              type: boolean
            permission_state:

@@ -8808,12 +9291,8 @@ components:
              type: boolean
            manage_account:
              type: boolean
-           manage_billing:
-             type: boolean
            manage_providers:
              type: boolean
-           manage_integrations:
-             type: boolean
            manage_scans:
              type: boolean
            permission_state:

@@ -9877,37 +10356,6 @@ components:
        required:
        - name
        - email
-     relationships:
-       type: object
-       properties:
-         roles:
-           type: object
-           properties:
-             data:
-               type: array
-               items:
-                 type: object
-                 properties:
-                   id:
-                     type: string
-                     format: uuid
-                     title: Resource Identifier
-                     description: The identifier of the related object.
-                   type:
-                     type: string
-                     enum:
-                     - roles
-                     title: Resource Type Name
-                     description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
-                       member is used to describe resource objects that share common
-                       attributes and relationships.
-                 required:
-                 - id
-                 - type
-             required:
-             - data
-           description: A related resource object from type roles
-           title: roles
    UserUpdateResponse:
      type: object
      properties:
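Per the schema above, the new `GET /api/v1/findings/metadata` endpoint returns the unique services, regions, and resource types for a filtered set of findings. A hedged client-side sketch; the base URL and token are placeholders, not values from the spec.

```python
import requests  # assumed client library for this sketch

# Query the findings metadata endpoint with the same filter syntax the
# OpenAPI schema documents (JSON:API style `filter[...]` parameters).
response = requests.get(
    "https://localhost:8080/api/v1/findings/metadata",  # placeholder base URL
    headers={
        "Authorization": "Bearer <JWT>",  # placeholder token
        "Accept": "application/vnd.api+json",
    },
    params={
        "filter[inserted_at]": "2024-10-18",
        "filter[severity__in]": "low,medium",
    },
    timeout=30,
)
attributes = response.json()["data"]["attributes"]
print(attributes["services"], attributes["regions"], attributes["resource_types"])
```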
@@ -261,6 +261,16 @@ class TestUserViewSet:
        assert response.status_code == status.HTTP_204_NO_CONTENT
        assert not User.objects.filter(id=create_test_user.id).exists()

    def test_users_destroy_other_user(
        self, authenticated_client, create_test_user, users_fixture
    ):
        user = users_fixture[2]
        response = authenticated_client.delete(
            reverse("user-detail", kwargs={"pk": str(user.id)})
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert User.objects.filter(id=create_test_user.id).exists()

    def test_users_destroy_invalid_user(self, authenticated_client, create_test_user):
        another_user = User.objects.create_user(
            password="otherpassword", email="other@example.com"
@@ -268,7 +278,7 @@ class TestUserViewSet:
        response = authenticated_client.delete(
            reverse("user-detail", kwargs={"pk": another_user.id})
        )
        assert response.status_code == status.HTTP_404_NOT_FOUND
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert User.objects.filter(id=another_user.id).exists()

    @pytest.mark.parametrize(
@@ -2444,6 +2454,16 @@ class TestFindingViewSet:
            ("search", "ec2", 2),
            # full text search on finding tags
            ("search", "value2", 2),
            # Temporarily disabled until we implement tag filtering in the UI
# ("resource_tag_key", "key", 2),
|
||||
# ("resource_tag_key__in", "key,key2", 2),
|
||||
# ("resource_tag_key__icontains", "key", 2),
|
||||
# ("resource_tag_value", "value", 2),
|
||||
# ("resource_tag_value__in", "value,value2", 2),
|
||||
# ("resource_tag_value__icontains", "value", 2),
|
||||
# ("resource_tags", "key:value", 2),
|
||||
# ("resource_tags", "not:exists", 0),
|
||||
# ("resource_tags", "not:exists,key:value", 2),
|
||||
]
|
||||
),
|
||||
)
|
||||
@@ -2582,30 +2602,35 @@ class TestFindingViewSet:
|
||||
)
|
||||
assert response.status_code == status.HTTP_404_NOT_FOUND
|
||||
|
||||
def test_findings_services_regions_retrieve(
|
||||
self, authenticated_client, findings_fixture
|
||||
):
|
||||
def test_findings_metadata_retrieve(self, authenticated_client, findings_fixture):
|
||||
finding_1, *_ = findings_fixture
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d")},
|
||||
)
|
||||
data = response.json()
|
||||
|
||||
expected_services = {"ec2", "s3"}
|
||||
expected_regions = {"eu-west-1", "us-east-1"}
|
||||
# Temporarily disabled until we implement tag filtering in the UI
|
||||
# expected_tags = {"key": ["value"], "key2": ["value2"]}
|
||||
expected_resource_types = {"prowler-test"}
|
||||
|
||||
assert data["data"]["type"] == "finding-dynamic-filters"
|
||||
assert data["data"]["type"] == "findings-metadata"
|
||||
assert data["data"]["id"] is None
|
||||
assert set(data["data"]["attributes"]["services"]) == expected_services
|
||||
assert set(data["data"]["attributes"]["regions"]) == expected_regions
|
||||
assert (
|
||||
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
|
||||
)
|
||||
# assert data["data"]["attributes"]["tags"] == expected_tags
|
||||
|
||||
def test_findings_services_regions_severity_retrieve(
|
||||
def test_findings_metadata_severity_retrieve(
|
||||
self, authenticated_client, findings_fixture
|
||||
):
|
||||
finding_1, *_ = findings_fixture
|
||||
response = authenticated_client.get(
|
||||
reverse("finding-findings_services_regions"),
|
||||
reverse("finding-metadata"),
|
||||
{
|
||||
"filter[severity__in]": ["low", "medium"],
|
||||
"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d"),
|
||||
@@ -2615,26 +2640,36 @@ class TestFindingViewSet:
|
||||
|
||||
expected_services = {"s3"}
|
||||
expected_regions = {"eu-west-1"}
|
||||
        # Temporarily disabled until we implement tag filtering in the UI
        # expected_tags = {"key": ["value"], "key2": ["value2"]}
        expected_resource_types = {"prowler-test"}

        assert data["data"]["type"] == "finding-dynamic-filters"
        assert data["data"]["type"] == "findings-metadata"
        assert data["data"]["id"] is None
        assert set(data["data"]["attributes"]["services"]) == expected_services
        assert set(data["data"]["attributes"]["regions"]) == expected_regions
        assert (
            set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
        )
        # assert data["data"]["attributes"]["tags"] == expected_tags

    def test_findings_services_regions_future_date(self, authenticated_client):
    def test_findings_metadata_future_date(self, authenticated_client):
        response = authenticated_client.get(
            reverse("finding-findings_services_regions"),
            reverse("finding-metadata"),
            {"filter[inserted_at]": "2048-01-01"},
        )
        data = response.json()
        assert data["data"]["type"] == "finding-dynamic-filters"
        assert data["data"]["type"] == "findings-metadata"
        assert data["data"]["id"] is None
        assert data["data"]["attributes"]["services"] == []
        assert data["data"]["attributes"]["regions"] == []
        # Temporarily disabled until we implement tag filtering in the UI
        # assert data["data"]["attributes"]["tags"] == {}
        assert data["data"]["attributes"]["resource_types"] == []

    def test_findings_services_regions_invalid_date(self, authenticated_client):
    def test_findings_metadata_invalid_date(self, authenticated_client):
        response = authenticated_client.get(
            reverse("finding-findings_services_regions"),
            reverse("finding-metadata"),
            {"filter[inserted_at]": "2048-01-011"},
        )
        assert response.json() == {
@@ -4249,18 +4284,15 @@ class TestOverviewViewSet:
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED

    def test_overview_providers_list(
        self, authenticated_client, findings_fixture, resources_fixture
        self, authenticated_client, scan_summaries_fixture, resources_fixture
    ):
        response = authenticated_client.get(reverse("overview-providers"))
        assert response.status_code == status.HTTP_200_OK
        # Only findings from one provider
        assert len(response.json()["data"]) == 1
        assert response.json()["data"][0]["attributes"]["findings"]["total"] == len(
            findings_fixture
        )
        assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 0
        assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 2
        assert response.json()["data"][0]["attributes"]["findings"]["manual"] == 0
        assert response.json()["data"][0]["attributes"]["findings"]["total"] == 4
        assert response.json()["data"][0]["attributes"]["findings"]["pass"] == 2
        assert response.json()["data"][0]["attributes"]["findings"]["fail"] == 1
        assert response.json()["data"][0]["attributes"]["findings"]["muted"] == 1
        assert response.json()["data"][0]["attributes"]["resources"]["total"] == len(
            resources_fixture
        )
@@ -819,6 +819,14 @@ class ScanTaskSerializer(RLSSerializer):
        ]


class ScanReportSerializer(RLSSerializer):
    class Meta:
        model = Scan
        fields = [
            "id",
        ]


class ResourceTagSerializer(RLSSerializer):
    """
    Serializer for the ResourceTag model
@@ -905,6 +913,7 @@ class FindingSerializer(RLSSerializer):
            "raw_result",
            "inserted_at",
            "updated_at",
            "first_seen_at",
            "url",
            # Relationships
            "scan",
@@ -917,6 +926,7 @@ class FindingSerializer(RLSSerializer):
        }


# To be removed when the related endpoint is removed as well
class FindingDynamicFilterSerializer(serializers.Serializer):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
@@ -925,6 +935,19 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
        resource_name = "finding-dynamic-filters"


class FindingMetadataSerializer(serializers.Serializer):
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    resource_types = serializers.ListField(
        child=serializers.CharField(), allow_empty=True
    )
    # Temporarily disabled until we implement tag filtering in the UI
    # tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")

    class Meta:
        resource_name = "findings-metadata"
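Putting the serializer and its Meta.resource_name together, the rendered JSON:API document should look roughly like this; the values are taken from the test fixtures earlier in this diff, and the overall shape is otherwise an assumption:

# Sketch of the document the new serializer describes (values illustrative):
expected_metadata_document = {
    "data": {
        "type": "findings-metadata",  # from Meta.resource_name
        "id": None,
        "attributes": {
            "services": ["ec2", "s3"],
            "regions": ["eu-west-1", "us-east-1"],
            "resource_types": ["prowler-test"],
        },
    }
}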
# Provider secrets
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
    @staticmethod
@@ -997,7 +1020,7 @@ class KubernetesProviderSecret(serializers.Serializer):

class AWSRoleAssumptionProviderSecret(serializers.Serializer):
    role_arn = serializers.CharField()
    external_id = serializers.CharField(required=False)
    external_id = serializers.CharField()
    role_session_name = serializers.CharField(required=False)
    session_duration = serializers.IntegerField(
        required=False, min_value=900, max_value=43200
@@ -1044,6 +1067,10 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
                "description": "The Amazon Resource Name (ARN) of the role to assume. Required for AWS role "
                "assumption.",
            },
            "external_id": {
                "type": "string",
                "description": "An identifier to enhance security for role assumption.",
            },
            "aws_access_key_id": {
                "type": "string",
                "description": "The AWS access key ID. Only required if the environment lacks pre-configured "
@@ -1065,11 +1092,6 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
                "default": 3600,
                "description": "The duration (in seconds) for the role session.",
            },
            "external_id": {
                "type": "string",
                "description": "An optional identifier to enhance security for role assumption; may be "
                "required by the role administrator.",
            },
            "role_session_name": {
                "type": "string",
                "description": "An identifier for the role session, useful for tracking sessions in AWS logs. "
@@ -1083,7 +1105,7 @@ class AWSRoleAssumptionProviderSecret(serializers.Serializer):
                "pattern": "^[a-zA-Z0-9=,.@_-]+$",
            },
        },
        "required": ["role_arn"],
        "required": ["role_arn", "external_id"],
        },
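Since external_id is now mandatory alongside role_arn, a minimal role-assumption secret under the new schema would look like this sketch; the ARN and identifier values are invented for illustration:

aws_role_secret = {
    "role_arn": "arn:aws:iam::123456789012:role/ProwlerScanRole",  # placeholder ARN
    "external_id": "my-external-id",      # previously optional, now required
    "role_session_name": "prowler-scan",  # optional
    "session_duration": 3600,             # optional, 900-43200 seconds
}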
        {
            "type": "object",
@@ -1722,7 +1744,7 @@ class OverviewProviderSerializer(serializers.Serializer):
            "properties": {
                "pass": {"type": "integer"},
                "fail": {"type": "integer"},
                "manual": {"type": "integer"},
                "muted": {"type": "integer"},
                "total": {"type": "integer"},
            },
        }
@@ -1731,7 +1753,7 @@ class OverviewProviderSerializer(serializers.Serializer):
        return {
            "pass": obj["findings_passed"],
            "fail": obj["findings_failed"],
            "manual": obj["findings_manual"],
            "muted": obj["findings_muted"],
            "total": obj["total_findings"],
        }
@@ -3,28 +3,28 @@ from drf_spectacular.views import SpectacularRedocView
from rest_framework_nested import routers

from api.v1.views import (
    ComplianceOverviewViewSet,
    CustomTokenObtainView,
    CustomTokenRefreshView,
    FindingViewSet,
    MembershipViewSet,
    ProviderGroupViewSet,
    ProviderGroupProvidersRelationshipView,
    ProviderSecretViewSet,
    InvitationViewSet,
    InvitationAcceptViewSet,
    RoleViewSet,
    RoleProviderGroupRelationshipView,
    UserRoleRelationshipView,
    InvitationViewSet,
    MembershipViewSet,
    OverviewViewSet,
    ComplianceOverviewViewSet,
    ProviderGroupProvidersRelationshipView,
    ProviderGroupViewSet,
    ProviderSecretViewSet,
    ProviderViewSet,
    ResourceViewSet,
    RoleProviderGroupRelationshipView,
    RoleViewSet,
    ScanViewSet,
    ScheduleViewSet,
    SchemaView,
    TaskViewSet,
    TenantMembersViewSet,
    TenantViewSet,
    UserRoleRelationshipView,
    UserViewSet,
)
@@ -1,9 +1,17 @@
import glob
import os

import boto3
from botocore.exceptions import ClientError, NoCredentialsError, ParamValidationError
from celery.result import AsyncResult
from config.env import env
from django.conf import settings as django_settings
from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.search import SearchQuery
from django.db import transaction
from django.db.models import Count, F, OuterRef, Prefetch, Q, Subquery, Sum
from django.http import HttpResponse
from django.db.models.functions import Coalesce
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
@@ -73,7 +81,6 @@ from api.models import (
    ScanSummary,
    SeverityChoices,
    StateChoices,
    StatusChoices,
    Task,
    User,
    UserRoleRelationship,
@@ -87,6 +94,7 @@ from api.v1.serializers import (
    ComplianceOverviewFullSerializer,
    ComplianceOverviewSerializer,
    FindingDynamicFilterSerializer,
    FindingMetadataSerializer,
    FindingSerializer,
    InvitationAcceptSerializer,
    InvitationCreateSerializer,
@@ -113,6 +121,7 @@ from api.v1.serializers import (
    RoleSerializer,
    RoleUpdateSerializer,
    ScanCreateSerializer,
    ScanReportSerializer,
    ScanSerializer,
    ScanUpdateSerializer,
    ScheduleDailyCreateSerializer,
@@ -125,6 +134,7 @@ from api.v1.serializers import (
    UserSerializer,
    UserUpdateSerializer,
)
from prowler.config.config import tmp_output_directory

CACHE_DECORATOR = cache_control(
    max_age=django_settings.CACHE_MAX_AGE,
@@ -192,7 +202,7 @@ class SchemaView(SpectacularAPIView):

    def get(self, request, *args, **kwargs):
        spectacular_settings.TITLE = "Prowler API"
        spectacular_settings.VERSION = "1.1.1"
        spectacular_settings.VERSION = "1.4.0"
        spectacular_settings.DESCRIPTION = (
            "Prowler API specification.\n\nThis file is auto-generated."
        )
@@ -275,8 +285,8 @@ class SchemaView(SpectacularAPIView):
    ),
    destroy=extend_schema(
        tags=["User"],
        summary="Delete a user account",
        description="Remove a user account from the system.",
        summary="Delete the user account",
        description="Remove the current user account from the system.",
    ),
    me=extend_schema(
        tags=["User"],
@@ -340,6 +350,12 @@ class UserViewSet(BaseUserViewset):
            status=status.HTTP_200_OK,
        )

    def destroy(self, request, *args, **kwargs):
        if kwargs["pk"] != str(self.request.user.id):
            raise ValidationError("Only the current user can be deleted.")

        return super().destroy(request, *args, **kwargs)

    @extend_schema(
        parameters=[
            OpenApiParameter(
@@ -1044,7 +1060,7 @@ class ScanViewSet(BaseRLSViewSet):
        """
        if self.request.method in SAFE_METHODS:
            # No permissions required for GET requests
            self.required_permissions = [Permissions.MANAGE_PROVIDERS]
            self.required_permissions = []
        else:
            # Require permission for non-GET requests
            self.required_permissions = [Permissions.MANAGE_SCANS]
@@ -1066,6 +1082,8 @@ class ScanViewSet(BaseRLSViewSet):
            return ScanCreateSerializer
        elif self.action == "partial_update":
            return ScanUpdateSerializer
        elif self.action == "report":
            return ScanReportSerializer
        return super().get_serializer_class()

    def partial_update(self, request, *args, **kwargs):
@@ -1120,6 +1138,101 @@ class ScanViewSet(BaseRLSViewSet):
        },
    )

    @extend_schema(
        tags=["Scan"],
        summary="Download ZIP report",
        description="Returns a ZIP file containing the requested report",
        request=ScanReportSerializer,
        responses={
            200: OpenApiResponse(description="Report obtained successfully"),
            404: OpenApiResponse(description="Report not found"),
        },
    )
    @action(detail=True, methods=["get"], url_name="report")
    def report(self, request, pk=None):
        s3_client = None
        try:
            s3_client = boto3.client("s3")
            s3_client.list_buckets()
        except (ClientError, NoCredentialsError, ParamValidationError):
            try:
                s3_client = boto3.client(
                    "s3",
                    aws_access_key_id=env.str("ARTIFACTS_AWS_ACCESS_KEY_ID"),
                    aws_secret_access_key=env.str("ARTIFACTS_AWS_SECRET_ACCESS_KEY"),
                    aws_session_token=env.str("ARTIFACTS_AWS_SESSION_TOKEN"),
                    region_name=env.str("ARTIFACTS_AWS_DEFAULT_REGION"),
                )
                s3_client.list_buckets()
            except (ClientError, NoCredentialsError, ParamValidationError):
                s3_client = None

        if s3_client:
            bucket_name = env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET")
            s3_prefix = f"{request.tenant_id}/{pk}/"

            try:
                response = s3_client.list_objects_v2(
                    Bucket=bucket_name, Prefix=s3_prefix
                )
                if response["KeyCount"] == 0:
                    return Response(
                        {"detail": "No files found in S3 storage"},
                        status=status.HTTP_404_NOT_FOUND,
                    )

                zip_files = [
                    obj["Key"]
                    for obj in response.get("Contents", [])
                    if obj["Key"].endswith(".zip")
                ]
                if not zip_files:
                    return Response(
                        {"detail": "No ZIP files found in S3 storage"},
                        status=status.HTTP_404_NOT_FOUND,
                    )

                s3_key = zip_files[0]
                s3_object = s3_client.get_object(Bucket=bucket_name, Key=s3_key)
                file_content = s3_object["Body"].read()
                filename = os.path.basename(s3_key)

            except ClientError:
                return Response(
                    {"detail": "Error accessing cloud storage"},
                    status=status.HTTP_500_INTERNAL_SERVER_ERROR,
                )

        else:
            local_path = os.path.join(
                tmp_output_directory,
                str(request.tenant_id),
                str(pk),
                "*.zip",
            )
            zip_files = glob.glob(local_path)
            if not zip_files:
                return Response(
                    {"detail": "No local files found"}, status=status.HTTP_404_NOT_FOUND
                )

            try:
                file_path = zip_files[0]
                with open(file_path, "rb") as f:
                    file_content = f.read()
                filename = os.path.basename(file_path)
            except IOError:
                return Response(
                    {"detail": "Error reading local file"},
                    status=status.HTTP_500_INTERNAL_SERVER_ERROR,
                )

        response = HttpResponse(
            file_content, content_type="application/x-zip-compressed"
        )
        response["Content-Disposition"] = f'attachment; filename="{filename}"'
        return response
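For context, a client-side sketch of the new download flow; the base URL, token, and scan id below are placeholders, and the /scans/{id}/report path is inferred from the @action above rather than confirmed routing:

import requests  # assumed available on the client side

BASE_URL = "http://localhost:8080/api/v1"  # placeholder deployment URL
SCAN_ID = "<scan-uuid>"  # placeholder

resp = requests.get(
    f"{BASE_URL}/scans/{SCAN_ID}/report",
    headers={"Authorization": "Bearer <token>"},  # placeholder credentials
    timeout=60,
)
if resp.status_code == 200:
    # The endpoint answers with a ZIP payload and a Content-Disposition filename.
    with open("scan-report.zip", "wb") as fd:
        fd.write(resp.content)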
@extend_schema_view(
    list=extend_schema(
@@ -1274,7 +1387,13 @@ class ResourceViewSet(BaseRLSViewSet):
        tags=["Finding"],
        summary="Retrieve the services and regions that are impacted by findings",
        description="Fetch services and regions affected in findings.",
        responses={201: OpenApiResponse(response=MembershipSerializer)},
        filters=True,
        deprecated=True,
    ),
    metadata=extend_schema(
        tags=["Finding"],
        summary="Retrieve metadata values from findings",
        description="Fetch unique metadata values from a set of findings. This is useful for dynamic filtering.",
        filters=True,
    ),
)
@@ -1292,9 +1411,8 @@ class FindingViewSet(BaseRLSViewSet):
    }
    http_method_names = ["get"]
    filterset_class = FindingFilter
    ordering = ["-id"]
    ordering = ["-inserted_at"]
    ordering_fields = [
        "id",
        "status",
        "severity",
        "check_id",
@@ -1308,6 +1426,8 @@ class FindingViewSet(BaseRLSViewSet):
    def get_serializer_class(self):
        if self.action == "findings_services_regions":
            return FindingDynamicFilterSerializer
        elif self.action == "metadata":
            return FindingMetadataSerializer

        return super().get_serializer_class()

@@ -1376,6 +1496,62 @@ class FindingViewSet(BaseRLSViewSet):

        return Response(data=serializer.data, status=status.HTTP_200_OK)

    @action(detail=False, methods=["get"], url_name="metadata")
    def metadata(self, request):
        tenant_id = self.request.tenant_id
        queryset = self.get_queryset()
        filtered_queryset = self.filter_queryset(queryset)

        relevant_resources = Resource.objects.filter(
            tenant_id=tenant_id, findings__in=filtered_queryset
        ).distinct()

        services = (
            relevant_resources.values_list("service", flat=True)
            .distinct()
            .order_by("service")
        )

        regions = (
            relevant_resources.exclude(region="")
            .values_list("region", flat=True)
            .distinct()
            .order_by("region")
        )

        resource_types = (
            relevant_resources.values_list("type", flat=True)
            .distinct()
            .order_by("type")
        )

        # Temporarily disabled until we implement tag filtering in the UI
        # tag_data = (
        #     relevant_resources
        #     .filter(tags__key__isnull=False, tags__value__isnull=False)
        #     .exclude(tags__key="")
        #     .exclude(tags__value="")
        #     .values("tags__key", "tags__value")
        #     .distinct()
        #     .order_by("tags__key", "tags__value")
        # )
        #
        # tags_dict = {}
        # for row in tag_data:
        #     k, v = row["tags__key"], row["tags__value"]
        #     tags_dict.setdefault(k, []).append(v)

        result = {
            "services": list(services),
            "regions": list(regions),
            "resource_types": list(resource_types),
            # "tags": tags_dict
        }

        serializer = self.get_serializer(data=result)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
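As a rough usage sketch: because the action runs filter_queryset() first, the same filter parameters accepted by the findings list endpoint also narrow the metadata aggregation. The path and client setup here are assumptions mirroring the tests earlier in this diff:

# e.g. with rest_framework.test.APIClient as `client` (hypothetical setup):
resp = client.get(
    "/api/v1/findings/metadata",
    {"filter[severity__in]": "low,medium", "filter[inserted_at]": "2024-01-02"},
)
attributes = resp.json()["data"]["attributes"]
# -> {"services": [...], "regions": [...], "resource_types": [...]}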
@extend_schema_view(
    list=extend_schema(
@@ -1938,68 +2114,53 @@ class OverviewViewSet(BaseRLSViewSet):
    @action(detail=False, methods=["get"], url_name="providers")
    def providers(self, request):
        tenant_id = self.request.tenant_id
        # Subquery to get the most recent finding for each uid
        latest_finding_ids = (
            Finding.objects.filter(

        latest_scan_ids = (
            Scan.objects.filter(
                tenant_id=tenant_id,
                uid=OuterRef("uid"),
                scan__provider=OuterRef("scan__provider"),
                state=StateChoices.COMPLETED,
            )
            .order_by("-id")  # Most recent by id
            .values("id")[:1]
            .order_by("provider_id", "-inserted_at")
            .distinct("provider_id")
            .values_list("id", flat=True)
        )

        # Filter findings to only include the most recent for each uid
        recent_findings = Finding.objects.filter(
            tenant_id=tenant_id, id__in=Subquery(latest_finding_ids)
        )

        # Aggregate findings by provider
        findings_aggregated = (
            recent_findings.values("scan__provider__provider")
            ScanSummary.objects.filter(tenant_id=tenant_id, scan_id__in=latest_scan_ids)
            .values("scan__provider__provider")
            .annotate(
                findings_passed=Count("id", filter=Q(status=StatusChoices.PASS.value)),
                findings_failed=Count("id", filter=Q(status=StatusChoices.FAIL.value)),
                findings_manual=Count(
                    "id", filter=Q(status=StatusChoices.MANUAL.value)
                ),
                total_findings=Count("id"),
                findings_passed=Coalesce(Sum("_pass"), 0),
                findings_failed=Coalesce(Sum("fail"), 0),
                findings_muted=Coalesce(Sum("muted"), 0),
                total_findings=Coalesce(Sum("total"), 0),
            )
            .order_by("-findings_failed")
        )

        # Aggregate total resources by provider
        resources_aggregated = (
            Resource.objects.filter(tenant_id=tenant_id)
            .values("provider__provider")
            .annotate(total_resources=Count("id"))
        )
        resources_dict = {
            row["provider__provider"]: row["total_resources"]
            for row in resources_aggregated
        }

        # Combine findings and resources data
        overview = []
        for findings in findings_aggregated:
            provider = findings["scan__provider__provider"]
            total_resources = next(
                (
                    res["total_resources"]
                    for res in resources_aggregated
                    if res["provider__provider"] == provider
                ),
                0,
            )
        for row in findings_aggregated:
            provider_type = row["scan__provider__provider"]
            overview.append(
                {
                    "provider": provider,
                    "total_resources": total_resources,
                    "total_findings": findings["total_findings"],
                    "findings_passed": findings["findings_passed"],
                    "findings_failed": findings["findings_failed"],
                    "findings_manual": findings["findings_manual"],
                    "provider": provider_type,
                    "total_resources": resources_dict.get(provider_type, 0),
                    "total_findings": row["total_findings"],
                    "findings_passed": row["findings_passed"],
                    "findings_failed": row["findings_failed"],
                    "findings_muted": row["findings_muted"],
                }
            )

        serializer = OverviewProviderSerializer(overview, many=True)

        return Response(serializer.data, status=status.HTTP_200_OK)
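The latest-scan query leans on PostgreSQL's DISTINCT ON: ordering by provider_id then -inserted_at and applying .distinct("provider_id") keeps exactly one row, the newest completed scan, per provider. DISTINCT ON requires the leading order_by fields to match the distinct fields, which is why provider_id comes first in the ordering. The same pattern in isolation, with model names taken from this diff:

from api.models import Scan, StateChoices

# One id per provider: the most recently inserted COMPLETED scan.
latest_per_provider = (
    Scan.objects.filter(state=StateChoices.COMPLETED)
    .order_by("provider_id", "-inserted_at")
    .distinct("provider_id")  # PostgreSQL DISTINCT ON (provider_id)
    .values_list("id", flat=True)
)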
    @action(detail=False, methods=["get"], url_name="findings")
@@ -2014,7 +2175,7 @@ class OverviewViewSet(BaseRLSViewSet):
                state=StateChoices.COMPLETED,
                provider_id=OuterRef("scan__provider_id"),
            )
            .order_by("-id")
            .order_by("-inserted_at")
            .values("id")[:1]
        )

@@ -2059,7 +2220,7 @@ class OverviewViewSet(BaseRLSViewSet):
                state=StateChoices.COMPLETED,
                provider_id=OuterRef("scan__provider_id"),
            )
            .order_by("-id")
            .order_by("-inserted_at")
            .values("id")[:1]
        )

@@ -2095,7 +2256,7 @@ class OverviewViewSet(BaseRLSViewSet):
                state=StateChoices.COMPLETED,
                provider_id=OuterRef("scan__provider_id"),
            )
            .order_by("-id")
            .order_by("-inserted_at")
            .values("id")[:1]
        )
@@ -1,7 +1,6 @@
from config.django.base import *  # noqa
from config.env import env

DEBUG = env.bool("DJANGO_DEBUG", default=True)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["*"])

@@ -1,7 +1,6 @@
from config.django.base import *  # noqa
from config.env import env

DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])

@@ -1,7 +1,6 @@
from config.django.base import *  # noqa
from config.env import env

DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost", "127.0.0.1"])
@@ -626,6 +626,7 @@ def findings_fixture(scans_fixture, resources_fixture):
            "CheckId": "test_check_id",
            "Description": "test description apple sauce",
        },
        first_seen_at="2024-01-02T00:00:00Z",
    )

    finding1.add_resources([resource1])
@@ -651,6 +652,7 @@ def findings_fixture(scans_fixture, resources_fixture):
            "CheckId": "test_check_id",
            "Description": "test description orange juice",
        },
        first_seen_at="2024-01-02T00:00:00Z",
    )

    finding2.add_resources([resource2])
@@ -5,10 +5,14 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
from rest_framework_json_api.serializers import ValidationError
from tasks.tasks import perform_scheduled_scan_task

from api.models import Provider
from api.db_utils import rls_transaction
from api.models import Provider, Scan, StateChoices


def schedule_provider_scan(provider_instance: Provider):
    tenant_id = str(provider_instance.tenant_id)
    provider_id = str(provider_instance.id)

    schedule, _ = IntervalSchedule.objects.get_or_create(
        every=24,
        period=IntervalSchedule.HOURS,
@@ -17,23 +21,9 @@ def schedule_provider_scan(provider_instance: Provider):
    # Create a unique name for the periodic task
    task_name = f"scan-perform-scheduled-{provider_instance.id}"

    # Schedule the task
    _, created = PeriodicTask.objects.get_or_create(
        interval=schedule,
        name=task_name,
        task="scan-perform-scheduled",
        kwargs=json.dumps(
            {
                "tenant_id": str(provider_instance.tenant_id),
                "provider_id": str(provider_instance.id),
            }
        ),
        one_off=False,
        defaults={
            "start_time": datetime.now(timezone.utc) + timedelta(hours=24),
        },
    )
    if not created:
    if PeriodicTask.objects.filter(
        interval=schedule, name=task_name, task="scan-perform-scheduled"
    ).exists():
        raise ValidationError(
            [
                {
@@ -45,9 +35,36 @@ def schedule_provider_scan(provider_instance: Provider):
            ]
        )

    with rls_transaction(tenant_id):
        scheduled_scan = Scan.objects.create(
            tenant_id=tenant_id,
            name="Daily scheduled scan",
            provider_id=provider_id,
            trigger=Scan.TriggerChoices.SCHEDULED,
            state=StateChoices.AVAILABLE,
            scheduled_at=datetime.now(timezone.utc),
        )

        # Schedule the task
        periodic_task_instance = PeriodicTask.objects.create(
            interval=schedule,
            name=task_name,
            task="scan-perform-scheduled",
            kwargs=json.dumps(
                {
                    "tenant_id": tenant_id,
                    "provider_id": provider_id,
                }
            ),
            one_off=False,
            start_time=datetime.now(timezone.utc) + timedelta(hours=24),
        )
        scheduled_scan.scheduler_task_id = periodic_task_instance.id
        scheduled_scan.save()

    return perform_scheduled_scan_task.apply_async(
        kwargs={
            "tenant_id": str(provider_instance.tenant_id),
            "provider_id": str(provider_instance.id),
            "provider_id": provider_id,
        },
    )
@@ -1,8 +1,13 @@
import os
import time
import zipfile
from copy import deepcopy
from datetime import datetime, timezone

import boto3
from botocore.exceptions import ClientError
from celery.utils.log import get_task_logger
from config.env import env
from config.settings.celery import CELERY_DEADLOCK_ATTEMPTS
from django.db import IntegrityError, OperationalError
from django.db.models import Case, Count, IntegerField, Sum, When
@@ -25,15 +30,117 @@ from api.models import (
from api.models import StatusChoices as FindingStatus
from api.utils import initialize_prowler_provider
from api.v1.serializers import ScanTaskSerializer
from prowler.config.config import (
    csv_file_suffix,
    html_file_suffix,
    json_asff_file_suffix,
    json_ocsf_file_suffix,
    output_file_timestamp,
    tmp_output_directory,
)
from prowler.lib.outputs.asff.asff import ASFF
from prowler.lib.outputs.csv.csv import CSV
from prowler.lib.outputs.finding import Finding as ProwlerFinding
from prowler.lib.outputs.html.html import HTML
from prowler.lib.outputs.ocsf.ocsf import OCSF
from prowler.lib.scan.scan import Scan as ProwlerScan

logger = get_task_logger(__name__)

# Predefined mapping for output formats and their configurations
OUTPUT_FORMATS_MAPPING = {
    "csv": {
        "class": CSV,
        "suffix": csv_file_suffix,
        "kwargs": {},
    },
    "json-asff": {"class": ASFF, "suffix": json_asff_file_suffix, "kwargs": {}},
    "json-ocsf": {"class": OCSF, "suffix": json_ocsf_file_suffix, "kwargs": {}},
    "html": {"class": HTML, "suffix": html_file_suffix, "kwargs": {"stats": {}}},
}

# Mapping provider types to their identity components for output paths
PROVIDER_IDENTITY_MAP = {
    "aws": lambda p: p.identity.account,
    "azure": lambda p: p.identity.tenant_domain,
    "gcp": lambda p: p.identity.profile,
    "kubernetes": lambda p: p.identity.context.replace(":", "_").replace("/", "_"),
}
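A quick illustration of the lookup-with-fallback idiom this map is paired with in _generate_output_directory below:

# Known provider types resolve to an identity accessor; unknown ones fall back.
identity_fn = PROVIDER_IDENTITY_MAP.get("oci", lambda _: "unknown")
assert identity_fn(None) == "unknown"  # "oci" is not in the map in this diff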
def _compress_output_files(output_directory: str) -> str:
    """
    Compress output files from all configured output formats into a ZIP archive.

    Args:
        output_directory (str): The directory where the output files are located.
            The function looks up all known suffixes in OUTPUT_FORMATS_MAPPING
            and compresses those files into a single ZIP.

    Returns:
        str: The full path to the newly created ZIP archive.
    """
    zip_path = f"{output_directory}.zip"

    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        for suffix in [config["suffix"] for config in OUTPUT_FORMATS_MAPPING.values()]:
            zipf.write(
                f"{output_directory}{suffix}",
                f"artifacts/{output_directory.split('/')[-1]}{suffix}",
            )

    return zip_path
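For orientation, the resulting archive sits next to the per-format files and nests its entries under artifacts/; the exact suffixes come from prowler.config.config, so the ones sketched below are assumptions:

zip_path = _compress_output_files(output_directory)
# zip_path == f"{output_directory}.zip", containing entries such as:
#   artifacts/prowler-output-<identity>-<timestamp>.csv
#   artifacts/prowler-output-<identity>-<timestamp>.ocsf.json   (suffixes assumed)
#   artifacts/prowler-output-<identity>-<timestamp>.asff.json
#   artifacts/prowler-output-<identity>-<timestamp>.html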
def _upload_to_s3(tenant_id: str, zip_path: str, scan_id: str) -> str:
    """
    Upload the specified ZIP file to an S3 bucket.

    If the S3 bucket environment variables are not configured,
    the function returns None without performing an upload.

    Args:
        tenant_id (str): The tenant identifier, used as part of the S3 key prefix.
        zip_path (str): The local file system path to the ZIP file to be uploaded.
        scan_id (str): The scan identifier, used as part of the S3 key prefix.

    Returns:
        str: The S3 URI of the uploaded file (e.g., "s3://<bucket>/<key>") if successful.
        None: If the required environment variables for the S3 bucket are not set.

    Raises:
        botocore.exceptions.ClientError: If the upload attempt to S3 fails for any reason.
    """
    if not env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET", ""):
        return

    if env.str("ARTIFACTS_AWS_ACCESS_KEY_ID", ""):
        s3 = boto3.client(
            "s3",
            aws_access_key_id=env.str("ARTIFACTS_AWS_ACCESS_KEY_ID"),
            aws_secret_access_key=env.str("ARTIFACTS_AWS_SECRET_ACCESS_KEY"),
            aws_session_token=env.str("ARTIFACTS_AWS_SESSION_TOKEN"),
            region_name=env.str("ARTIFACTS_AWS_DEFAULT_REGION"),
        )
    else:
        s3 = boto3.client("s3")

    s3_key = f"{tenant_id}/{scan_id}/{os.path.basename(zip_path)}"
    try:
        s3.upload_file(
            Filename=zip_path,
            Bucket=env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET"),
            Key=s3_key,
        )
return f"s3://{env.str("ARTIFACTS_AWS_S3_OUTPUT_BUCKET")}/{s3_key}"
|
||||
    except ClientError as e:
        logger.error(f"S3 upload failed: {str(e)}")
        raise e
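The two helpers are used back to back at the end of a scan (see perform_prowler_scan below); a condensed sketch of that hand-off:

zip_path = _compress_output_files(output_directory)
s3_uri = _upload_to_s3(tenant_id, zip_path, scan_id)
if s3_uri is None:
    # ARTIFACTS_AWS_S3_OUTPUT_BUCKET is unset: the ZIP stays on local disk, and
    # the report endpoint earlier in this diff falls back to tmp_output_directory.
    pass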
def _create_finding_delta(
    last_status: FindingStatus | None | str, new_status: FindingStatus | None
) -> Finding.DeltaChoices:
) -> Finding.DeltaChoices | None:
    """
    Determine the delta status of a finding based on its previous and current status.

@@ -53,7 +160,11 @@ def _create_finding_delta(


def _store_resources(
    finding: ProwlerFinding, tenant_id: str, provider_instance: Provider
    finding: ProwlerFinding,
    tenant_id: str,
    provider_instance: Provider,
    resource_cache: dict,
    tag_cache: dict,
) -> tuple[Resource, tuple[str, str]]:
    """
    Store resource information from a finding, including tags, in the database.
@@ -65,40 +176,91 @@ def _store_resources(

    Returns:
        tuple:
            - Resource: The resource instance created or retrieved from the database.
            - Resource: The resource instance created or updated from the database.
            - tuple[str, str]: A tuple containing the resource UID and region.

    """
    with rls_transaction(tenant_id):
        resource_instance, created = Resource.objects.get_or_create(
            tenant_id=tenant_id,
            provider=provider_instance,
            uid=finding.resource_uid,
            defaults={
                "region": finding.region,
                "service": finding.service_name,
                "type": finding.resource_type,
            },
        )
    resource_uid = finding.resource_uid

    # Check cache or create/update resource
    if resource_uid in resource_cache:
        resource_instance = resource_cache[resource_uid]
        update_fields = []
        for field, value in [
            ("region", finding.region),
            ("service", finding.service_name),
            ("type", finding.resource_type),
            ("name", finding.resource_name),
        ]:
            if getattr(resource_instance, field) != value:
                setattr(resource_instance, field, value)
                update_fields.append(field)
        if update_fields:
            with rls_transaction(tenant_id):
                resource_instance.save(update_fields=update_fields)
    else:
        with rls_transaction(tenant_id):
            resource_instance, _ = Resource.objects.update_or_create(
                tenant_id=tenant_id,
                provider=provider_instance,
                uid=resource_uid,
                defaults={
                    "region": finding.region,
                    "service": finding.service_name,
                    "type": finding.resource_type,
                    "name": finding.resource_name,
                },
            )
            resource_cache[resource_uid] = resource_instance

    # Process tags with caching
    tags = []
    for key, value in finding.resource_tags.items():
        tag_key = (key, value)
        if tag_key not in tag_cache:
            with rls_transaction(tenant_id):
                tag_instance, _ = ResourceTag.objects.get_or_create(
                    tenant_id=tenant_id, key=key, value=value
                )
            tag_cache[tag_key] = tag_instance
        tags.append(tag_cache[tag_key])

    if not created:
        resource_instance.region = finding.region
        resource_instance.service = finding.service_name
        resource_instance.type = finding.resource_type
        resource_instance.save()
    with rls_transaction(tenant_id):
        tags = [
            ResourceTag.objects.get_or_create(
                tenant_id=tenant_id, key=key, value=value
            )[0]
            for key, value in finding.resource_tags.items()
        ]
        resource_instance.upsert_or_delete_tags(tags=tags)

    return resource_instance, (resource_instance.uid, resource_instance.region)
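The two dict arguments are plain per-scan memoization: keyed by resource UID and by (key, value) tag pairs, they let repeated findings against the same resource skip redundant get_or_create round-trips. A stripped-down sketch of the idea:

def memoized_get_or_create(cache: dict, key, create_fn):
    # Generic form of the caching used in _store_resources: hit the database
    # only the first time a key is seen during a scan.
    if key not in cache:
        cache[key] = create_fn()
    return cache[key]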
def _generate_output_directory(
    prowler_provider: object, tenant_id: str, scan_id: str
) -> str:
    """
    Generate a dynamic output directory path based on the given provider type.

    Args:
        prowler_provider (object): An object that has a `type` attribute indicating
            the provider type (e.g., "aws", "azure", etc.).
        tenant_id (str): A unique identifier for the tenant. Used to build the output path.
        scan_id (str): A unique identifier for the scan. Included in the output path.

    Returns:
        str: The complete path to the output directory, including the tenant ID, scan ID,
            provider identity, and a timestamp.

    """
    provider_type = prowler_provider.type
    get_identity = PROVIDER_IDENTITY_MAP.get(provider_type, lambda _: "unknown")
    return (
        f"{tmp_output_directory}/{tenant_id}/{scan_id}/prowler-output-"
        f"{get_identity(prowler_provider)}-{output_file_timestamp}"
    )
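A worked example with invented identifiers; aws_provider stands in for an initialized provider with .type == "aws" and .identity.account == "123456789012":

output_directory = _generate_output_directory(aws_provider, "tenant-1", "scan-1")
# -> f"{tmp_output_directory}/tenant-1/scan-1/prowler-output-123456789012-{output_file_timestamp}"
# This is also the prefix the report endpoint earlier in this diff globs for "*.zip".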
def perform_prowler_scan(
    tenant_id: str, scan_id: str, provider_id: str, checks_to_execute: list[str] = None
    tenant_id: str,
    scan_id: str,
    provider_id: str,
    checks_to_execute: list[str] = None,
):
    """
    Perform a scan using Prowler and store the findings and resources in the database.
@@ -120,6 +282,9 @@ def perform_prowler_scan(
    exception = None
    unique_resources = set()
    start_time = time.time()
    resource_cache = {}
    tag_cache = {}
    last_status_cache = {}

    with rls_transaction(tenant_id):
        provider_instance = Provider.objects.get(pk=provider_id)
@@ -129,6 +294,7 @@ def perform_prowler_scan(
        scan_instance.save()

    try:
        # Provider initialization
        with rls_transaction(tenant_id):
            try:
                prowler_provider = initialize_prowler_provider(provider_instance)
@@ -144,101 +310,66 @@ def perform_prowler_scan(
                )
                provider_instance.save()

        prowler_scan = ProwlerScan(provider=prowler_provider, checks=checks_to_execute)
        # Scan configuration
        prowler_scan = ProwlerScan(
            provider=prowler_provider, checks=checks_to_execute or []
        )
        output_directory = _generate_output_directory(
            prowler_provider, tenant_id, scan_id
        )
        # Create the output directory
        os.makedirs("/".join(output_directory.split("/")[:-1]), exist_ok=True)

        resource_cache = {}
        tag_cache = {}
        last_status_cache = {}

        for progress, findings in prowler_scan.scan():
        all_findings = []
        for progress, findings, stats in prowler_scan.scan():
            for finding in findings:
                if finding is None:
                    logger.error(f"None finding detected on scan {scan_id}.")
                    continue
                for attempt in range(CELERY_DEADLOCK_ATTEMPTS):
                    try:
                        with rls_transaction(tenant_id):
                            # Process resource
                            resource_uid = finding.resource_uid
                            if resource_uid not in resource_cache:
                                # Get or create the resource
                                resource_instance, _ = Resource.objects.get_or_create(
                                    tenant_id=tenant_id,
                                    provider=provider_instance,
                                    uid=resource_uid,
                                    defaults={
                                        "region": finding.region,
                                        "service": finding.service_name,
                                        "type": finding.resource_type,
                                        "name": finding.resource_name,
                                    },
                                )
                                resource_cache[resource_uid] = resource_instance
                            else:
                                resource_instance = resource_cache[resource_uid]

                            # Update resource fields if necessary
                            updated_fields = []
                            if resource_instance.region != finding.region:
                                resource_instance.region = finding.region
                                updated_fields.append("region")
                            if resource_instance.service != finding.service_name:
                                resource_instance.service = finding.service_name
                                updated_fields.append("service")
                            if resource_instance.type != finding.resource_type:
                                resource_instance.type = finding.resource_type
                                updated_fields.append("type")
                            if updated_fields:
                                with rls_transaction(tenant_id):
                                    resource_instance.save(update_fields=updated_fields)
                        resource_instance, resource_uid_region = _store_resources(
                            finding,
                            tenant_id,
                            provider_instance,
                            resource_cache,
                            tag_cache,
                        )
                        unique_resources.add(resource_uid_region)
                        break
                    except (OperationalError, IntegrityError) as db_err:
                        if attempt < CELERY_DEADLOCK_ATTEMPTS - 1:
                            logger.warning(
                                f"{'Deadlock error' if isinstance(db_err, OperationalError) else 'Integrity error'} "
                                f"detected when processing resource {resource_uid} on scan {scan_id}. Retrying..."
                                f"Database error ({type(db_err).__name__}) "
                                f"processing resource {finding.resource_uid}, retrying..."
                            )
                            time.sleep(0.1 * (2**attempt))
                            continue
                        else:
                            raise db_err

                # Update tags
                tags = []
                with rls_transaction(tenant_id):
                    for key, value in finding.resource_tags.items():
                        tag_key = (key, value)
                        if tag_key not in tag_cache:
                            tag_instance, _ = ResourceTag.objects.get_or_create(
                                tenant_id=tenant_id, key=key, value=value
                            )
                            tag_cache[tag_key] = tag_instance
                        else:
                            tag_instance = tag_cache[tag_key]
                        tags.append(tag_instance)
                    resource_instance.upsert_or_delete_tags(tags=tags)

                unique_resources.add((resource_instance.uid, resource_instance.region))

                # Process finding
                # Finding processing
                with rls_transaction(tenant_id):
                    finding_uid = finding.uid
                    if finding_uid not in last_status_cache:
                        most_recent_finding = (
                        most_recent = (
                            Finding.objects.filter(uid=finding_uid)
                            .order_by("-id")
                            .values("status")
                            .order_by("-inserted_at")
                            .values("status", "first_seen_at")
                            .first()
                        )
                        last_status = (
                            most_recent_finding["status"]
                            if most_recent_finding
                            else None
                        last_status, first_seen = (
                            (most_recent["status"], most_recent["first_seen_at"])
                            if most_recent
                            else (None, None)
                        )
                        last_status_cache[finding_uid] = last_status
                        last_status_cache[finding_uid] = (last_status, first_seen)
                    else:
                        last_status = last_status_cache[finding_uid]
                        last_status, first_seen = last_status_cache[finding_uid]

                    status = FindingStatus[finding.status]
                    delta = _create_finding_delta(last_status, status)
                    first_seen = first_seen or datetime.now(tz=timezone.utc)

                    # Create the finding
                    finding_instance = Finding.objects.create(
                        tenant_id=tenant_id,
                        uid=finding_uid,
@@ -251,91 +382,96 @@ def perform_prowler_scan(
                        raw_result=finding.raw,
                        check_id=finding.check_id,
                        scan=scan_instance,
                        first_seen_at=first_seen,
                    )
                    finding_instance.add_resources([resource_instance])

                # Update compliance data if applicable
                if finding.status.value == "MUTED":
                    continue
                # Update compliance status
                if finding.status.value != "MUTED":
                    region_data = check_status_by_region.setdefault(finding.region, {})
                    if region_data.get(finding.check_id) != "FAIL":
                        region_data[finding.check_id] = finding.status.value

                region_dict = check_status_by_region.setdefault(finding.region, {})
                current_status = region_dict.get(finding.check_id)
                if current_status == "FAIL":
                    continue
                region_dict[finding.check_id] = finding.status.value

            # Update scan progress
            # Progress updates and output generation
            with rls_transaction(tenant_id):
                scan_instance.progress = progress
                scan_instance.save()

            all_findings.extend(findings)

        # Generate output files
        for mode, config in OUTPUT_FORMATS_MAPPING.items():
            kwargs = dict(config["kwargs"])
            if mode == "html":
                kwargs["provider"] = prowler_provider
                kwargs["stats"] = stats
            config["class"](
                findings=all_findings,
                create_file_descriptor=True,
                file_path=output_directory,
                file_extension=config["suffix"],
            ).batch_write_data_to_file(**kwargs)

        scan_instance.state = StateChoices.COMPLETED

        # Compress output files
        zip_path = _compress_output_files(output_directory)

        # Save to configured storage
        _upload_to_s3(tenant_id, zip_path, scan_id)

    except Exception as e:
        logger.error(f"Error performing scan {scan_id}: {e}")
        logger.error(f"Scan {scan_id} failed: {str(e)}")
        exception = e
        scan_instance.state = StateChoices.FAILED

    finally:
        # Final scan updates
        with rls_transaction(tenant_id):
            scan_instance.duration = time.time() - start_time
            scan_instance.completed_at = datetime.now(tz=timezone.utc)
            scan_instance.unique_resource_count = len(unique_resources)
            scan_instance.save()

    if exception is None:
        try:
            regions = prowler_provider.get_regions()
        except AttributeError:
            regions = set()

    # Compliance processing
    if not exception:
        compliance_template = PROWLER_COMPLIANCE_OVERVIEW_TEMPLATE[
            provider_instance.provider
        ]
        compliance_overview_by_region = {
            region: deepcopy(compliance_template) for region in regions
        compliance_overview = {
            region: deepcopy(compliance_template)
            for region in getattr(prowler_provider, "get_regions", lambda: set())()
        }

        for region, check_status in check_status_by_region.items():
            compliance_data = compliance_overview_by_region.setdefault(
                region, deepcopy(compliance_template)
            )
            for check_name, status in check_status.items():
        for region, checks in check_status_by_region.items():
            for check_id, status in checks.items():
                generate_scan_compliance(
                    compliance_data,
                    compliance_overview.setdefault(
                        region, deepcopy(compliance_template)
                    ),
                    provider_instance.provider,
                    check_name,
                    check_id,
                    status,
                )

        # Prepare compliance overview objects
        compliance_overview_objects = []
        for region, compliance_data in compliance_overview_by_region.items():
            for compliance_id, compliance in compliance_data.items():
                compliance_overview_objects.append(
                    ComplianceOverview(
                        tenant_id=tenant_id,
                        scan=scan_instance,
                        region=region,
                        compliance_id=compliance_id,
                        framework=compliance["framework"],
                        version=compliance["version"],
                        description=compliance["description"],
                        requirements=compliance["requirements"],
                        requirements_passed=compliance["requirements_status"]["passed"],
                        requirements_failed=compliance["requirements_status"]["failed"],
                        requirements_manual=compliance["requirements_status"]["manual"],
                        total_requirements=compliance["total_requirements"],
                    )
        ComplianceOverview.objects.bulk_create(
            [
                ComplianceOverview(
                    tenant_id=tenant_id,
                    scan=scan_instance,
                    region=region,
                    compliance_id=compliance_id,
                    **compliance_data,
                )
        with rls_transaction(tenant_id):
            ComplianceOverview.objects.bulk_create(compliance_overview_objects)
                for region, data in compliance_overview.items()
                for compliance_id, compliance_data in data.items()
            ]
        )

    if exception is not None:
    if exception:
        raise exception

    serializer = ScanTaskSerializer(instance=scan_instance)
    return serializer.data
    return ScanTaskSerializer(instance=scan_instance).data


def aggregate_findings(tenant_id: str, scan_id: str):
@@ -367,7 +503,7 @@ def aggregate_findings(tenant_id: str, scan_id: str):
        - muted_changed: Muted findings with a delta of 'changed'.
    """
    with rls_transaction(tenant_id):
        findings = Finding.objects.filter(scan_id=scan_id)
        findings = Finding.objects.filter(tenant_id=tenant_id, scan_id=scan_id)

        aggregation = findings.values(
            "check_id",
@@ -462,29 +598,28 @@ def aggregate_findings(tenant_id: str, scan_id: str):
            ),
        )

    with rls_transaction(tenant_id):
        scan_aggregations = {
            ScanSummary(
                tenant_id=tenant_id,
                scan_id=scan_id,
                check_id=agg["check_id"],
                service=agg["resources__service"],
                severity=agg["severity"],
                region=agg["resources__region"],
                fail=agg["fail"],
                _pass=agg["_pass"],
                muted=agg["muted"],
                total=agg["total"],
                new=agg["new"],
                changed=agg["changed"],
                unchanged=agg["unchanged"],
                fail_new=agg["fail_new"],
                fail_changed=agg["fail_changed"],
                pass_new=agg["pass_new"],
                pass_changed=agg["pass_changed"],
                muted_new=agg["muted_new"],
                muted_changed=agg["muted_changed"],
            )
            for agg in aggregation
        }
        ScanSummary.objects.bulk_create(scan_aggregations, batch_size=3000)
        ScanSummary.objects.bulk_create(
            [
                ScanSummary(
                    tenant_id=tenant_id,
                    scan_id=scan_id,
                    check_id=agg["check_id"],
                    service=agg["resources__service"],
                    severity=agg["severity"],
                    region=agg["resources__region"],
                    **{
                        k: v or 0
                        for k, v in agg.items()
                        if k
                        not in {
                            "check_id",
                            "resources__service",
                            "severity",
                            "resources__region",
                        }
                    },
                )
                for agg in aggregation
            ],
            batch_size=3000,
        )
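The bulk_create refactor leans on a dict comprehension that forwards every aggregated counter while coalescing NULLs to zero. The same pattern in isolation:

# Isolated form of the kwargs-forwarding used above: copy all counters except
# the grouping keys, mapping None (SQL NULL from empty aggregates) to 0.
agg = {"check_id": "x", "severity": "low", "fail": None, "_pass": 3}
grouping_keys = {"check_id", "severity"}
counters = {k: v or 0 for k, v in agg.items() if k not in grouping_keys}
assert counters == {"fail": 0, "_pass": 3}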
@@ -1,15 +1,14 @@
from datetime import datetime, timedelta, timezone

from celery import shared_task
from config.celery import RLSTask
from django_celery_beat.models import PeriodicTask
from tasks.jobs.connection import check_provider_connection
from tasks.jobs.deletion import delete_provider, delete_tenant
from tasks.jobs.scan import aggregate_findings, perform_prowler_scan
from tasks.utils import get_next_execution_datetime

from api.db_utils import rls_transaction
from api.decorators import set_tenant
from api.models import Provider, Scan
from api.models import Scan, StateChoices


@shared_task(base=RLSTask, name="provider-connection-check")
@@ -100,28 +99,42 @@ def perform_scheduled_scan_task(self, tenant_id: str, provider_id: str):
    task_id = self.request.id

    with rls_transaction(tenant_id):
        provider_instance = Provider.objects.get(pk=provider_id)
        periodic_task_instance = PeriodicTask.objects.get(
            name=f"scan-perform-scheduled-{provider_id}"
        )
        next_scan_date = datetime.combine(
            datetime.now(timezone.utc), periodic_task_instance.start_time.time()
        ) + timedelta(hours=24)

        scan_instance = Scan.objects.create(
        next_scan_datetime = get_next_execution_datetime(task_id, provider_id)
        scan_instance, _ = Scan.objects.get_or_create(
            tenant_id=tenant_id,
            name="Daily scheduled scan",
            provider=provider_instance,
            provider_id=provider_id,
            trigger=Scan.TriggerChoices.SCHEDULED,
            next_scan_at=next_scan_date,
            task_id=task_id,
            state__in=(StateChoices.SCHEDULED, StateChoices.AVAILABLE),
            scheduler_task_id=periodic_task_instance.id,
            defaults={"state": StateChoices.SCHEDULED},
        )

    result = perform_prowler_scan(
        tenant_id=tenant_id,
        scan_id=str(scan_instance.id),
        provider_id=provider_id,
    )
    scan_instance.task_id = task_id
    scan_instance.save()

    try:
        result = perform_prowler_scan(
            tenant_id=tenant_id,
            scan_id=str(scan_instance.id),
            provider_id=provider_id,
        )
    except Exception as e:
        raise e
    finally:
        with rls_transaction(tenant_id):
            Scan.objects.get_or_create(
                tenant_id=tenant_id,
                name="Daily scheduled scan",
                provider_id=provider_id,
                trigger=Scan.TriggerChoices.SCHEDULED,
                state=StateChoices.SCHEDULED,
                scheduled_at=next_scan_datetime,
                scheduler_task_id=periodic_task_instance.id,
            )

    perform_scan_summary_task.apply_async(
        kwargs={
            "tenant_id": tenant_id,
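One subtlety worth noting: mixing a double-underscore lookup (state__in=...) with defaults in get_or_create relies on standard Django semantics, where lookup-separated kwargs participate only in the match and are dropped from the create() parameters, with defaults filling in the rest. In isolation:

# state__in narrows the match; if nothing matches, the new Scan is created
# with state=SCHEDULED from defaults (lookup kwargs never reach create()).
scan, created = Scan.objects.get_or_create(
    tenant_id=tenant_id,
    name="Daily scheduled scan",
    provider_id=provider_id,
    trigger=Scan.TriggerChoices.SCHEDULED,
    state__in=(StateChoices.SCHEDULED, StateChoices.AVAILABLE),
    defaults={"state": StateChoices.SCHEDULED},
)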
@@ -6,6 +6,8 @@ from django_celery_beat.models import IntervalSchedule, PeriodicTask
from rest_framework_json_api.serializers import ValidationError
from tasks.beat import schedule_provider_scan

from api.models import Scan


@pytest.mark.django_db
class TestScheduleProviderScan:
@@ -15,9 +17,11 @@ class TestScheduleProviderScan:
        with patch(
            "tasks.tasks.perform_scheduled_scan_task.apply_async"
        ) as mock_apply_async:
            assert Scan.all_objects.count() == 0
            result = schedule_provider_scan(provider_instance)

            assert result is not None
            assert Scan.all_objects.count() == 1

            mock_apply_async.assert_called_once_with(
                kwargs={
api/src/backend/tasks/tests/test_utils.py (new file, 76 lines)
@@ -0,0 +1,76 @@
from datetime import datetime, timedelta, timezone
from unittest.mock import patch

import pytest
from django_celery_beat.models import IntervalSchedule, PeriodicTask
from django_celery_results.models import TaskResult
from tasks.utils import get_next_execution_datetime


@pytest.mark.django_db
class TestGetNextExecutionDatetime:
    @pytest.fixture
    def setup_periodic_task(self, db):
        # Create a periodic task with an hourly interval
        interval = IntervalSchedule.objects.create(
            every=1, period=IntervalSchedule.HOURS
        )
        periodic_task = PeriodicTask.objects.create(
            name="scan-perform-scheduled-123",
            task="scan-perform-scheduled",
            interval=interval,
        )
        return periodic_task

    @pytest.fixture
    def setup_task_result(self, db):
        # Create a task result record
        task_result = TaskResult.objects.create(
            task_id="abc123",
            task_name="scan-perform-scheduled",
            status="SUCCESS",
            date_created=datetime.now(timezone.utc) - timedelta(hours=1),
            result="Success",
        )
        return task_result

    def test_get_next_execution_datetime_success(
        self, setup_task_result, setup_periodic_task
    ):
        task_result = setup_task_result
        periodic_task = setup_periodic_task

        # Mock periodic_task_name on TaskResult
        with patch.object(
            TaskResult, "periodic_task_name", return_value=periodic_task.name
        ):
            next_execution = get_next_execution_datetime(
                task_id=task_result.task_id, provider_id="123"
            )

        expected_time = task_result.date_created + timedelta(hours=1)
        assert next_execution == expected_time

    def test_get_next_execution_datetime_fallback_to_provider_id(
        self, setup_task_result, setup_periodic_task
    ):
        task_result = setup_task_result

        # Simulate the case where `periodic_task_name` is missing
        with patch.object(TaskResult, "periodic_task_name", return_value=None):
            next_execution = get_next_execution_datetime(
                task_id=task_result.task_id, provider_id="123"
            )

        expected_time = task_result.date_created + timedelta(hours=1)
        assert next_execution == expected_time

    def test_get_next_execution_datetime_periodic_task_does_not_exist(
        self, setup_task_result
    ):
        task_result = setup_task_result

        with pytest.raises(PeriodicTask.DoesNotExist):
            get_next_execution_datetime(
                task_id=task_result.task_id, provider_id="nonexistent"
            )
api/src/backend/tasks/utils.py (new file, 26 lines)
@@ -0,0 +1,26 @@
from datetime import datetime, timedelta, timezone

from django_celery_beat.models import PeriodicTask
from django_celery_results.models import TaskResult


def get_next_execution_datetime(task_id: int, provider_id: str) -> datetime:
    task_instance = TaskResult.objects.get(task_id=task_id)
    try:
        periodic_task_instance = PeriodicTask.objects.get(
            name=task_instance.periodic_task_name
        )
    except PeriodicTask.DoesNotExist:
        periodic_task_instance = PeriodicTask.objects.get(
            name=f"scan-perform-scheduled-{provider_id}"
        )

    interval = periodic_task_instance.interval

    current_scheduled_time = datetime.combine(
        datetime.now(timezone.utc).date(),
        task_instance.date_created.time(),
        tzinfo=timezone.utc,
    )

    return current_scheduled_time + timedelta(**{interval.period: interval.every})
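The `timedelta(**{interval.period: interval.every})` line works because django-celery-beat stores interval periods under names ("days", "hours", "minutes", ...) that are also valid `timedelta` keyword arguments. A runnable sketch of the same arithmetic, with hard-coded stand-ins for the model fields:

from datetime import datetime, timedelta, timezone

period, every = "hours", 1  # e.g. IntervalSchedule(every=1, period="hours")
task_created = datetime(2025, 1, 1, 3, 30, tzinfo=timezone.utc)

# Keep today's date but the original task's time-of-day, as the utility does.
current_scheduled_time = datetime.combine(
    datetime.now(timezone.utc).date(), task_created.time(), tzinfo=timezone.utc
)
next_execution = current_scheduled_time + timedelta(**{period: every})
print(next_execution)  # today at 04:30 UTC for a 1-hour interval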
contrib/k8s/helm/prowler-api/Chart.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v2
name: prowler-api
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "5.1.1"
contrib/k8s/helm/prowler-api/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-api.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-api.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-api.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
contrib/k8s/helm/prowler-api/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-api.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-api.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-api.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "prowler-api.labels" -}}
helm.sh/chart: {{ include "prowler-api.chart" . }}
{{ include "prowler-api.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "prowler-api.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-api.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-api.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-api.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
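To make the truncation rules in the fullname helper explicit, here is a hedged Python mirror of the template logic above (an illustration, not the chart's actual rendering code): names are capped at 63 characters (the DNS label limit), trailing hyphens are stripped, and the chart name is only appended when the release name does not already contain it.

def fullname(release_name: str, chart_name: str,
             name_override: str = "", fullname_override: str = "") -> str:
    # Mirrors "prowler-api.fullname": override wins, then release name alone
    # if it already contains the chart name, else "<release>-<chart>".
    if fullname_override:
        return fullname_override[:63].rstrip("-")
    name = name_override or chart_name
    if name in release_name:
        return release_name[:63].rstrip("-")
    return f"{release_name}-{name}"[:63].rstrip("-")


print(fullname("prod", "prowler-api"))         # prod-prowler-api
print(fullname("prowler-api", "prowler-api"))  # prowler-api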
contrib/k8s/helm/prowler-api/templates/configmap.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "prowler-api.fullname" . }}-config
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
data:
  config.yaml: |-
    {{- toYaml .Values.mainConfig | nindent 4 }}
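What this template does, approximated in Python: the `mainConfig` mapping from values.yaml is serialized back to YAML and shipped as the `config.yaml` key of the ConfigMap. A sketch using PyYAML, not the chart's actual renderer:

import yaml

main_config = {"aws": {"max_unused_access_keys_days": 45}}
print(yaml.safe_dump(main_config, default_flow_style=False))
# aws:
#   max_unused_access_keys_days: 45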
contrib/k8s/helm/prowler-api/templates/deployment.yaml (new file, 85 lines)
@@ -0,0 +1,85 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "prowler-api.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "prowler-api.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "prowler-api.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        {{- range $name,$config := .Values.containers }}
        {{- if $config.enabled }}
        - name: {{ $name }}
          securityContext:
            {{- toYaml $config.securityContext | nindent 12 }}
          image: "{{ $config.image.repository }}:{{ $config.image.tag | default $.Chart.AppVersion }}"
          imagePullPolicy: {{ $config.image.pullPolicy }}
          envFrom:
            - secretRef:
                name: {{ include "prowler-api.fullname" $ }}
          command:
            {{- toYaml $config.command | nindent 12 }}
          {{- if $config.ports }}
          ports:
            {{- toYaml $config.ports | nindent 12 }}
          {{- end }}
          livenessProbe:
            {{- toYaml $config.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $config.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml $config.resources | nindent 12 }}
          volumeMounts:
            - name: {{ include "prowler-api.fullname" $ }}-config
              mountPath: {{ $.Values.releaseConfigRoot }}{{ $.Values.releaseConfigPath }}
              subPath: config.yaml
            {{- with .volumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
        {{- end }}
        {{- end }}
      volumes:
        - name: {{ include "prowler-api.fullname" . }}-config
          configMap:
            name: {{ include "prowler-api.fullname" . }}-config
        {{- with .Values.volumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
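A small sketch of the container-selection pattern in this deployment template: every entry under `.Values.containers` with `enabled: true` becomes a container in the same pod, so the API server, worker, and beat scheduler share one Deployment here.

containers = {
    "prowler-api": {"enabled": True},
    "worker": {"enabled": True},
    "worker-beat": {"enabled": False},
}
# Same filter the {{- if $config.enabled }} guard applies during rendering.
rendered = [name for name, cfg in containers.items() if cfg.get("enabled")]
print(rendered)  # ['prowler-api', 'worker']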
contrib/k8s/helm/prowler-api/templates/ingress.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.className }}
  ingressClassName: {{ . }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- with .pathType }}
            pathType: {{ . }}
            {{- end }}
            backend:
              service:
                name: {{ include "prowler-api.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
{{- end }}
contrib/k8s/helm/prowler-api/templates/secrets.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
type: Opaque
data:
  {{- range $k, $v := .Values.secrets }}
  {{ $k }}: {{ $v | toString | b64enc | quote }}
  {{- end }}
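The `b64enc` filter in this secret template, reproduced in Python so the rendered output is easy to verify; a sketch assuming every value is representable as a string, which is what `toString` enforces first:

import base64

secrets = {"POSTGRES_PORT": 5432, "POSTGRES_USER": "prowler"}
for key, value in secrets.items():
    # Equivalent of: {{ $v | toString | b64enc | quote }}
    encoded = base64.b64encode(str(value).encode()).decode()
    print(f'{key}: "{encoded}"')
# POSTGRES_PORT: "NTQzMg=="
# POSTGRES_USER: "cHJvd2xlcg=="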
contrib/k8s/helm/prowler-api/templates/service.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prowler-api.fullname" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    {{- range $name,$config := .Values.containers }}
    {{- if $config.ports }}
    {{- range $p := $config.ports }}
    - port: {{ $p.containerPort }}
      targetPort: {{ $p.containerPort }}
      protocol: TCP
      name: {{ $config.name }}
    {{- end }}
    {{- end }}
    {{- end }}
  selector:
    {{- include "prowler-api.selectorLabels" . | nindent 4 }}
contrib/k8s/helm/prowler-api/templates/serviceaccount.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prowler-api.serviceAccountName" . }}
  labels:
    {{- include "prowler-api.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
contrib/k8s/helm/prowler-api/values.yaml (new file, 625 lines)
@@ -0,0 +1,625 @@
# Default values for prowler-api.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# This will set the replicaset count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1

# This sets the container images; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
containers:
  prowler-api:
    enabled: true
    image:
      repository: prowlercloud/prowler-api
      pullPolicy: IfNotPresent
    ports:
      - name: http
        containerPort: 8080
        protocol: TCP
    command: ["/home/prowler/docker-entrypoint.sh", "prod"]
  worker:
    enabled: true
    image:
      repository: prowlercloud/prowler-api
      pullPolicy: IfNotPresent
    command: ["/home/prowler/docker-entrypoint.sh", "worker"]
  worker-beat:
    enabled: true
    image:
      repository: prowlercloud/prowler-api
      pullPolicy: IfNotPresent
    command: ["../docker-entrypoint.sh", "beat"]

secrets:
  POSTGRES_HOST:
  POSTGRES_PORT: 5432
  POSTGRES_ADMIN_USER:
  POSTGRES_ADMIN_PASSWORD:
  POSTGRES_USER:
  POSTGRES_PASSWORD:
  POSTGRES_DB:
  # Valkey settings
  VALKEY_HOST: valkey-headless
  VALKEY_PORT: "6379"
  VALKEY_DB: "0"
  # Django settings
  DJANGO_ALLOWED_HOSTS: localhost,127.0.0.1,prowler-api
  DJANGO_BIND_ADDRESS: 0.0.0.0
  DJANGO_PORT: "8080"
  DJANGO_DEBUG: False
  DJANGO_SETTINGS_MODULE: config.django.production
  # Select one of [ndjson|human_readable]
  DJANGO_LOGGING_FORMATTER: human_readable
  # Select one of [DEBUG|INFO|WARNING|ERROR|CRITICAL]
  # Applies to both Django and Celery Workers
  DJANGO_LOGGING_LEVEL: INFO
  # Defaults to the maximum available based on CPU cores if not set.
  DJANGO_WORKERS: 2
  # Token lifetime is in minutes
  DJANGO_ACCESS_TOKEN_LIFETIME: "30"
  # Token lifetime is in minutes
  DJANGO_REFRESH_TOKEN_LIFETIME: "1440"
  DJANGO_CACHE_MAX_AGE: "3600"
  DJANGO_STALE_WHILE_REVALIDATE: "60"
  DJANGO_MANAGE_DB_PARTITIONS: "False"
  # openssl genrsa -out private.pem 2048
  DJANGO_TOKEN_SIGNING_KEY:
  # openssl rsa -in private.pem -pubout -out public.pem
  DJANGO_TOKEN_VERIFYING_KEY:
  # openssl rand -base64 32
  DJANGO_SECRETS_ENCRYPTION_KEY:
  DJANGO_BROKER_VISIBILITY_TIMEOUT: 86400

releaseConfigRoot: /home/prowler/.cache/pypoetry/virtualenvs/prowler-api-NnJNioq7-py3.12/lib/python3.12/site-packages/
releaseConfigPath: prowler/config/config.yaml

mainConfig:
  # AWS Configuration
  aws:
    # AWS Global Configuration
    # aws.mute_non_default_regions --> Set to True to mute failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
    mute_non_default_regions: False
    # If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
    # Mutelist:
    #   Accounts:
    #     "*":
    #       Checks:
    #         "*":
    #           Regions:
    #             - "ap-southeast-1"
    #             - "ap-southeast-2"
    #           Resources:
    #             - "*"

    # AWS IAM Configuration
    # aws.iam_user_accesskey_unused --> CIS recommends 45 days
    max_unused_access_keys_days: 45
    # aws.iam_user_console_access_unused --> CIS recommends 45 days
    max_console_access_days: 45

    # AWS EC2 Configuration
    # aws.ec2_elastic_ip_shodan
    # TODO: create common config
    shodan_api_key: null
    # aws.ec2_securitygroup_with_many_ingress_egress_rules --> by default is 50 rules
    max_security_group_rules: 50
    # aws.ec2_instance_older_than_specific_days --> by default is 6 months (180 days)
    max_ec2_instance_age_in_days: 180
    # aws.ec2_securitygroup_allow_ingress_from_internet_to_any_port
    # allowed network interface types for security groups open to the Internet
    ec2_allowed_interface_types:
      [
        "api_gateway_managed",
        "vpc_endpoint",
      ]
    # allowed network interface owners for security groups open to the Internet
    ec2_allowed_instance_owners:
      [
        "amazon-elb",
      ]
    # aws.ec2_securitygroup_allow_ingress_from_internet_to_high_risk_tcp_ports
    ec2_high_risk_ports:
      [
        25,
        110,
        135,
        143,
        445,
        3000,
        4333,
        5000,
        5500,
        8080,
        8088,
      ]

    # AWS ECS Configuration
    # aws.ecs_service_fargate_latest_platform_version
    fargate_linux_latest_version: "1.4.0"
    fargate_windows_latest_version: "1.0.0"

    # AWS VPC Configuration (vpc_endpoint_connections_trust_boundaries, vpc_endpoint_services_allowed_principals_trust_boundaries)
    # AWS SSM Configuration (aws.ssm_documents_set_as_public)
    # Single account environment: No action required. The AWS account number will be automatically added by the checks.
    # Multi account environment: Any additional trusted account number should be added as a space separated list, e.g.
    # trusted_account_ids: ["123456789012", "098765432109", "678901234567"]
    trusted_account_ids: []

    # AWS Cloudwatch Configuration
    # aws.cloudwatch_log_group_retention_policy_specific_days_enabled --> by default is 365 days
    log_group_retention_days: 365

    # AWS CloudFormation Configuration
    # cloudformation_stack_cdktoolkit_bootstrap_version --> by default is 21
    recommended_cdk_bootstrap_version: 21

    # AWS AppStream Session Configuration
    # aws.appstream_fleet_session_idle_disconnect_timeout
    max_idle_disconnect_timeout_in_seconds: 600  # 10 Minutes
    # aws.appstream_fleet_session_disconnect_timeout
    max_disconnect_timeout_in_seconds: 300  # 5 Minutes
    # aws.appstream_fleet_maximum_session_duration
    max_session_duration_seconds: 36000  # 10 Hours

    # AWS Lambda Configuration
    # aws.awslambda_function_using_supported_runtimes
    obsolete_lambda_runtimes:
      [
        "java8",
        "go1.x",
        "provided",
        "python3.6",
        "python2.7",
        "python3.7",
        "nodejs4.3",
        "nodejs4.3-edge",
        "nodejs6.10",
        "nodejs",
        "nodejs8.10",
        "nodejs10.x",
        "nodejs12.x",
        "nodejs14.x",
        "nodejs16.x",
        "dotnet5.0",
        "dotnet7",
        "dotnetcore1.0",
        "dotnetcore2.0",
        "dotnetcore2.1",
        "dotnetcore3.1",
        "ruby2.5",
        "ruby2.7",
      ]
    # aws.awslambda_function_vpc_is_in_multi_azs
    lambda_min_azs: 2

    # AWS Organizations
    # aws.organizations_scp_check_deny_regions
    # aws.organizations_enabled_regions: [
    #   "eu-central-1",
    #   "eu-west-1",
    #   "us-east-1"
    # ]
    organizations_enabled_regions: []
    organizations_trusted_delegated_administrators: []

    # AWS ECR
    # aws.ecr_repositories_scan_vulnerabilities_in_latest_image
    # CRITICAL
    # HIGH
    # MEDIUM
    ecr_repository_vulnerability_minimum_severity: "MEDIUM"

    # AWS Trusted Advisor
    # aws.trustedadvisor_premium_support_plan_subscribed
    verify_premium_support_plans: True

    # AWS CloudTrail Configuration
    # aws.cloudtrail_threat_detection_privilege_escalation
    threat_detection_privilege_escalation_threshold: 0.2  # Percentage of actions found to decide if it is a privilege_escalation attack event, by default is 0.2 (20%)
    threat_detection_privilege_escalation_minutes: 1440  # Past minutes to search from now for privilege_escalation attacks, by default is 1440 minutes (24 hours)
    threat_detection_privilege_escalation_actions:
      [
        "AddPermission",
        "AddRoleToInstanceProfile",
        "AddUserToGroup",
        "AssociateAccessPolicy",
        "AssumeRole",
        "AttachGroupPolicy",
        "AttachRolePolicy",
        "AttachUserPolicy",
        "ChangePassword",
        "CreateAccessEntry",
        "CreateAccessKey",
        "CreateDevEndpoint",
        "CreateEventSourceMapping",
        "CreateFunction",
        "CreateGroup",
        "CreateJob",
        "CreateKeyPair",
        "CreateLoginProfile",
        "CreatePipeline",
        "CreatePolicyVersion",
        "CreateRole",
        "CreateStack",
        "DeleteRolePermissionsBoundary",
        "DeleteRolePolicy",
        "DeleteUserPermissionsBoundary",
        "DeleteUserPolicy",
        "DetachRolePolicy",
        "DetachUserPolicy",
        "GetCredentialsForIdentity",
        "GetId",
        "GetPolicyVersion",
        "GetUserPolicy",
        "Invoke",
        "ModifyInstanceAttribute",
        "PassRole",
        "PutGroupPolicy",
        "PutPipelineDefinition",
        "PutRolePermissionsBoundary",
        "PutRolePolicy",
        "PutUserPermissionsBoundary",
        "PutUserPolicy",
        "ReplaceIamInstanceProfileAssociation",
        "RunInstances",
        "SetDefaultPolicyVersion",
        "UpdateAccessKey",
        "UpdateAssumeRolePolicy",
        "UpdateDevEndpoint",
        "UpdateEventSourceMapping",
        "UpdateFunctionCode",
        "UpdateJob",
        "UpdateLoginProfile",
      ]
    # aws.cloudtrail_threat_detection_enumeration
    threat_detection_enumeration_threshold: 0.3  # Percentage of actions found to decide if it is an enumeration attack event, by default is 0.3 (30%)
    threat_detection_enumeration_minutes: 1440  # Past minutes to search from now for enumeration attacks, by default is 1440 minutes (24 hours)
    threat_detection_enumeration_actions:
      [
        "DescribeAccessEntry",
        "DescribeAccountAttributes",
        "DescribeAvailabilityZones",
        "DescribeBundleTasks",
        "DescribeCarrierGateways",
        "DescribeClientVpnRoutes",
        "DescribeCluster",
        "DescribeDhcpOptions",
        "DescribeFlowLogs",
        "DescribeImages",
        "DescribeInstanceAttribute",
        "DescribeInstanceInformation",
        "DescribeInstanceTypes",
        "DescribeInstances",
        "DescribeInstances",
        "DescribeKeyPairs",
        "DescribeLogGroups",
        "DescribeLogStreams",
        "DescribeOrganization",
        "DescribeRegions",
        "DescribeSecurityGroups",
        "DescribeSnapshotAttribute",
        "DescribeSnapshotTierStatus",
        "DescribeSubscriptionFilters",
        "DescribeTransitGatewayMulticastDomains",
        "DescribeVolumes",
        "DescribeVolumesModifications",
        "DescribeVpcEndpointConnectionNotifications",
        "DescribeVpcs",
        "GetAccount",
        "GetAccountAuthorizationDetails",
        "GetAccountSendingEnabled",
        "GetBucketAcl",
        "GetBucketLogging",
        "GetBucketPolicy",
        "GetBucketReplication",
        "GetBucketVersioning",
        "GetCallerIdentity",
        "GetCertificate",
        "GetConsoleScreenshot",
        "GetCostAndUsage",
        "GetDetector",
        "GetEbsDefaultKmsKeyId",
        "GetEbsEncryptionByDefault",
        "GetFindings",
        "GetFlowLogsIntegrationTemplate",
        "GetIdentityVerificationAttributes",
        "GetInstances",
        "GetIntrospectionSchema",
        "GetLaunchTemplateData",
        "GetLaunchTemplateData",
        "GetLogRecord",
        "GetParameters",
        "GetPolicyVersion",
        "GetPublicAccessBlock",
        "GetQueryResults",
        "GetRegions",
        "GetSMSAttributes",
        "GetSMSSandboxAccountStatus",
        "GetSendQuota",
        "GetTransitGatewayRouteTableAssociations",
        "GetUserPolicy",
        "HeadObject",
        "ListAccessKeys",
        "ListAccounts",
        "ListAllMyBuckets",
        "ListAssociatedAccessPolicies",
        "ListAttachedUserPolicies",
        "ListClusters",
        "ListDetectors",
        "ListDomains",
        "ListFindings",
        "ListHostedZones",
        "ListIPSets",
        "ListIdentities",
        "ListInstanceProfiles",
        "ListObjects",
        "ListOrganizationalUnitsForParent",
        "ListOriginationNumbers",
        "ListPolicyVersions",
        "ListRoles",
        "ListRoles",
        "ListRules",
        "ListServiceQuotas",
        "ListSubscriptions",
        "ListTargetsByRule",
        "ListTopics",
        "ListUsers",
        "LookupEvents",
        "Search",
      ]
    # aws.cloudtrail_threat_detection_llm_jacking
    threat_detection_llm_jacking_threshold: 0.4  # Percentage of actions found to decide if it is an LLM Jacking attack event, by default is 0.4 (40%)
    threat_detection_llm_jacking_minutes: 1440  # Past minutes to search from now for LLM Jacking attacks, by default is 1440 minutes (24 hours)
    threat_detection_llm_jacking_actions:
      [
        "PutUseCaseForModelAccess", # Submits a use case for model access, providing justification (Write).
        "PutFoundationModelEntitlement", # Grants entitlement for accessing a foundation model (Write).
        "PutModelInvocationLoggingConfiguration", # Configures logging for model invocations (Write).
        "CreateFoundationModelAgreement", # Creates a new agreement to use a foundation model (Write).
        "InvokeModel", # Invokes a specified Bedrock model for inference using provided prompt and parameters (Read).
        "InvokeModelWithResponseStream", # Invokes a Bedrock model for inference with real-time token streaming (Read).
        "GetUseCaseForModelAccess", # Retrieves an existing use case for model access (Read).
        "GetModelInvocationLoggingConfiguration", # Fetches the logging configuration for model invocations (Read).
        "GetFoundationModelAvailability", # Checks the availability of a foundation model for use (Read).
        "ListFoundationModelAgreementOffers", # Lists available agreement offers for accessing foundation models (List).
        "ListFoundationModels", # Lists the available foundation models in Bedrock (List).
        "ListProvisionedModelThroughputs", # Lists the provisioned throughput for previously created models (List).
      ]

    # AWS RDS Configuration
    # aws.rds_instance_backup_enabled
    # Whether to check RDS instance replicas or not
    check_rds_instance_replicas: False

    # AWS ACM Configuration
    # aws.acm_certificates_expiration_check
    days_to_expire_threshold: 7
    # aws.acm_certificates_with_secure_key_algorithms
    insecure_key_algorithms:
      [
        "RSA-1024",
        "P-192",
        "SHA-1",
      ]

    # AWS EKS Configuration
    # aws.eks_control_plane_logging_all_types_enabled
    # EKS control plane logging types that must be enabled
    eks_required_log_types:
      [
        "api",
        "audit",
        "authenticator",
        "controllerManager",
        "scheduler",
      ]

    # aws.eks_cluster_uses_a_supported_version
    # EKS clusters must be version 1.28 or higher
    eks_cluster_oldest_version_supported: "1.28"

    # AWS CodeBuild Configuration
    # aws.codebuild_project_no_secrets_in_variables
    # CodeBuild sensitive variables that are excluded from the check
    excluded_sensitive_environment_variables:
      [
      ]

    # AWS ELB Configuration
    # aws.elb_is_in_multiple_az
    # Minimum number of Availability Zones that a CLB must be in
    elb_min_azs: 2

    # AWS ELBv2 Configuration
    # aws.elbv2_is_in_multiple_az
    # Minimum number of Availability Zones that an ELBv2 must be in
    elbv2_min_azs: 2

    # AWS Secrets Configuration
    # Patterns to ignore in the secrets checks
    secrets_ignore_patterns: []

    # AWS Secrets Manager Configuration
    # aws.secretsmanager_secret_unused
    # Maximum number of days a secret can be unused
    max_days_secret_unused: 90

    # aws.secretsmanager_secret_rotated_periodically
    # Maximum number of days a secret should be rotated
    max_days_secret_unrotated: 90

    # AWS Kinesis Configuration
    # Minimum retention period in hours for Kinesis streams
    min_kinesis_stream_retention_hours: 168  # 7 days

  # Azure Configuration
  azure:
    # Azure Network Configuration
    # azure.network_public_ip_shodan
    # TODO: create common config
    shodan_api_key: null

    # Azure App Service
    # azure.app_ensure_php_version_is_latest
    php_latest_version: "8.2"
    # azure.app_ensure_python_version_is_latest
    python_latest_version: "3.12"
    # azure.app_ensure_java_version_is_latest
    java_latest_version: "17"

    # Azure SQL Server
    # azure.sqlserver_minimal_tls_version
    recommended_minimal_tls_versions:
      [
        "1.2",
        "1.3",
      ]

  # GCP Configuration
  gcp:
    # GCP Compute Configuration
    # gcp.compute_public_address_shodan
    shodan_api_key: null

  # Kubernetes Configuration
  kubernetes:
    # Kubernetes API Server
    # kubernetes.apiserver_audit_log_maxbackup_set
    audit_log_maxbackup: 10
    # kubernetes.apiserver_audit_log_maxsize_set
    audit_log_maxsize: 100
    # kubernetes.apiserver_audit_log_maxage_set
    audit_log_maxage: 30
    # kubernetes.apiserver_strong_ciphers_only
    apiserver_strong_ciphers:
      [
        "TLS_AES_128_GCM_SHA256",
        "TLS_AES_256_GCM_SHA384",
        "TLS_CHACHA20_POLY1305_SHA256",
      ]
    # Kubelet
    # kubernetes.kubelet_strong_ciphers_only
    kubelet_strong_ciphers:
      [
        "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
        "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
        "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
        "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
        "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
        "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
        "TLS_RSA_WITH_AES_256_GCM_SHA384",
        "TLS_RSA_WITH_AES_128_GCM_SHA256",
      ]

# This is for the secrets for pulling an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  # This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
  type: ClusterIP
  # This sets the ports; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
  port: 80

# This block is for setting up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

# This is to set up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
#   secret:
#     secretName: mysecret
#     optional: false

# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true

nodeSelector: {}

tolerations: []

affinity: {}
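The `threat_detection_*_threshold` values in the `mainConfig` above are fractions: roughly, the share of the watched action list observed in the look-back window must reach the threshold before an event is flagged. A simplified, runnable sketch of that reading (the real logic lives in Prowler's CloudTrail threat-detection checks):

def is_suspicious(seen_actions, watched_actions, threshold):
    # Fraction of the watched list that actually appeared in the window.
    matched = len(set(seen_actions) & set(watched_actions))
    return matched / len(watched_actions) >= threshold


watched = ["ListUsers", "ListRoles", "GetCallerIdentity", "DescribeInstances"]
print(is_suspicious(["ListUsers", "GetCallerIdentity", "ListRoles"], watched, 0.3))  # True (0.75 >= 0.3)
print(is_suspicious(["ListUsers"], watched, 0.3))  # False (0.25 < 0.3)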
contrib/k8s/helm/prowler-cli/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -39,4 +39,3 @@ spec:
          path: {{ $value }}
      {{- end }}
{{- end }}

contrib/k8s/helm/prowler-ui/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
contrib/k8s/helm/prowler-ui/Chart.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
apiVersion: v2
name: prowler-ui
description: A Helm chart for Kubernetes
type: application
version: 0.1.0
appVersion: "5.1.1"
contrib/k8s/helm/prowler-ui/templates/NOTES.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prowler-ui.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "prowler-ui.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prowler-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prowler-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
contrib/k8s/helm/prowler-ui/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "prowler-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "prowler-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "prowler-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "prowler-ui.labels" -}}
helm.sh/chart: {{ include "prowler-ui.chart" . }}
{{ include "prowler-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "prowler-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "prowler-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "prowler-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "prowler-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
contrib/k8s/helm/prowler-ui/templates/deployment.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "prowler-ui.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "prowler-ui.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "prowler-ui.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          envFrom:
            - secretRef:
                name: {{ include "prowler-ui.fullname" $ }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          {{- with .Values.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      {{- with .Values.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
contrib/k8s/helm/prowler-ui/templates/ingress.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.className }}
  ingressClassName: {{ . }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- with .pathType }}
            pathType: {{ . }}
            {{- end }}
            backend:
              service:
                name: {{ include "prowler-ui.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
{{- end }}
contrib/k8s/helm/prowler-ui/templates/secrets.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
type: Opaque
data:
  {{- range $k, $v := .Values.secrets }}
  {{ $k }}: {{ $v | toString | b64enc | quote }}
  {{- end }}
contrib/k8s/helm/prowler-ui/templates/service.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "prowler-ui.fullname" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "prowler-ui.selectorLabels" . | nindent 4 }}
contrib/k8s/helm/prowler-ui/templates/serviceaccount.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "prowler-ui.serviceAccountName" . }}
  labels:
    {{- include "prowler-ui.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
contrib/k8s/helm/prowler-ui/values.yaml (new file, 132 lines)
@@ -0,0 +1,132 @@
# Default values for prowler-ui.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# This will set the replicaset count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1

# This sets the container image; more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
image:
  repository: prowlercloud/prowler-ui
  # This sets the pull policy for images.
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

# This is for the secrets for pulling an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

secrets:
  SITE_URL: http://localhost:3000
  API_BASE_URL: http://prowler-api:8080/api/v1
  NEXT_PUBLIC_API_DOCS_URL: http://prowler-api:8080/api/v1/docs
  AUTH_TRUST_HOST: True
  UI_PORT: 3000
  # openssl rand -base64 32
  AUTH_SECRET:

# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# This is for setting Kubernetes Annotations to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# This is for setting Kubernetes Labels to a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  # This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
  type: ClusterIP
  # This sets the ports; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
  port: 3000

# This block is for setting up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

# This is to set up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
#   secret:
#     secretName: mysecret
#     optional: false

# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true

nodeSelector: {}

tolerations: []

affinity: {}
24
dashboard/compliance/cis_1_10_kubernetes.py
Normal file
24
dashboard/compliance/cis_1_10_kubernetes.py
Normal file
@@ -0,0 +1,24 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):
    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )

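For context, a minimal usage sketch of this new table builder (hypothetical, not part of the change itself): get_table expects a pandas DataFrame carrying the columns selected above; the row values and check ID below are made up for illustration.

import pandas as pd

from dashboard.compliance.cis_1_10_kubernetes import get_table

# Build a one-row DataFrame with the columns the function selects.
# All values here are illustrative placeholders.
data = pd.DataFrame(
    [
        {
            "REQUIREMENTS_ID": "1.1.1",
            "REQUIREMENTS_DESCRIPTION": "Example CIS 1.10 requirement",
            "REQUIREMENTS_ATTRIBUTES_SECTION": "1.1 Control Plane Node Configuration Files",
            "CHECKID": "example_check",
            "STATUS": "PASS",
            "REGION": "cluster",
            "ACCOUNTID": "context-1",
            "RESOURCEID": "kube-apiserver",
        }
    ]
)

# Returns the dashboard containers grouped by CIS section
# (the grouping is delegated to get_section_containers_cis).
containers = get_table(data)
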
dashboard/compliance/cis_3_0_azure.py (new file, 25 lines)
@@ -0,0 +1,25 @@
import warnings

from dashboard.common_methods import get_section_containers_cis

warnings.filterwarnings("ignore")


def get_table(data):

    aux = data[
        [
            "REQUIREMENTS_ID",
            "REQUIREMENTS_DESCRIPTION",
            "REQUIREMENTS_ATTRIBUTES_SECTION",
            "CHECKID",
            "STATUS",
            "REGION",
            "ACCOUNTID",
            "RESOURCEID",
        ]
    ].copy()

    return get_section_containers_cis(
        aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION"
    )

@@ -16,6 +16,7 @@ services:
     volumes:
       - "./api/src/backend:/home/prowler/backend"
       - "./api/pyproject.toml:/home/prowler/pyproject.toml"
+      - "/tmp/prowler_api_output:/tmp/prowler_api_output"
     depends_on:
       postgres:
         condition: service_healthy
@@ -35,6 +36,9 @@ services:
         required: false
     ports:
       - 3000:3000
+    volumes:
+      - "./ui:/app"
+      - "/app/node_modules"

   postgres:
     image: postgres:16.3-alpine3.20
@@ -82,6 +86,8 @@ services:
     env_file:
       - path: .env
         required: false
+    volumes:
+      - "/tmp/prowler_api_output:/tmp/prowler_api_output"
     depends_on:
       valkey:
         condition: service_healthy

@@ -56,7 +56,6 @@ from prowler.providers.<provider>.lib.service.service import ServiceParentClass


 # Create a class for the Service
-################## <Service>
 class <Service>(ServiceParentClass):
     def __init__(self, provider):
         # Call Service Parent Class __init__

@@ -669,8 +669,9 @@ class Test_app_ensure_http_is_redirected_to_https:
         # Create the custom App object to be tested
         app_client.apps = {
             AZURE_SUBSCRIPTION_ID: {
-                "app_id-1": WebApp(
+                resource_id: WebApp(
+                    resource_id=resource_id,
                     name="app_id-1",
                     auth_enabled=True,
                     configurations=mock.MagicMock(),
                     client_cert_mode="Ignore",
@@ -716,8 +717,9 @@ class Test_app_ensure_http_is_redirected_to_https:

         app_client.apps = {
             AZURE_SUBSCRIPTION_ID: {
-                "app_id-1": WebApp(
+                resource_id: WebApp(
+                    resource_id=resource_id,
                     name="app_id-1",
                     auth_enabled=True,
                     configurations=mock.MagicMock(),
                     client_cert_mode="Ignore",

@@ -133,3 +133,5 @@ While the scan is running, start exploring the findings in these sections:
     <img src="../../img/issues.png" alt="Issues" width="300" style="text-align: center;"/>

 - **Browse All Findings**: Detailed list of findings detected, where you can filter by severity, service, and more. <img src="../../img/findings.png" alt="Findings" width="700"/>
+
+To view all `new` findings that have not been seen prior to this scan, click the `Delta` filter and select `new`. To view all `changed` findings that have had a status change (from `PASS` to `FAIL`, for example), click the `Delta` filter and select `changed`.

@@ -124,7 +124,7 @@ extra:
       make our documentation better.
   analytics:
     provider: google
-    property: G-H5TFH6WJRQ
+    property: G-KBKV70W5Y2
   social:
     - icon: fontawesome/brands/github
       link: https://github.com/prowler-cloud

@@ -52,7 +52,7 @@
             ],
             "Resource": "*",
             "Effect": "Allow",
-            "Sid": "AllowMoreReadForProwler"
+            "Sid": "AllowMoreReadOnly"
         },
         {
             "Effect": "Allow",
@@ -60,9 +60,10 @@
                 "apigateway:GET"
             ],
             "Resource": [
-                "arn:aws:apigateway:*::/restapis/*",
-                "arn:aws:apigateway:*::/apis/*"
-            ]
+                "arn:*:apigateway:*::/restapis/*",
+                "arn:*:apigateway:*::/apis/*"
+            ],
+            "Sid": "AllowAPIGatewayReadOnly"
         }
     ]
}

@@ -1,13 +0,0 @@
-{
-    "Version": "2012-10-17",
-    "Statement": [
-        {
-            "Action": [
-                "securityhub:BatchImportFindings",
-                "securityhub:GetFindings"
-            ],
-            "Effect": "Allow",
-            "Resource": "*"
-        }
-    ]
-}

permissions/templates/cloudformation/prowler-scan-role.yml (new file, 127 lines)
@@ -0,0 +1,127 @@
AWSTemplateFormatVersion: "2010-09-09"

# You can invoke CloudFormation and pass the principal ARN from a command line like this:
# aws cloudformation create-stack \
#   --capabilities CAPABILITY_IAM --capabilities CAPABILITY_NAMED_IAM \
#   --template-body "file://prowler-pro-saas-scan-role.yaml" \
#   --stack-name "ProwlerProSaaSScanRole" \
#   --parameters "ParameterKey=ExternalId,ParameterValue=ProvidedExternalID"

Description: |
  This template creates the ProwlerScan IAM Role in this account with
  all read-only permissions to scan your account for security issues.
  Contains two AWS managed policies (SecurityAudit and ViewOnlyAccess) and an inline policy.
  It sets the trust policy on that IAM Role to permit Prowler to assume that role.
Parameters:
  ExternalId:
    Description: |
      This is the External ID that Prowler will use to assume the ProwlerScan IAM Role.
    Type: String
    MinLength: 1
    AllowedPattern: ".+"
    ConstraintDescription: "ExternalId must not be empty."
  AccountId:
    Description: |
      AWS Account ID that will assume the role created. If you are deploying this template to be used in Prowler Cloud, please do not edit this.
    Type: String
    Default: "232136659152"
    MinLength: 12
    MaxLength: 12
    AllowedPattern: "[0-9]{12}"
    ConstraintDescription: "AccountId must be a valid AWS Account ID."
  IAMPrincipal:
    Description: |
      The IAM principal type and name that will be allowed to assume the role created. Leave an * for all the IAM principals in your AWS account. If you are deploying this template to be used in Prowler Cloud, please do not edit this.
    Type: String
    Default: role/prowler*

Resources:
  ProwlerScan:
    Type: AWS::IAM::Role
    Properties:
      RoleName: ProwlerScan
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          - Effect: Allow
            Principal:
              AWS: !Sub "arn:${AWS::Partition}:iam::${AccountId}:root"
            Action: "sts:AssumeRole"
            Condition:
              StringEquals:
                "sts:ExternalId": !Sub ${ExternalId}
              StringLike:
                "aws:PrincipalArn": !Sub "arn:${AWS::Partition}:iam::${AccountId}:${IAMPrincipal}"
      MaxSessionDuration: 3600
      ManagedPolicyArns:
        - "arn:aws:iam::aws:policy/SecurityAudit"
        - "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"
      Policies:
        - PolicyName: ProwlerScan
          PolicyDocument:
            Version: "2012-10-17"
            Statement:
              - Sid: AllowMoreReadOnly
                Effect: Allow
                Action:
                  - "account:Get*"
                  - "appstream:Describe*"
                  - "appstream:List*"
                  - "backup:List*"
                  - "bedrock:List*"
                  - "bedrock:Get*"
                  - "cloudtrail:GetInsightSelectors"
                  - "codeartifact:List*"
                  - "codebuild:BatchGet*"
                  - "codebuild:ListReportGroups"
                  - "cognito-idp:GetUserPoolMfaConfig"
                  - "dlm:Get*"
                  - "drs:Describe*"
                  - "ds:Get*"
                  - "ds:Describe*"
                  - "ds:List*"
                  - "dynamodb:GetResourcePolicy"
                  - "ec2:GetEbsEncryptionByDefault"
                  - "ec2:GetSnapshotBlockPublicAccessState"
                  - "ec2:GetInstanceMetadataDefaults"
                  - "ecr:Describe*"
                  - "ecr:GetRegistryScanningConfiguration"
                  - "elasticfilesystem:DescribeBackupPolicy"
                  - "glue:GetConnections"
                  - "glue:GetSecurityConfiguration*"
                  - "glue:SearchTables"
                  - "lambda:GetFunction*"
                  - "logs:FilterLogEvents"
                  - "lightsail:GetRelationalDatabases"
                  - "macie2:GetMacieSession"
                  - "macie2:GetAutomatedDiscoveryConfiguration"
                  - "s3:GetAccountPublicAccessBlock"
                  - "shield:DescribeProtection"
                  - "shield:GetSubscriptionState"
                  - "securityhub:BatchImportFindings"
                  - "securityhub:GetFindings"
                  - "servicecatalog:Describe*"
                  - "servicecatalog:List*"
                  - "ssm:GetDocument"
                  - "ssm-incidents:List*"
                  - "states:ListTagsForResource"
                  - "support:Describe*"
                  - "tag:GetTagKeys"
                  - "wellarchitected:List*"
                Resource: "*"
              - Sid: AllowAPIGatewayReadOnly
                Effect: Allow
                Action:
                  - "apigateway:GET"
                Resource:
                  - "arn:*:apigateway:*::/restapis/*"
                  - "arn:*:apigateway:*::/apis/*"
      Tags:
        - Key: "Service"
          Value: "https://prowler.com"
        - Key: "Support"
          Value: "support@prowler.com"
        - Key: "CloudFormation"
          Value: "true"
        - Key: "Name"
          Value: "ProwlerScan"

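As a quick sanity check once the stack is deployed, the trust policy can be exercised by assuming the role with the External ID configured at stack creation. A minimal sketch with boto3; the account ID and External ID below are placeholders, not values from the template:

import boto3

# Hypothetical verification that the ProwlerScan trust policy works.
# Replace the placeholder account ID and External ID with your own values.
sts = boto3.client("sts")
response = sts.assume_role(
    RoleArn="arn:aws:iam::111122223333:role/ProwlerScan",
    RoleSessionName="prowler-scan-check",
    ExternalId="ProvidedExternalID",
)
# Temporary credentials were issued if this prints a future timestamp.
print(response["Credentials"]["Expiration"])
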
permissions/templates/terraform/README.md (new file, 10 lines)
@@ -0,0 +1,10 @@
## Deployment using Terraform

To deploy the Prowler Scan Role so that Prowler can scan your AWS account, run the following commands in your terminal:
1. `terraform init`
2. `terraform plan`
3. `terraform apply`

During the `terraform plan` and `terraform apply` steps you will be asked for an External ID to be configured in the `ProwlerScan` IAM role.

> Note that Terraform will use the AWS credentials of your default profile.
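
To skip the interactive prompt, the External ID can also be passed on the command line, e.g. `terraform apply -var="external_id=<your-external-id>"` (the `external_id` variable is defined in `main.tf` below; the value shown is a placeholder).
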
permissions/templates/terraform/main.tf (new file, 111 lines)
@@ -0,0 +1,111 @@
# Variables
###################################
variable "external_id" {
  type        = string
  description = "This is the External ID that Prowler will use to assume the ProwlerScan IAM Role."

  validation {
    condition     = length(var.external_id) > 0
    error_message = "ExternalId must not be empty."
  }
}

variable "account_id" {
  type        = string
  description = "AWS Account ID that will assume the role created. If you are deploying this template to be used in Prowler Cloud, please do not edit this."
  default     = "232136659152"

  validation {
    condition     = length(var.account_id) == 12
    error_message = "AccountId must be a valid AWS Account ID."
  }
}

variable "iam_principal" {
  type        = string
  description = "The IAM principal type and name that will be allowed to assume the role created. Leave an * for all the IAM principals in your AWS account. If you are deploying this template to be used in Prowler Cloud, please do not edit this."
  default     = "role/prowler*"
}

##### PLEASE, DO NOT EDIT BELOW THIS LINE #####


# Terraform Provider Configuration
###################################
terraform {
  required_version = ">= 1.5"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.83"
    }
  }
}

provider "aws" {
  region = "us-east-1"
  default_tags {
    tags = {
      "Name"      = "ProwlerScan",
      "Terraform" = "true",
      "Service"   = "https://prowler.com",
      "Support"   = "support@prowler.com"
    }
  }
}

data "aws_partition" "current" {}


# IAM Role
###################################
data "aws_iam_policy_document" "prowler_assume_role_policy" {
  statement {
    actions = ["sts:AssumeRole"]
    principals {
      type        = "AWS"
      identifiers = ["arn:${data.aws_partition.current.partition}:iam::${var.account_id}:root"]
    }
    condition {
      test     = "StringEquals"
      variable = "sts:ExternalId"
      values = [
        var.external_id,
      ]
    }
    condition {
      test     = "StringLike"
      variable = "aws:PrincipalArn"
      values = [
        "arn:${data.aws_partition.current.partition}:iam::${var.account_id}:${var.iam_principal}",
      ]
    }
  }
}

resource "aws_iam_role" "prowler_scan" {
  name               = "ProwlerScan"
  assume_role_policy = data.aws_iam_policy_document.prowler_assume_role_policy.json
}

resource "aws_iam_policy" "prowler_scan_policy" {
  name        = "ProwlerScan"
  description = "Prowler Scan Policy"
  policy      = file("../../prowler-additions-policy.json")
}

resource "aws_iam_role_policy_attachment" "prowler_scan_policy_attachment" {
  role       = aws_iam_role.prowler_scan.name
  policy_arn = aws_iam_policy.prowler_scan_policy.arn
}

resource "aws_iam_role_policy_attachment" "prowler_scan_securityaudit_policy_attachment" {
  role       = aws_iam_role.prowler_scan.name
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/SecurityAudit"
}

resource "aws_iam_role_policy_attachment" "prowler_scan_viewonly_policy_attachment" {
  role       = aws_iam_role.prowler_scan.name
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/job-function/ViewOnlyAccess"
}

poetry.lock (generated, 1807 lines changed)
File diff suppressed because it is too large.
@@ -51,6 +51,7 @@ from prowler.lib.outputs.compliance.cis.cis_aws import AWSCIS
 from prowler.lib.outputs.compliance.cis.cis_azure import AzureCIS
 from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
 from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
+from prowler.lib.outputs.compliance.cis.cis_microsoft365 import Microsoft365CIS
 from prowler.lib.outputs.compliance.compliance import display_compliance_table
 from prowler.lib.outputs.compliance.ens.ens_aws import AWSENS
 from prowler.lib.outputs.compliance.ens.ens_azure import AzureENS
@@ -78,6 +79,7 @@ from prowler.providers.common.provider import Provider
 from prowler.providers.common.quick_inventory import run_provider_quick_inventory
 from prowler.providers.gcp.models import GCPOutputOptions
 from prowler.providers.kubernetes.models import KubernetesOutputOptions
+from prowler.providers.microsoft365.models import Microsoft365OutputOptions


 def prowler():
@@ -259,6 +261,10 @@ def prowler():
         output_options = KubernetesOutputOptions(
             args, bulk_checks_metadata, global_provider.identity
         )
+    elif provider == "microsoft365":
+        output_options = Microsoft365OutputOptions(
+            args, bulk_checks_metadata, global_provider.identity
+        )

     # Run the quick inventory for the provider if available
     if hasattr(args, "quick_inventory") and args.quick_inventory:
@@ -307,7 +313,6 @@ def prowler():
     if "SLACK_API_TOKEN" in environ and (
         "SLACK_CHANNEL_NAME" in environ or "SLACK_CHANNEL_ID" in environ
     ):
-
         token = environ["SLACK_API_TOKEN"]
         channel = (
             environ["SLACK_CHANNEL_NAME"]
@@ -327,18 +332,21 @@ def prowler():
     # Outputs
     # TODO: this part is needed since the checks generates a Check_Report_XXX and the output uses Finding
     # This will be refactored for the outputs generate directly the Finding
-    finding_outputs = [
-        Finding.generate_output(global_provider, finding, output_options)
-        for finding in findings
-    ]
+    finding_outputs = []
+    for finding in findings:
+        try:
+            finding_outputs.append(
+                Finding.generate_output(global_provider, finding, output_options)
+            )
+        except Exception:
+            continue

     generated_outputs = {"regular": [], "compliance": []}

     if args.output_formats:
         for mode in args.output_formats:
             filename = (
-                f"{output_options.output_directory}/"
-                f"{output_options.output_filename}"
+                f"{output_options.output_directory}/{output_options.output_filename}"
             )
             if mode == "csv":
                 csv_output = CSV(
@@ -629,6 +637,36 @@ def prowler():
                     generated_outputs["compliance"].append(generic_compliance)
                     generic_compliance.batch_write_data_to_file()

+        elif provider == "microsoft365":
+            for compliance_name in input_compliance_frameworks:
+                if compliance_name.startswith("cis_"):
+                    # Generate CIS Finding Object
+                    filename = (
+                        f"{output_options.output_directory}/compliance/"
+                        f"{output_options.output_filename}_{compliance_name}.csv"
+                    )
+                    cis = Microsoft365CIS(
+                        findings=finding_outputs,
+                        compliance=bulk_compliance_frameworks[compliance_name],
+                        create_file_descriptor=True,
+                        file_path=filename,
+                    )
+                    generated_outputs["compliance"].append(cis)
+                    cis.batch_write_data_to_file()
+                else:
+                    filename = (
+                        f"{output_options.output_directory}/compliance/"
+                        f"{output_options.output_filename}_{compliance_name}.csv"
+                    )
+                    generic_compliance = GenericCompliance(
+                        findings=finding_outputs,
+                        compliance=bulk_compliance_frameworks[compliance_name],
+                        create_file_descriptor=True,
+                        file_path=filename,
+                    )
+                    generated_outputs["compliance"].append(generic_compliance)
+                    generic_compliance.batch_write_data_to_file()
+
     # AWS Security Hub Integration
     if provider == "aws":
         # Send output to S3 if needed (-B / -D) for all the output formats

@@ -28,7 +28,9 @@
                     "Service": "ebs"
                 }
             ],
-            "Checks": []
+            "Checks": [
+                "ec2_ebs_volume_snapshots_exists"
+            ]
         },
         {
             "Id": "1.0.3",
@@ -42,7 +44,8 @@
                 }
             ],
             "Checks": [
-                "ec2_ebs_default_encryption"
+                "ec2_ebs_default_encryption",
+                "ec2_ebs_volume_encryption"
             ]
         },
         {
@@ -87,7 +90,9 @@
                 }
             ],
             "Checks": [
-                "iam_user_mfa_enabled_console_access"
+                "iam_user_mfa_enabled_console_access",
+                "iam_user_hardware_mfa_enabled",
+                "iam_root_mfa_enabled"
             ]
         },
         {
@@ -102,7 +107,9 @@
                 }
             ],
             "Checks": [
-                "iam_user_mfa_enabled_console_access"
+                "iam_user_mfa_enabled_console_access",
+                "iam_user_hardware_mfa_enabled",
+                "iam_root_mfa_enabled"
             ]
         },
         {
@@ -117,7 +124,9 @@
                 }
             ],
             "Checks": [
-                "iam_root_mfa_enabled"
+                "iam_root_mfa_enabled",
+                "iam_root_hardware_mfa_enabled",
+                "iam_user_mfa_enabled_console_access"
             ]
         },
         {
@@ -162,7 +171,10 @@
                 }
             ],
             "Checks": [
-                "rds_instance_no_public_access"
+                "rds_instance_no_public_access",
+                "s3_bucket_public_access",
+                "s3_bucket_public_list_acl",
+                "s3_account_level_public_access_blocks"
             ]
         },
         {
@@ -192,7 +204,8 @@
                 }
             ],
             "Checks": [
-                "rds_instance_storage_encrypted"
+                "rds_instance_storage_encrypted",
+                "rds_instance_transport_encrypted"
             ]
         },
         {

@@ -455,7 +455,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -476,7 +477,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Automated",
                     "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -497,7 +499,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,7 +521,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -540,7 +544,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -561,7 +566,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.2. Elastic Compute Cloud (EC2)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.2. Elastic Compute Cloud (EC2)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -578,11 +584,13 @@
             "Id": "2.3.1",
             "Description": "Ensure that encryption is enabled for RDS Instances",
             "Checks": [
-                "rds_instance_storage_encrypted"
+                "rds_instance_storage_encrypted",
+                "rds_instance_transport_encrypted"
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",

@@ -455,7 +455,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -476,7 +477,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Automated",
                     "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -497,7 +499,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,7 +521,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -540,7 +544,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -561,7 +566,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.2. Elastic Compute Cloud (EC2)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.2. Elastic Compute Cloud (EC2)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -578,11 +584,13 @@
             "Id": "2.3.1",
             "Description": "Ensure that encryption is enabled for RDS Instances",
             "Checks": [
-                "rds_instance_storage_encrypted"
+                "rds_instance_storage_encrypted",
+                "rds_instance_transport_encrypted"
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -603,7 +611,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -624,7 +633,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -645,7 +655,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.4 Elastic File System (EFS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.4 Elastic File System (EFS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",

@@ -303,7 +303,9 @@
         {
             "Id": "1.22",
             "Description": "Ensure access to AWSCloudShellFullAccess is restricted",
-            "Checks": [],
+            "Checks": [
+                "iam_policy_cloudshell_admin_not_attached"
+            ],
             "Attributes": [
                 {
                     "Section": "1. Identity and Access Management",
@@ -474,7 +476,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Automated",
                     "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -491,11 +494,13 @@
             "Id": "2.1.2",
             "Description": "Ensure MFA Delete is enabled on S3 buckets",
             "Checks": [
-                "s3_bucket_no_mfa_delete"
+                "s3_bucket_no_mfa_delete",
+                "cloudtrail_bucket_requires_mfa_delete"
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -516,7 +521,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -538,7 +544,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -559,7 +566,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.2. Elastic Compute Cloud (EC2)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.2. Elastic Compute Cloud (EC2)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -576,11 +584,13 @@
             "Id": "2.3.1",
             "Description": "Ensure that encryption is enabled for RDS Instances",
             "Checks": [
-                "rds_instance_storage_encrypted"
+                "rds_instance_storage_encrypted",
+                "rds_instance_transport_encrypted"
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -601,7 +611,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -622,7 +633,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -643,7 +655,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.4 Elastic File System (EFS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.4 Elastic File System (EFS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",
@@ -1338,7 +1351,8 @@
             "Id": "5.6",
             "Description": "Ensure that EC2 Metadata Service only allows IMDSv2",
             "Checks": [
-                "ec2_instance_imdsv2_enabled"
+                "ec2_instance_imdsv2_enabled",
+                "ec2_instance_account_imdsv2_enabled"
             ],
             "Attributes": [
                 {

@@ -474,7 +474,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Automated",
                     "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -495,7 +496,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -516,7 +518,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -538,7 +541,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1. Simple Storage Service (S3)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.1. Simple Storage Service (S3)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -559,7 +563,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.2. Elastic Compute Cloud (EC2)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.2. Elastic Compute Cloud (EC2)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -580,7 +585,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -601,7 +607,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -622,7 +629,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.3. Relational Database Service (RDS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.3. Relational Database Service (RDS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -643,7 +651,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.4 Elastic File System (EFS)",
+                    "Section": "2. Storage",
+                    "SubSection": "2.4 Elastic File System (EFS)",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",

@@ -12,7 +12,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.1 Security Defaults",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.1 Security Defaults",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Security defaults in Azure Active Directory (Azure AD) make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security",
@@ -34,7 +35,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.1 Security Defaults",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.1 Security Defaults",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; • Service Co-Administrators • Subscription Owners • Contributors",
@@ -56,7 +58,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.1 Security Defaults",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.1 Security Defaults",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Enable multi-factor authentication for all non-privileged users.",
@@ -76,7 +79,8 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.1 Security Defaults",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.1 Security Defaults",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Do not allow users to remember multi-factor authentication on devices.",
@@ -98,7 +102,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.2 Conditional Access",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Azure Active Directory Conditional Access allows an organization to configure Named locations and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
@@ -118,7 +123,8 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.2 Conditional Access",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "CAUTION: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
@@ -138,7 +144,8 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.2 Conditional Access",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -158,7 +165,8 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.2 Conditional Access",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -178,7 +186,8 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.2 Conditional Access",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -198,7 +207,8 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
+                    "SubSection": "1.2 Conditional Access",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -220,7 +230,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "Require administrators or appropriately delegated users to create new tenants.",
@@ -240,7 +250,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "This recommendation extends guest access review by utilizing the Azure AD Privileged Identity Management feature provided in Azure AD Premium P2. Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user.",
@@ -260,7 +270,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user. Guest users in every subscription should be reviewed on a regular basis to ensure that inactive and unneeded accounts are removed.",
@@ -280,7 +290,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Ensures that two alternate forms of identification are provided before allowing a password reset.",
@@ -300,7 +310,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Microsoft Azure provides a Global Banned Password policy that applies to Azure administrative and normal user accounts. This is not applied to user accounts that are synced from an on-premise Active Directory unless Azure AD Connect is used and you enable EnforceCloudPasswordPolicyForPasswordSyncedUsers. Please see the list in default values on the specifics of this policy. To further password security, it is recommended to further define a custom banned password policy.",
@@ -320,7 +330,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Ensure that the number of days before users are asked to re-confirm their authentication information is not set to 0.",
@@ -340,7 +350,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Ensure that users are notified on their primary and secondary emails on password resets.",
@@ -360,7 +370,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Ensure that all Global Administrators are notified if any other administrator resets their password.",
@@ -382,7 +392,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Require administrators to provide consent for applications before use.",
@@ -404,7 +414,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Allow users to provide consent for selected permissions when a request is coming from a verified publisher.",
@@ -424,7 +434,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Require administrators to provide consent for the apps before use.",
@@ -446,7 +456,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Require administrators or appropriately delegated users to register third-party applications.",
@@ -468,7 +478,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Limit guest user permissions.",
@@ -490,7 +500,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Restrict invitations to users with specific administrative roles only.",
@@ -510,7 +520,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Restrict access to the Azure AD administration portal to administrators only. NOTE: This only affects access to the Azure AD administrator's web portal. This setting does not prohibit privileged users from using other methods such as Rest API or Powershell to obtain sensitive information from Azure AD.",
@@ -530,7 +540,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Restricts group creation to administrators with permissions only.",
@@ -552,7 +562,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Restrict security group creation to administrators only.",
@@ -572,7 +582,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Restrict security group management to administrators only.",
@@ -594,7 +604,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Restrict Microsoft 365 group creation to administrators only.",
@@ -614,7 +624,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Manual",
                     "Description": "Joining or registering devices to the active directory should require Multi-factor authentication.",
@@ -636,7 +646,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 1",
                     "AssessmentStatus": "Automated",
                     "Description": "The principle of least privilege should be followed and only necessary privileges should be assigned instead of allowing full administrative access.",
@@ -658,7 +668,7 @@
             ],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Resource locking is a powerful protection mechanism that can prevent inadvertent modification/deletion of resources within Azure subscriptions/Resource Groups and is a recommended NIST configuration.",
@@ -678,7 +688,7 @@
             "Checks": [],
             "Attributes": [
                 {
-                    "Section": "1.2 Conditional Access",
+                    "Section": "1. Identity and Access Management",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Users who are set as subscription owners are able to make administrative changes to the subscriptions and move them into and out of Azure Active Directories.",
@@ -700,7 +710,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -722,7 +733,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -744,7 +756,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for Databases enables threat detection for the instances running your database software. This provides threat intelligence, anomaly detection, and behavior analytics in the Azure Microsoft Defender for Cloud. Instead of being enabled on services like Platform as a Service (PaaS), this implementation will run within your instances as Infrastructure as a Service (IaaS) on the Operating Systems hosting your databases.",
@@ -766,7 +779,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Azure SQL database servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -788,7 +802,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -810,7 +825,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -832,7 +848,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
+                    "SubSection": "2.1 Microsoft Defender for Cloud",
                     "Profile": "Level 2",
                     "AssessmentStatus": "Manual",
                     "Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -854,7 +871,8 @@
             ],
             "Attributes": [
                 {
-                    "Section": "2.1 Microsoft Defender for Cloud",
+                    "Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -876,7 +894,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
|
||||
@@ -898,7 +917,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
|
||||
@@ -920,7 +940,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
|
||||
@@ -942,7 +963,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
|
||||
@@ -964,7 +986,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
|
||||
@@ -986,7 +1009,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "None of the settings offered by ASC Default policy should be set to effect Disabled.",
|
||||
@@ -1008,7 +1032,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
|
||||
@@ -1030,7 +1055,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
|
||||
@@ -1050,7 +1076,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
|
||||
@@ -1072,7 +1099,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable security alert emails to subscription owners.",
|
||||
@@ -1094,7 +1122,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
|
||||
@@ -1116,7 +1145,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
|
||||
@@ -1138,7 +1168,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
|
||||
@@ -1160,7 +1191,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.1 Microsoft Defender for Cloud",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.1 Microsoft Defender for Cloud",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. IMPORTANT: When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 2. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
|
||||
@@ -1182,7 +1214,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "2.2 Microsoft Defender for IoT",
|
||||
"Section": "2. Microsoft Defender",
|
||||
"SubSection": "2.2 Microsoft Defender for IoT",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
|
||||
@@ -1524,7 +1557,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable auditing on SQL Servers.",
|
||||
@@ -1546,7 +1580,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
|
||||
@@ -1568,7 +1603,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
|
||||
@@ -1590,7 +1626,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Use Azure Active Directory Authentication for authentication with SQL Database to manage credentials in a single place.",
|
||||
@@ -1612,7 +1649,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Transparent Data Encryption on every SQL server.",
|
||||
@@ -1634,7 +1672,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.1 SQL Server - Auditing",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.1 SQL Server - Auditing",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
|
||||
@@ -1656,7 +1695,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable 'Microsoft Defender for SQL' on critical SQL Servers.",
|
||||
@@ -1678,7 +1718,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
|
||||
@@ -1700,7 +1741,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
|
||||
@@ -1722,7 +1764,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Configure 'Send scan reports to' with email addresses of concerned data owners/stakeholders for a critical SQL servers",
|
||||
@@ -1744,7 +1787,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable Vulnerability Assessment (VA) setting 'Also send email notifications to admins and subscription owners'.",
|
||||
@@ -1766,7 +1810,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable SSL connection on PostgreSQL Servers.",
|
||||
@@ -1788,7 +1833,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable log_checkpoints on PostgreSQL Servers.",
|
||||
@@ -1810,7 +1856,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable log_connections on PostgreSQL Servers.",
|
||||
@@ -1832,7 +1879,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable log_disconnections on PostgreSQL Servers.",
|
||||
@@ -1854,7 +1902,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable connection_throttling on PostgreSQL Servers.",
|
||||
@@ -1876,7 +1925,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure log_retention_days on PostgreSQL Servers is set to an appropriate value.",
|
||||
@@ -1898,7 +1948,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
|
||||
@@ -1918,7 +1969,8 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.3 PostgreSQL Database Server",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.3 PostgreSQL Database Server",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
|
||||
@@ -1940,7 +1992,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable SSL connection on MYSQL Servers.",
|
||||
@@ -1962,7 +2015,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Ensure TLS version on MySQL flexible servers is set to the default value.",
|
||||
@@ -1984,7 +2038,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable audit_log_enabled on MySQL Servers.",
|
||||
@@ -2006,7 +2061,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.4 MySQL Database",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.4 MySQL Database",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Set audit_log_enabled to include CONNECTION on MySQL Servers.",
|
||||
@@ -2028,7 +2084,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
|
||||
@@ -2050,7 +2107,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Private endpoints limit network traffic to approved sources.",
|
||||
@@ -2072,7 +2130,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Cosmos DB can use tokens or AAD for client authentication which in turn will use Azure RBAC for authorization. Using AAD is significantly more secure because AAD handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
|
||||
@@ -2094,7 +2153,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnos tic settings are available for each individual resource within a subscription. Settings should be configured for allappropriate resources for your environment.",
|
||||
@@ -2116,7 +2176,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Prerequisite: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: 'Ensure that a 'Diagnostic Setting' exists.' The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
|
||||
@@ -2138,7 +2199,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The storage account container containing the activity log export should not be publicly accessible.",
|
||||
@@ -2160,7 +2222,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
|
||||
@@ -2182,7 +2245,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
|
||||
@@ -2204,7 +2268,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
|
||||
@@ -2226,7 +2291,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.1 Configuring Diagnostic Settings",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.1 Configuring Diagnostic Settings",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
|
||||
@@ -2248,7 +2314,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create Policy Assignment event.",
|
||||
@@ -2270,7 +2337,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
|
||||
@@ -2292,7 +2360,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
|
||||
@@ -2314,7 +2383,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Network Security Group event.",
|
||||
@@ -2336,7 +2406,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
|
||||
@@ -2358,7 +2429,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Security Solution event.",
|
||||
@@ -2380,7 +2452,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
|
||||
@@ -2402,7 +2475,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
|
||||
@@ -2424,7 +2498,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
|
||||
@@ -2446,7 +2521,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.2 Monitoring using Activity Log Alerts",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
|
||||
@@ -2466,7 +2542,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"Profile": "Level 1",
|
||||
"AssessmentStatus": "Manual",
|
||||
"Description": "Resource Logs capture activity to the data access plane while the Activity log is a subscription-level log for the control plane. Resource-level diagnostic logs provide insight into operations that were performed within that resource itself; for example, reading or updating a secret from a Key Vault. Currently, 95 Azure resources support Azure Monitoring (See the more information section for a complete list), including Network Security Groups, Load Balancers, Key Vault, AD, Logic Apps, and CosmosDB. The content of these logs varies by resource type. A number of back-end services were not configured to log and store Resource Logs for certain activities or for a sufficient length. It is crucial that monitoring is correctly configured to log all relevant activities and retain those logs for a sufficient length of time. Given that the mean time to detection in an enterprise is 240 days, a minimum retention period of two years is recommended.",
|
||||
@@ -2486,7 +2562,7 @@
|
||||
"Checks": [],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "The use of Basic or Free SKUs in Azure whilst cost effective have significant limitations in terms of what can be monitored and what support can be realized from Microsoft. Typically, these SKU’s do not have a service SLA and Microsoft will usually refuse to provide support for them. Consequently Basic/Free SKUs should never be used for production workloads.",
|
||||
@@ -2508,7 +2584,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "5.3 Configuring Application Insights",
|
||||
"Section": "5. Logging and Monitoring",
|
||||
"SubSection": "5.3 Configuring Application Insights",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
@@ -494,7 +494,8 @@
 ],
 "Attributes": [
 {
-"Section": "1.1 Security Defaults Security Defaults",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.1 Security Defaults Security Defaults",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Security defaults in Microsoft Entra ID make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security enabled at no extra cost. You may turn on security defaults in the Azure portal.",
@@ -516,7 +517,8 @@
 ],
 "Attributes": [
 {
-"Section": "1.1 Security Defaults Security Defaults",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.1 Security Defaults Security Defaults",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; - Service Co-Administrators - Subscription Owners - Contributors",
@@ -538,7 +540,8 @@
 ],
 "Attributes": [
 {
-"Section": "1.1 Security Defaults",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.1 Security Defaults Security Defaults",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Enable multi-factor authentication for all non-privileged users.",
@@ -558,7 +561,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "1.1 Security Defaults",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.1 Security Defaults Security Defaults",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Do not allow users to remember multi-factor authentication on devices.",
@@ -580,7 +584,8 @@
 ],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Microsoft Entra ID Conditional Access allows an organization to configure `Named locations` and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
@@ -600,7 +605,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "**CAUTION**: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
@@ -620,7 +626,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -640,7 +647,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -660,7 +668,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -682,7 +691,8 @@
 ],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "This recommendation ensures that users accessing the Windows Azure Service Management API (i.e. Azure Powershell, Azure CLI, Azure Resource Manager API, etc.) are required to use multifactor authentication (MFA) credentials when accessing resources through the Windows Azure Service Management API.",
@@ -702,7 +712,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "1.2 Conditional Access",
+"Section": "1.Identity and Access Management",
+"SubSection": "1.2 Conditional Access",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "This recommendation ensures that users accessing Microsoft Admin Portals (i.e. Microsoft 365 Admin, Microsoft 365 Defender, Exchange Admin Center, Azure Portal, etc.) are required to use multifactor authentication (MFA) credentials when logging into an Admin Portal.",
@@ -724,7 +735,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -746,7 +758,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -768,7 +781,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Managed Instance Azure SQL databases, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
@@ -790,7 +804,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
@@ -812,7 +827,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -834,7 +850,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
@@ -856,7 +873,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -878,7 +896,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud. The following services will be enabled for container instances: - Defender agent in Azure - Azure Policy for Kubernetes - Agentless discovery for Kubernetes - Agentless container vulnerability assessment",
@@ -900,7 +919,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -922,7 +942,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "[**NOTE:** As of August 1, customers with an existing subscription to Defender for DNS can continue to use the service, but new subscribers will receive alerts about suspicious DNS activity as part of Defender for Servers P2.] Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
@@ -944,7 +965,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
@@ -966,7 +988,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure that the latest OS patches for all virtual machines are applied.",
@@ -988,7 +1011,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "The Microsoft Cloud Security Benchmark (or MCSB) is an Azure Policy Initiative containing many security policies to evaluate resource configuration against best practice recommendations. If a policy in the MCSB is set with effect type `Disabled`, it is not evaluated and may prevent administrators from being informed of valuable security recommendations.",
@@ -1010,7 +1034,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
@@ -1032,7 +1057,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
@@ -1052,7 +1078,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
@@ -1074,7 +1101,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable security alert emails to subscription owners.",
@@ -1096,7 +1124,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
@@ -1118,7 +1147,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
@@ -1140,7 +1170,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
@@ -1162,7 +1193,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. **IMPORTANT:** When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 1. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
@@ -1182,7 +1214,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "2.1 Microsoft Defender for Cloud",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.1 Microsoft Defender for Cloud",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "An organization's attack surface is the collection of assets with a public network identifier or URI that an external threat actor can see or access from outside your cloud. It is the set of points on the boundary of a system, a system element, system component, or an environment where an attacker can try to enter, cause an effect on, or extract data from, that system, system element, system component, or environment. The larger the attack surface, the harder it is to protect. This tool can be configured to scan your organization's online infrastructure such as specified domains, hosts, CIDR blocks, and SSL certificates, and store them in an Inventory. Inventory items can be added, reviewed, approved, and removed, and may contain enrichments (insights) and additional information collected from the tool's different scan engines and open-source intelligence sources. A Defender EASM workspace will generate an Inventory of publicly exposed assets by crawling and scanning the internet using _Seeds_ you provide when setting up the tool. Seeds can be FQDNs, IP CIDR blocks, and WHOIS records. Defender EASM will generate Insights within 24-48 hours after Seeds are provided, and these insights include vulnerability data (CVEs), ports and protocols, and weak or expired SSL certificates that could be used by an attacker for reconnaisance or exploitation. Results are classified High/Medium/Low and some of them include proposed mitigations.",
@@ -1204,7 +1237,8 @@
 ],
 "Attributes": [
 {
-"Section": "2.2 Microsoft Defender for IoT",
+"Section": "2. Microsoft Defender",
+"SubSection": "2.2 Microsoft Defender for IoT",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
@@ -1586,7 +1620,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.1 SQL Server - Auditing",
+"Section": "4. Database Services",
+"SubSection": "4.1 SQL Server - Auditing",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable auditing on SQL Servers.",
@@ -1608,7 +1643,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.1 SQL Server - Auditing",
+"Section": "4. Database Services",
+"SubSection": "4.1 SQL Server - Auditing",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
@@ -1630,7 +1666,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.1 SQL Server - Auditing",
+"Section": "4. Database Services",
+"SubSection": "4.1 SQL Server - Auditing",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
@@ -1652,7 +1689,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.1 SQL Server - Auditing",
+"Section": "4. Database Services",
+"SubSection": "4.1 SQL Server - Auditing",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Use Microsoft Entra authentication for authentication with SQL Database to manage credentials in a single place.",
@@ -1674,7 +1712,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.1 SQL Server - Auditing",
+"Section": "4. Database Services",
+"SubSection": "4.1 SQL Server - Auditing",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable Transparent Data Encryption on every SQL server.",
@@ -1696,7 +1735,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.1 SQL Server - Auditing",
+"Section": "4. Database Services",
+"SubSection": "4.1 SQL Server - Auditing",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
@@ -1718,7 +1758,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable `SSL connection` on `PostgreSQL` Servers.",
@@ -1740,7 +1781,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable `log_checkpoints` on `PostgreSQL Servers`.",
@@ -1762,7 +1804,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable `log_connections` on `PostgreSQL Servers`.",
@@ -1784,7 +1827,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable `log_disconnections` on `PostgreSQL Servers`.",
@@ -1806,7 +1850,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable `connection_throttling` on `PostgreSQL Servers`.",
@@ -1828,7 +1873,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure `log_retention_days` on `PostgreSQL Servers` is set to an appropriate value.",
@@ -1850,7 +1896,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Disable access from Azure services to PostgreSQL Database Server.",
@@ -1870,7 +1917,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
+"Section": "4. Database Services",
+"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
@@ -1892,7 +1940,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.4 MySQL Database",
+"Section": "4. Database Services",
+"SubSection": "4.4 MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable `SSL connection` on `MYSQL` Servers.",
@@ -1914,7 +1963,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.4 MySQL Database",
+"Section": "4. Database Services",
+"SubSection": "4.4 MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure `TLS version` on `MySQL flexible` servers is set to use TLS version 1.2 or higher.",
@@ -1936,7 +1986,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.4 MySQL Database",
+"Section": "4. Database Services",
+"SubSection": "4.4 MySQL Database",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Enable audit_log_enabled on MySQL Servers.",
@@ -1958,7 +2009,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.4 MySQL Database",
+"Section": "4. Database Services",
+"SubSection": "4.4 MySQL Database",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Set `audit_log_enabled` to include CONNECTION on MySQL Servers.",
@@ -1980,7 +2032,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.5 Cosmos DB",
+"Section": "4. Database Services",
+"SubSection": "4.5 Cosmos DB",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
@@ -2002,7 +2055,8 @@
|
||||
],
|
||||
"Attributes": [
|
||||
{
|
||||
"Section": "4.5 Cosmos DB",
|
||||
"Section": "4. Database Services",
|
||||
"SubSection": "4.5 Cosmos DB",
|
||||
"Profile": "Level 2",
|
||||
"AssessmentStatus": "Automated",
|
||||
"Description": "Private endpoints limit network traffic to approved sources.",
|
||||
@@ -2024,7 +2078,8 @@
 ],
 "Attributes": [
 {
-"Section": "4.5 Cosmos DB",
+"Section": "4. Database Services",
+"SubSection": "4.5 Cosmos DB",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Cosmos DB can use tokens or Entra ID for client authentication, which in turn will use Azure RBAC for authorization. Using Entra ID is significantly more secure because Entra ID handles the credentials and allows for MFA and centralized management, and Azure RBAC is better integrated with the rest of Azure.",
@@ -2086,7 +2141,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.1 Configuring Diagnostic Settings",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.1 Configuring Diagnostic Settings",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
@@ -2108,7 +2164,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.1 Configuring Diagnostic Settings",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.1 Configuring Diagnostic Settings",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "**Prerequisite**: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: Ensure that a 'Diagnostic Setting' exists. The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
@@ -2130,7 +2187,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.1 Configuring Diagnostic Settings",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.1 Configuring Diagnostic Settings",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
@@ -2152,7 +2210,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.1 Configuring Diagnostic Settings",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.1 Configuring Diagnostic Settings",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
@@ -2174,7 +2233,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.1 Configuring Diagnostic Settings",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.1 Configuring Diagnostic Settings",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
@@ -2196,7 +2256,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.1 Configuring Diagnostic Settings",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.1 Configuring Diagnostic Settings",
 "Profile": "Level 2",
 "AssessmentStatus": "Manual",
 "Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
@@ -2218,7 +2279,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Create Policy Assignment event.",
@@ -2240,7 +2302,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Delete Policy Assignment event.",
@@ -2262,7 +2325,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
@@ -2284,7 +2348,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Delete Network Security Group event.",
@@ -2306,7 +2371,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Create or Update Security Solution event.",
@@ -2328,7 +2394,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Delete Security Solution event.",
@@ -2350,7 +2417,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
@@ -2372,7 +2440,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Delete SQL Server Firewall Rule.",
@@ -2394,7 +2463,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
@@ -2416,7 +2486,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.2 Monitoring using Activity Log Alerts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.2 Monitoring using Activity Log Alerts",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Create an activity log alert for the Delete Public IP Address rule.",
@@ -2438,7 +2509,8 @@
 ],
 "Attributes": [
 {
-"Section": "5.3 Configuring Application Insights. Storage Accounts",
+"Section": "5. Logging and Monitoring",
+"SubSection": "5.3 Configuring Application Insights. Storage Accounts",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
@@ -3043,7 +3115,9 @@
 {
 "Id": "9.4",
 "Description": "Ensure that Register with Entra ID is enabled on App Service",
-"Checks": [],
+"Checks": [
+"app_register_with_identity"
+],
 "Attributes": [
 {
 "Section": "9. AppService",
3499	prowler/compliance/azure/cis_3.0_azure.json	(new file)
File diff suppressed because one or more lines are too long
@@ -1292,7 +1292,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "6.1. MySQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.1. MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances. This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1313,7 +1314,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.1. MySQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.1. MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
@@ -1334,7 +1336,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.1. MySQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.1. MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1355,7 +1358,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are: - `TERSE` - `DEFAULT` - `VERBOSE` `TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information. `VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error. Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1376,7 +1380,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1397,7 +1402,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are: - `none` - `ddl` - `mod` - `all` The value `ddl` logs all data definition statements. The value `mod` logs all ddl statements, plus data-modifying statements. The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included. A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1418,7 +1424,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet. In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC). Limiting network access to your database will limit potential attacks.",
@@ -1439,7 +1446,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1460,7 +1468,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1481,7 +1490,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1502,7 +1512,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1523,7 +1534,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1544,7 +1556,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1565,7 +1578,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
@@ -1586,7 +1600,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1607,7 +1622,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1628,7 +1644,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
@@ -1649,7 +1666,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.",
@@ -1670,7 +1688,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1330,7 +1330,8 @@
 "Checks": [],
 "Attributes": [
 {
-"Section": "6.1. MySQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.1. MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Manual",
 "Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances.This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1352,7 +1353,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.1. MySQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.1. MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
@@ -1374,7 +1376,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.1. MySQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.1. MySQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1396,7 +1399,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are:- `TERSE`- `DEFAULT`- `VERBOSE``TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information.`VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error.Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1418,7 +1422,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1440,7 +1445,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1462,7 +1468,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 2",
 "AssessmentStatus": "Automated",
 "Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are:- `none`- `ddl`- `mod`- `all`The value `ddl` logs all data definition statements.The value `mod` logs all ddl statements, plus data-modifying statements.The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included.A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1484,7 +1491,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1506,7 +1514,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1528,7 +1537,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1550,7 +1560,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.2. PostgreSQL Database",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.2. PostgreSQL Database",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1572,7 +1583,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
@@ -1594,7 +1606,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`. This flag is deprecated for all SQL Server versions in GCP. Going forward, you can't set its value to on. However, if you have this flag enabled, we strongly recommend that you either remove the flag from your database or set it to off. For cross-database access, use the [Microsoft tutorial for signing stored procedures with a certificate](https://learn.microsoft.com/en-us/sql/relational-databases/tutorial-signing-stored-procedures-with-a-certificate?view=sql-server-ver16).",
@@ -1616,7 +1629,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1638,7 +1652,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
@@ -1660,7 +1675,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1682,7 +1698,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1704,7 +1721,8 @@
 ],
 "Attributes": [
 {
-"Section": "6.3. SQL Server",
+"Section": "6. Cloud SQL Database Services",
+"SubSection": "6.3. SQL Server",
 "Profile": "Level 1",
 "AssessmentStatus": "Automated",
 "Description": "It is recommended not to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `on`.",
2895	prowler/compliance/kubernetes/cis_1.10_kubernetes.json	(new file)
File diff suppressed because it is too large
0	prowler/compliance/microsoft365/__init__.py	(new file)
134	prowler/compliance/microsoft365/cis_4.0_microsoft365.json	(new file)
@@ -0,0 +1,134 @@
+{
+"Framework": "CIS",
+"Version": "4.0",
+"Provider": "Microsoft365",
+"Description": "The CIS Microsoft 365 Foundations Benchmark provides prescriptive guidance for establishing a secure configuration posture for Microsoft 365 Cloud offerings running on any OS.",
+"Requirements": [
+{
+"Id": "1.1.1",
+"Description": "Ensure Administrative accounts are cloud-only",
+"Checks": [],
+"Attributes": [
+{
+"Section": "1.Microsoft 365 admin center",
+"Profile": "Level 1",
+"AssessmentStatus": "Manual",
+"Description": "Administrative accounts are special privileged accounts that could have varying levels of access to data, users, and settings. Regular user accounts should never be utilized for administrative tasks and care should be taken, in the case of a hybrid environment, to keep Administrative accounts separated from on-prem accounts. Administrative accounts should not have applications assigned so that they have no access to potentially vulnerable services (e.g., email, Teams, SharePoint, etc.) and only access to perform tasks as needed for administrative purposes. Ensure administrative accounts are not On-premises sync enabled.",
+"RationaleStatement": "In a hybrid environment, having separate accounts will help ensure that in the event of a breach in the cloud, that the breach does not affect the on-prem environment and vice versa.",
+"ImpactStatement": "Administrative users will have to switch accounts and utilize login/logout functionality when performing administrative tasks, as well as not benefiting from SSO. This will require a migration process from the 'daily driver' account to a dedicated admin account. When migrating permissions to the admin account, both M365 and Azure RBAC roles should be migrated as well. Once the new admin accounts are created, both of these permission sets should be moved from the daily driver account to the new admin account. Failure to migrate Azure RBAC roles can cause an admin to not be able to see their subscriptions/resources while using their admin accounts.",
+"RemediationProcedure": "Review all administrative accounts and ensure they are configured as cloud-only. Remove any on-premises synchronization for these accounts. Assign necessary roles and permissions exclusively to the dedicated cloud administrative accounts.",
+"AuditProcedure": "Log in to the Microsoft 365 Admin Center and review the list of administrative accounts. Verify that none of them are on-premises sync enabled.",
+"AdditionalInformation": "This recommendation is particularly relevant for hybrid environments and is aimed at enhancing the security of administrative accounts by isolating them from on-prem infrastructure.",
+"DefaultValue": "By default, administrative accounts may be either cloud-only or hybrid. This setting needs to be verified and adjusted according to the recommendation.",
+"References": "CIS Microsoft 365 Foundations Benchmark v4.0, Section 1.1.1"
+}
+]
+},
+{
+"Id": "1.1.2",
+"Description": "Ensure two emergency access accounts have been defined",
+"Checks": [],
+"Attributes": [
+{
+"Section": "1.Microsoft 365 admin center",
+"Profile": "Level 1",
+"AssessmentStatus": "Manual",
+"Description": "Emergency access or 'break glass' accounts are limited for emergency scenarios where normal administrative accounts are unavailable. They are not assigned to a specific user and will have a combination of physical and technical controls to prevent them from being accessed outside a true emergency. These emergencies could include technical failures of a cellular provider or Microsoft-related service such as MFA, or the last remaining Global Administrator account becoming inaccessible. Ensure two Emergency Access accounts have been defined.",
+"RationaleStatement": "In various situations, an organization may require the use of a break glass account to gain emergency access. Losing access to administrative functions could result in a significant loss of support capability, reduced visibility into the security posture, and potential financial losses.",
+"ImpactStatement": "Improper implementation of emergency access accounts could weaken the organization's security posture. To mitigate this, at least one account should be excluded from all conditional access rules, and strong authentication mechanisms (e.g., long, high-entropy passwords or FIDO2 security keys) must be used to secure the accounts.",
+"RemediationProcedure": "Create two emergency access accounts and configure them according to Microsoft's recommendations. Ensure that these accounts are not assigned to specific users and are excluded from all conditional access rules. Secure the accounts with strong passwords or passwordless authentication methods, such as FIDO2 security keys. Regularly review and test these accounts to confirm their functionality.",
+"AuditProcedure": "Log in to the Microsoft 365 Admin Center and verify the existence of at least two emergency access accounts. Check their configurations to ensure they comply with Microsoft's recommendations, including exclusion from conditional access rules and proper security settings.",
+"AdditionalInformation": "Emergency access accounts are critical for maintaining administrative control during unexpected events. Regular audits and strict access controls are recommended to prevent misuse.",
+"DefaultValue": "By default, emergency access accounts are not configured. Organizations must create and secure these accounts proactively.",
+"References": "CIS Microsoft 365 Foundations Benchmark v4.0, Section 1.1.2; Microsoft documentation on emergency access accounts."
+}
+]
+},
+{
+"Id": "1.1.3",
+"Description": "Ensure that between two and four global admins are designated",
+"Checks": [
+"admincenter_users_between_two_and_four_global_admins"
+],
+"Attributes": [
+{
+"Section": "1.Microsoft 365 admin center",
+"Profile": "Level 1",
+"AssessmentStatus": "Automated",
+"Description": "More than one global administrator should be designated so a single admin can be monitored and to provide redundancy should a single admin leave an organization. Additionally, there should be no more than four global admins set for any tenant. Ideally, global administrators will have no licenses assigned to them.",
+"RationaleStatement": "If there is only one global tenant administrator, he or she can perform malicious activity without the possibility of being discovered by another admin. If there are numerous global tenant administrators, the more likely it is that one of their accounts will be successfully breached by an external attacker.",
+"ImpactStatement": "If there is only one global administrator in a tenant, an additional global administrator will need to be identified and configured. If there are more than four global administrators, a review of role requirements for current global administrators will be required to identify which of the users require global administrator access.",
+"RemediationProcedure": "Review the list of global administrators in the tenant and ensure there are at least two but no more than four accounts assigned this role. Remove excess global administrator accounts and create additional ones if necessary. Avoid assigning licenses to these accounts.",
+"AuditProcedure": "Log in to the Microsoft 365 Admin Center and review the list of global administrators. Verify that there are at least two but no more than four global administrators configured.",
+"AdditionalInformation": "Global administrators play a critical role in tenant management. Ensuring a proper number of global administrators improves redundancy and security.",
+"DefaultValue": "By default, there may be a single global administrator configured for the tenant. Organizations need to manually adjust the count as per best practices.",
+"References": "CIS Microsoft 365 Foundations Benchmark v4.0, Section 1.1.3"
+}
+]
+},
+{
+"Id": "1.1.4",
+"Description": "Ensure administrative accounts use licenses with a reduced application footprint",
+"Checks": [
+"admincenter_users_admins_reduced_license_footprint"
+],
+"Attributes": [
+{
+"Section": "1.Microsoft 365 admin center",
+"Profile": "Level 1",
+"AssessmentStatus": "Automated",
+"Description": "Administrative accounts are special privileged accounts with varying levels of access to data, users, and settings. It is recommended that privileged accounts either not be licensed or use Microsoft Entra ID P1 or Microsoft Entra ID P2 licenses to minimize application exposure.",
+"RationaleStatement": "Ensuring administrative accounts do not use licenses with applications assigned to them reduces the attack surface of high-privileged identities. This minimizes the likelihood of these accounts being targeted by social engineering attacks or exposed to malicious content via licensed applications. Administrative activities should be restricted to dedicated accounts without access to collaborative tools like mailboxes.",
+"ImpactStatement": "Administrative users will need to switch accounts to perform privileged actions, requiring login/logout functionality and potentially losing the convenience of SSO. Alerts sent to Global Administrators or TenantAdmins by default might not be received if these accounts lack application-based licenses. Proper alert routing must be configured to avoid missed notifications.",
+"RemediationProcedure": "Review the licenses assigned to administrative accounts. Remove licenses granting access to collaborative applications and assign Microsoft Entra ID P1 or P2 licenses if participation in Microsoft 365 security services is required. Configure alerts to be sent to valid email addresses for monitoring.",
+"AuditProcedure": "Log in to the Microsoft 365 Admin Center and review the licenses assigned to administrative accounts. Confirm that administrative accounts either have no licenses or are limited to Microsoft Entra ID P1 or P2 licenses without collaborative applications enabled.",
+"AdditionalInformation": "Reducing the application footprint of administrative accounts improves security by minimizing potential attack vectors. Special care should be taken to configure alert routing properly to ensure critical notifications are not missed.",
+"DefaultValue": "By default, administrative accounts may have licenses assigned based on organizational setup. Manual review and adjustment are necessary to comply with this recommendation.",
+"References": "CIS Microsoft 365 Foundations Benchmark v4.0, Section 1.1.4; Microsoft documentation on Entra ID licenses and privileged account security."
+}
+]
+},
+{
+"Id": "1.2.1",
+"Description": "Ensure that only organizationally managed/approved public groups exist",
+"Checks": [
+"admincenter_groups_not_public_visibility"
+],
+"Attributes": [
+{
+"Section": "1.Microsoft 365 admin center",
+"Profile": "Level 2",
+"AssessmentStatus": "Automated",
+"Description": "Microsoft 365 Groups enable shared resource access across Microsoft 365 services. The default privacy setting for groups is 'Public,' which allows users within the organization to access the group's resources. Ensure that only organizationally managed and approved public groups exist to prevent unauthorized access to sensitive information.",
+"RationaleStatement": "Public groups can be accessed by any user within the organization via several methods, such as self-adding through the Azure portal, sending an access request, or directly using the SharePoint URL. Without control over group privacy, sensitive organizational data might be exposed to unintended users.",
+"ImpactStatement": "Implementing this recommendation may result in an increased volume of access requests for group owners, particularly for groups previously intended to be public.",
+"RemediationProcedure": "Audit all Microsoft 365 public groups and ensure they are organizationally managed and approved. Convert unnecessary public groups to private groups and enforce strict policies for creating and approving public groups. Group owners should be instructed to monitor and review access requests.",
+"AuditProcedure": "Log in to the Microsoft 365 Admin Center and review the list of public groups. Verify that all public groups have been approved and are necessary for organizational purposes.",
+"AdditionalInformation": "Public groups expose data to all users within the organization, increasing the risk of accidental or unauthorized access. Periodic reviews of group privacy settings are recommended.",
+"DefaultValue": "By default, groups created in Microsoft 365 are set to 'Public' privacy.",
+"References": "CIS Microsoft 365 Foundations Benchmark v4.0, Section 1.2.1; Microsoft documentation on managing group privacy."
+}
+]
+},
+{
+"Id": "1.2.2",
+"Description": "Ensure sign-in to shared mailboxes is blocked",
+"Checks": [],
+"Attributes": [
+{
+"Section": "1.Microsoft 365 admin center",
+"Profile": "Level 1",
"AssessmentStatus": "Manuel",
|
||||
"Description": "Shared mailboxes are used when multiple people need access to the same mailbox for functions such as support or reception. These mailboxes are created with a corresponding user account that includes a system-generated password. To enhance security, sign-in should be blocked for these shared mailbox accounts, ensuring access is granted only through delegation.",
|
||||
"RationaleStatement": "Blocking sign-in for shared mailbox accounts prevents unauthorized access or direct sign-in, ensuring that all interactions with the shared mailbox are through authorized delegation. This reduces the risk of attackers exploiting shared mailboxes for malicious purposes such as sending emails with spoofed identities.",
|
||||
"ImpactStatement": "Blocking sign-in to shared mailboxes requires users to access these mailboxes only through delegation. Administrators will need to monitor and ensure proper access permissions are assigned.",
|
||||
"RemediationProcedure": "Log in to the Microsoft 365 Admin Center and locate the shared mailboxes. For each shared mailbox, verify that sign-in is blocked by reviewing the associated user account settings. If sign-in is not blocked, adjust the account settings to enforce this configuration.",
|
||||
"AuditProcedure": "Review all shared mailboxes in the Microsoft 365 Admin Center. Confirm that the user accounts associated with these mailboxes have sign-in blocked.",
|
||||
"AdditionalInformation": "Shared mailboxes are often a target for exploitation due to their broad access and functional role. Blocking sign-in significantly reduces the attack surface.",
|
||||
"DefaultValue": "By default, shared mailboxes may have sign-in enabled unless explicitly configured otherwise.",
|
||||
"References": "CIS Microsoft 365 Foundations Benchmark v4.0, Section 1.2.2; Microsoft documentation on managing shared mailboxes."
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -12,7 +12,7 @@ from prowler.lib.logger import logger

 timestamp = datetime.today()
 timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
-prowler_version = "5.1.0"
+prowler_version = "5.3.0"
 html_logo_url = "https://github.com/prowler-cloud/prowler/"
 square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
 aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
@@ -28,6 +28,7 @@ class Provider(str, Enum):
     GCP = "gcp"
     AZURE = "azure"
     KUBERNETES = "kubernetes"
+    MICROSOFT365 = "microsoft365"


 # Compliance
@@ -58,6 +59,7 @@ aws_services_json_file = "aws_regions_by_service.json"
 # gcp_zones_json_file = "gcp_zones.json"

 default_output_directory = getcwd() + "/output"
+tmp_output_directory = "/tmp/prowler_api_output"
 output_file_timestamp = timestamp.strftime("%Y%m%d%H%M%S")
 timestamp_iso = timestamp.isoformat(sep=" ", timespec="seconds")
 csv_file_suffix = ".csv"
@@ -378,6 +378,37 @@ aws:
   # Minimum retention period in hours for Kinesis streams
   min_kinesis_stream_retention_hours: 168 # 7 days

+  # Detect Secrets plugin configuration
+  detect_secrets_plugins: [
+    {"name": "ArtifactoryDetector"},
+    {"name": "AWSKeyDetector"},
+    {"name": "AzureStorageKeyDetector"},
+    {"name": "BasicAuthDetector"},
+    {"name": "CloudantDetector"},
+    {"name": "DiscordBotTokenDetector"},
+    {"name": "GitHubTokenDetector"},
+    {"name": "GitLabTokenDetector"},
+    {"name": "Base64HighEntropyString", "limit": 6.0},
+    {"name": "HexHighEntropyString", "limit": 3.0},
+    {"name": "IbmCloudIamDetector"},
+    {"name": "IbmCosHmacDetector"},
+    # {"name": "IPPublicDetector"}, https://github.com/Yelp/detect-secrets/pull/885
+    {"name": "JwtTokenDetector"},
+    {"name": "KeywordDetector"},
+    {"name": "MailchimpDetector"},
+    {"name": "NpmDetector"},
+    {"name": "OpenAIDetector"},
+    {"name": "PrivateKeyDetector"},
+    {"name": "PypiTokenDetector"},
+    {"name": "SendGridDetector"},
+    {"name": "SlackDetector"},
+    {"name": "SoftlayerDetector"},
+    {"name": "SquareOAuthDetector"},
+    {"name": "StripeDetector"},
+    # {"name": "TelegramBotTokenDetector"}, https://github.com/Yelp/detect-secrets/pull/878
+    {"name": "TwilioKeyDetector"},
+  ]
+

 # Azure Configuration
 azure:
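
For context, the entries above follow the `plugins_used` format consumed by the Yelp detect-secrets library. A minimal sketch of driving a scan with such a list, assuming the detect-secrets v1.x API (the scanned file name is illustrative):

import json
from detect_secrets import SecretsCollection
from detect_secrets.settings import transient_settings

# A subset of the plugin list from the configuration above.
detect_secrets_plugins = [
    {"name": "AWSKeyDetector"},
    {"name": "Base64HighEntropyString", "limit": 6.0},
    {"name": "HexHighEntropyString", "limit": 3.0},
]

secrets = SecretsCollection()
# transient_settings temporarily swaps in this plugin configuration for the scan.
with transient_settings({"plugins_used": detect_secrets_plugins}):
    secrets.scan_file("user_data.txt")  # illustrative file name

print(json.dumps(secrets.json(), indent=2))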
44	prowler/config/microsoft365_mutelist_example.yaml	(new file)
@@ -0,0 +1,44 @@
|
||||
### Account, Check and/or Region can be * to apply for all the cases.
|
||||
### Account == Microsoft365 Tenant and Region == Microsoft365 Location
|
||||
### Resources and tags are lists that can have either Regex or Keywords.
|
||||
### Tags is an optional list that matches on tuples of 'key=value' and are "ANDed" together.
|
||||
### Use an alternation Regex to match one of multiple tags with "ORed" logic.
|
||||
### For each check you can except Accounts, Regions, Resources and/or Tags.
|
||||
########################### MUTELIST EXAMPLE ###########################
|
||||
Mutelist:
|
||||
Accounts:
|
||||
"*":
|
||||
Checks:
|
||||
"admincenter_groups_not_public_visibility":
|
||||
Regions:
|
||||
- "westeurope"
|
||||
Resources:
|
||||
- "sqlserver1" # Will ignore sqlserver1 in check sqlserver_tde_encryption_enabled located in westeurope
|
||||
Description: "Findings related with the check sqlserver_tde_encryption_enabled is muted for westeurope region and sqlserver1 resource"
|
||||
"defender_*":
|
||||
Regions:
|
||||
- "*"
|
||||
Resources:
|
||||
- "*" # Will ignore every Defender check in every location
|
||||
"*":
|
||||
Regions:
|
||||
- "*"
|
||||
Resources:
|
||||
- "test"
|
||||
Tags:
|
||||
- "test=test" # Will ignore every resource containing the string "test" and the tags 'test=test' and
|
||||
- "project=test|project=stage" # either of ('project=test' OR project=stage) in Azure subscription 1 and every location
|
||||
|
||||
"*":
|
||||
Checks:
|
||||
"admincenter_*":
|
||||
Regions:
|
||||
- "*"
|
||||
Resources:
|
||||
- "*"
|
||||
Exceptions:
|
||||
Accounts:
|
||||
- "Tenant1"
|
||||
Regions:
|
||||
- "eastus"
|
||||
- "eastus2" # Will ignore every resource in admincenter checks except the ones in Tenant1 located in eastus or eastus2
@@ -139,13 +139,9 @@ def remove_custom_checks_module(input_folder: str, provider: str):
 def list_services(provider: str) -> set:
     available_services = set()
     checks_tuple = recover_checks_from_provider(provider)
+    split_character = "\\" if os.name == "nt" else "/"
     for _, check_path in checks_tuple:
         # Format: /absolute_path/prowler/providers/{provider}/services/{service_name}/{check_name}
-        if os.name == "nt":
-            service_name = check_path.split("\\")[-2]
-        else:
-            service_name = check_path.split("/")[-2]
-        available_services.add(service_name)
+        available_services.add(check_path.split(split_character)[-2])
     return sorted(available_services)
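
The refactor computes the path separator once and collapses the per-path branching into a single expression. A toy illustration with a hypothetical POSIX check path:

# Hypothetical check path; the service name is the parent directory of the check.
check_path = "/abs/prowler/providers/aws/services/ec2/ec2_instance_public_ip"
split_character = "/"  # would be "\\" on Windows (os.name == "nt")
print(check_path.split(split_character)[-2])  # -> "ec2"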
@@ -83,6 +83,7 @@ class CIS_Requirement_Attribute(BaseModel):
     """CIS Requirement Attribute"""

     Section: str
+    SubSection: Optional[str]
     Profile: CIS_Requirement_Attribute_Profile
     AssessmentStatus: CIS_Requirement_Attribute_AssessmentStatus
     Description: str
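
A quick sketch of why the new field is Optional: the reworked CIS benchmarks above split Section/SubSection, while older frameworks only carry Section. Assuming pydantic v1 semantics, where an Optional field defaults to None:

from typing import Optional
from pydantic import BaseModel

class Attribute(BaseModel):
    Section: str
    SubSection: Optional[str]  # defaults to None in pydantic v1

new_style = Attribute(Section="4. Database Services", SubSection="4.1 SQL Server - Auditing")
old_style = Attribute(Section="1.Microsoft 365 admin center")  # no SubSection present
print(old_style.SubSection)  # None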
@@ -3,9 +3,9 @@ import os
 import re
 import sys
 from abc import ABC, abstractmethod
-from dataclasses import dataclass
+from dataclasses import asdict, dataclass, is_dataclass
 from enum import Enum
-from typing import Set
+from typing import Any, Dict, Set

 from pydantic import BaseModel, ValidationError, validator
@@ -405,15 +405,34 @@
     status: str
     status_extended: str
     check_metadata: CheckMetadata
-    resource_metadata: dict
+    resource: dict
     resource_details: str
     resource_tags: list
     muted: bool

-    def __init__(self, metadata, resource=None):
+    def __init__(self, metadata: Dict, resource: Any) -> None:
+        """Initialize the Check's finding information.
+
+        Args:
+            metadata: The metadata of the check.
+            resource: Basic information about the resource. Defaults to None.
+                Only accepted dict, list, BaseModels (dict attribute), custom models (with to_dict attribute) and dataclasses.
+        """
         self.status = ""
         self.check_metadata = CheckMetadata.parse_raw(metadata)
-        self.resource_metadata = resource.dict() if resource else {}
+        if isinstance(resource, dict):
+            self.resource = resource
+        elif hasattr(resource, "dict"):
+            self.resource = resource.dict()
+        elif hasattr(resource, "to_dict"):
+            self.resource = resource.to_dict()
+        elif is_dataclass(resource):
+            self.resource = asdict(resource)
+        else:
+            logger.error(
+                f"Resource metadata {type(resource)} in {self.check_metadata.CheckID} could not be converted to dict"
+            )
+            self.resource = {}
         self.status_extended = ""
         self.resource_details = ""
         self.resource_tags = getattr(resource, "tags", []) if resource else []
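
The constructor now normalizes several resource shapes into a plain dict. A minimal standalone sketch of the same conversion chain, in the same branch order (names are illustrative):

from dataclasses import asdict, dataclass, is_dataclass

@dataclass
class Bucket:
    name: str
    region: str

def to_resource_dict(resource):
    # Mirrors the branch order of the new Check_Report.__init__.
    if isinstance(resource, dict):
        return resource
    if hasattr(resource, "dict"):      # pydantic BaseModel
        return resource.dict()
    if hasattr(resource, "to_dict"):   # custom models
        return resource.to_dict()
    if is_dataclass(resource):
        return asdict(resource)
    return {}  # anything else would be logged as an error and dropped

print(to_resource_dict(Bucket("logs", "us-east-1")))  # {'name': 'logs', 'region': 'us-east-1'}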
@@ -428,20 +447,13 @@
     resource_arn: str
     region: str

-    def __init__(self, metadata, resource_metadata=None):
-        super().__init__(metadata, resource_metadata)
-        if resource_metadata:
-            self.resource_id = (
-                getattr(resource_metadata, "id", None)
-                or getattr(resource_metadata, "name", None)
-                or ""
-            )
-            self.resource_arn = getattr(resource_metadata, "arn", "")
-            self.region = getattr(resource_metadata, "region", "")
-        else:
-            self.resource_id = ""
-            self.resource_arn = ""
-            self.region = ""
+    def __init__(self, metadata: Dict, resource: Any) -> None:
+        super().__init__(metadata, resource)
+        self.resource_id = (
+            getattr(resource, "id", None) or getattr(resource, "name", None) or ""
+        )
+        self.resource_arn = getattr(resource, "arn", "")
+        self.region = getattr(resource, "region", "")
|
||||
|
||||
|
||||
@dataclass
|
||||
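The old if resource_metadata: branch is no longer needed because getattr with a default already handles a missing resource. A standalone illustration (the SimpleNamespace stand-in is made up):

from types import SimpleNamespace

resource = SimpleNamespace(
    id="i-0abc123",
    arn="arn:aws:ec2:eu-west-1:123456789012:instance/i-0abc123",
    region="eu-west-1",
)

# getattr defaults make the old None check unnecessary:
print(getattr(resource, "id", None) or getattr(resource, "name", None) or "")  # i-0abc123
print(getattr(None, "arn", ""))  # '' -- also safe when no resource is passed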
@@ -453,12 +465,20 @@ class Check_Report_Azure(Check_Report):
     subscription: str
     location: str

-    def __init__(self, metadata):
-        super().__init__(metadata)
-        self.resource_name = ""
-        self.resource_id = ""
+    def __init__(self, metadata: Dict, resource: Any) -> None:
+        """Initialize the Azure Check's finding information.
+
+        Args:
+            metadata: The metadata of the check.
+            resource: Basic information about the resource.
+        """
+        super().__init__(metadata, resource)
+        self.resource_name = getattr(
+            resource, "name", getattr(resource, "resource_name", "")
+        )
+        self.resource_id = getattr(resource, "id", getattr(resource, "resource_id", ""))
         self.subscription = ""
-        self.location = "global"
+        self.location = getattr(resource, "location", "global")


 @dataclass
@@ -470,12 +490,29 @@ class Check_Report_GCP(Check_Report):
     project_id: str
     location: str

-    def __init__(self, metadata):
-        super().__init__(metadata)
-        self.resource_name = ""
-        self.resource_id = ""
-        self.project_id = ""
-        self.location = ""
+    def __init__(
+        self,
+        metadata: Dict,
+        resource: Any,
+        location=None,
+        resource_name=None,
+        resource_id=None,
+        project_id=None,
+    ) -> None:
+        super().__init__(metadata, resource)
+        self.resource_id = (
+            resource_id
+            or getattr(resource, "id", None)
+            or getattr(resource, "name", None)
+            or ""
+        )
+        self.resource_name = resource_name or getattr(resource, "name", "")
+        self.project_id = project_id or getattr(resource, "project_id", "")
+        self.location = (
+            location
+            or getattr(resource, "location", "")
+            or getattr(resource, "region", "")
+        )


 @dataclass
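The GCP variant additionally accepts explicit overrides that take precedence over whatever the resource object carries, via short-circuiting or-chains. A sketch of that precedence (names are illustrative); note that falsy overrides such as "" fall through to the resource, the usual trade-off of or-based defaults:

from types import SimpleNamespace


def resolve_location(resource, location=None):
    # Explicit argument wins; otherwise fall back to the resource's fields.
    return (
        location
        or getattr(resource, "location", "")
        or getattr(resource, "region", "")
    )


resource = SimpleNamespace(name="my-bucket", project_id="proj-123", location="us-central1")
print(resolve_location(resource))                  # us-central1 (from the resource)
print(resolve_location(resource, "europe-west1"))  # europe-west1 (override wins)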
@@ -487,11 +524,40 @@ class Check_Report_Kubernetes(Check_Report):
     resource_id: str
     namespace: str

-    def __init__(self, metadata):
-        super().__init__(metadata)
-        self.resource_name = ""
-        self.resource_id = ""
-        self.namespace = ""
+    def __init__(self, metadata: Dict, resource: Any) -> None:
+        super().__init__(metadata, resource)
+        self.resource_id = (
+            getattr(resource, "uid", None) or getattr(resource, "name", None) or ""
+        )
+        self.resource_name = getattr(resource, "name", "")
+        self.namespace = getattr(resource, "namespace", "cluster-wide")
+        if not self.namespace:
+            self.namespace = "cluster-wide"
+
+
+@dataclass
+class Check_Report_Microsoft365(Check_Report):
+    """Contains the Microsoft365 Check's finding information."""
+
+    resource_name: str
+    resource_id: str
+    tenant_id: str
+    location: str
+
+    def __init__(self, metadata: Dict, resource: Any) -> None:
+        """Initialize the Microsoft365 Check's finding information.
+
+        Args:
+            metadata: The metadata of the check.
+            resource: Basic information about the resource.
+        """
+        super().__init__(metadata, resource)
+        self.resource_name = getattr(
+            resource, "name", getattr(resource, "resource_name", "")
+        )
+        self.resource_id = getattr(resource, "id", getattr(resource, "resource_id", ""))
+        self.tenant_id = getattr(resource, "tenant_id", "")
+        self.location = getattr(resource, "location", "global")


 # Testing Pending
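The Kubernetes namespace handling needs both the getattr default and the follow-up guard: the default only covers a missing attribute, while the guard also catches an attribute that exists but is empty. A tiny illustration with a made-up stand-in:

from types import SimpleNamespace

cluster_scoped = SimpleNamespace(name="node-1", namespace="")  # attribute exists but is empty

namespace = getattr(cluster_scoped, "namespace", "cluster-wide")  # returns "" here
if not namespace:
    namespace = "cluster-wide"
print(namespace)  # cluster-wide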
@@ -26,7 +26,7 @@ class ProwlerArgumentParser:
         self.parser = argparse.ArgumentParser(
             prog="prowler",
             formatter_class=RawTextHelpFormatter,
-            usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,dashboard} ...",
+            usage="prowler [-h] [--version] {aws,azure,gcp,kubernetes,microsoft365,dashboard} ...",
             epilog="""
 Available Cloud Providers:
   {aws,azure,gcp,kubernetes}

@@ -34,6 +34,7 @@ Available Cloud Providers:
   azure             Azure Provider
   gcp               GCP Provider
   kubernetes        Kubernetes Provider
+  microsoft365      Microsoft 365 Provider

 Available components:
   dashboard         Local dashboard

@@ -72,7 +73,7 @@ Detailed documentation at https://docs.prowler.com
         # Init Providers Arguments
         init_providers_parser(self)

-        # Dahboard Parser
+        # Dashboard Parser
         init_dashboard_parser(self)

     def parse(self, args=None) -> argparse.Namespace:
@@ -14,7 +14,7 @@ def fill_common_finding_data(finding: dict, unix_timestamp: bool) -> dict:
         "status_extended": finding.status_extended,
         "muted": finding.muted,
         "resource_details": finding.resource_details,
-        # "resource_metadata": finding.resource_metadata, TODO: add resource_metadata to the finding
+        "resource": finding.resource,
         "resource_tags": unroll_tags(finding.resource_tags),
     }
     return finding_data
@@ -48,6 +48,7 @@ class AWSCIS(ComplianceOutput):
                     Requirements_Id=requirement.Id,
                     Requirements_Description=requirement.Description,
                     Requirements_Attributes_Section=attribute.Section,
+                    Requirements_Attributes_SubSection=attribute.SubSection,
                     Requirements_Attributes_Profile=attribute.Profile,
                     Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
                     Requirements_Attributes_Description=attribute.Description,

@@ -78,6 +79,7 @@ class AWSCIS(ComplianceOutput):
                     Requirements_Id=requirement.Id,
                     Requirements_Description=requirement.Description,
                     Requirements_Attributes_Section=attribute.Section,
+                    Requirements_Attributes_SubSection=attribute.SubSection,
                     Requirements_Attributes_Profile=attribute.Profile,
                     Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
                     Requirements_Attributes_Description=attribute.Description,

@@ -48,6 +48,7 @@ class AzureCIS(ComplianceOutput):
                     Requirements_Id=requirement.Id,
                     Requirements_Description=requirement.Description,
                     Requirements_Attributes_Section=attribute.Section,
+                    Requirements_Attributes_SubSection=attribute.SubSection,
                     Requirements_Attributes_Profile=attribute.Profile,
                     Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
                     Requirements_Attributes_Description=attribute.Description,

@@ -79,6 +80,7 @@ class AzureCIS(ComplianceOutput):
                     Requirements_Id=requirement.Id,
                     Requirements_Description=requirement.Description,
                     Requirements_Attributes_Section=attribute.Section,
+                    Requirements_Attributes_SubSection=attribute.SubSection,
                     Requirements_Attributes_Profile=attribute.Profile,
                     Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
                     Requirements_Attributes_Description=attribute.Description,