Compare commits

..

26 Commits
5.1.0 ... 5.1.5

Author SHA1 Message Date
Prowler Bot
9ad4944142 fix(filters): fix dynamic filters (#6643)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-21 13:40:18 +01:00
Prowler Bot
7f33ea76a4 fix(OCSF): fix OCSF output when timestamp is UNIX format (#6627)
Co-authored-by: Rubén De la Torre Vico <rubendltv22@gmail.com>
2025-01-20 18:03:51 -05:00
Prowler Bot
1140c29384 fix(aws): list tags for DocumentDB clusters (#6622)
Co-authored-by: Kay Agahd <kagahd@users.noreply.github.com>
2025-01-20 17:22:06 -05:00
Prowler Bot
2441a62f39 fix: update Azure CIS with existing App checks (#6625)
Co-authored-by: Rubén De la Torre Vico <rubendltv22@gmail.com>
2025-01-20 16:27:14 -05:00
Pepe Fagoaga
c26a231fc1 chore(version): Update version to 5.1.5 (#6618) 2025-01-20 15:07:58 -05:00
Prowler Bot
2fb2315037 chore(RBAC): add permission's info (#6617)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 17:31:52 +01:00
Prowler Bot
a9e475481a fix(snippet-id): improve provider ID readability in tables (#6616)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 17:28:04 +01:00
Prowler Bot
826d7c4dc3 fix(rbac): remove invalid required permission (#6614)
Co-authored-by: Adrián Jesús Peña Rodríguez <adrianjpr@gmail.com>
2025-01-20 17:02:31 +01:00
Prowler Bot
b7f4b37f66 feat(api): restrict the deletion of users, only the user of the request can be deleted (#6613)
Co-authored-by: Adrián Jesús Peña Rodríguez <adrianjpr@gmail.com>
2025-01-20 17:02:21 +01:00
Prowler Bot
193d691bfe fix(RBAC): tweaks for edit role form (#6610)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 14:12:43 +01:00
Prowler Bot
a359bc581c fix(RBAC): restore manage_account permission for roles (#6603)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-20 11:53:08 +01:00
Prowler Bot
9a28ff025a fix(sqs): fix flaky test (#6595)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
2025-01-17 12:40:11 -05:00
Prowler Bot
f1c7050700 fix(apigatewayv2): managed exception NotFoundException (#6590)
Co-authored-by: Hugo Pereira Brito <101209179+HugoPBrito@users.noreply.github.com>
2025-01-17 09:27:19 -05:00
Pepe Fagoaga
9391c27b9e chore(version): Update version to 5.1.4 (#6591) 2025-01-17 09:25:35 -05:00
Prowler Bot
4c54de092f feat(findings): Add resource_tag filters for findings endpoint (#6587)
Co-authored-by: Víctor Fernández Poyatos <victor@prowler.com>
2025-01-17 19:01:51 +05:45
Prowler Bot
690c482a43 fix(gcp): fix flaky tests from dns service (#6571)
Co-authored-by: Daniel Barranquero <74871504+danibarranqueroo@users.noreply.github.com>
2025-01-17 08:15:34 -05:00
Prowler Bot
ad2d857c6f feat(findings): add /findings/metadata to retrieve dynamic filters information (#6586)
Co-authored-by: Víctor Fernández Poyatos <victor@prowler.com>
2025-01-17 18:47:59 +05:45
Pepe Fagoaga
07ee59d2ef chore(version): Update version to 5.1.3 (#6584) 2025-01-17 18:46:08 +05:45
Prowler Bot
bec4617d0a fix(providers): update the label and placeholder based on the cloud provider (#6582)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-17 12:33:25 +01:00
Prowler Bot
94916f8305 fix(findings): remove filter delta_in applied by default (#6579)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-17 11:06:07 +01:00
Prowler Bot
44de651be3 fix(cis): add subsections if needed (#6568)
Co-authored-by: Pedro Martín <pedromarting3@gmail.com>
2025-01-16 14:49:49 -05:00
Prowler Bot
bdcba9c642 fix(detect_secrets): refactor logic for detect-secrets (#6566)
Co-authored-by: Pedro Martín <pedromarting3@gmail.com>
2025-01-16 13:07:18 -05:00
Prowler Bot
c172f75f1a fix(dep): address compatibility issues (#6557)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-16 14:35:06 +01:00
Prowler Bot
ec492fa13a feat(filters): add resource type filter for findings (#6525)
Co-authored-by: Pablo Lara <larabjj@gmail.com>
2025-01-15 08:43:49 +01:00
Prowler Bot
702659959c fix(Azure TDE): add filter for master DB (#6514)
Co-authored-by: johannes-engler-mw <132657752+johannes-engler-mw@users.noreply.github.com>
2025-01-14 15:25:27 -05:00
Pepe Fagoaga
fef332a591 chore(version): set next fixes 2025-01-14 18:05:04 +01:00
65 changed files with 12015 additions and 3962 deletions

View File

@@ -319,6 +319,27 @@ class FindingFilter(FilterSet):
field_name="resources__type", lookup_expr="icontains"
)
resource_tag_key = CharFilter(field_name="resources__tags__key")
resource_tag_key__in = CharInFilter(
field_name="resources__tags__key", lookup_expr="in"
)
resource_tag_key__icontains = CharFilter(
field_name="resources__tags__key", lookup_expr="icontains"
)
resource_tag_value = CharFilter(field_name="resources__tags__value")
resource_tag_value__in = CharInFilter(
field_name="resources__tags__value", lookup_expr="in"
)
resource_tag_value__icontains = CharFilter(
field_name="resources__tags__value", lookup_expr="icontains"
)
resource_tags = CharInFilter(
method="filter_resource_tag",
lookup_expr="in",
help_text="Filter by resource tags `key:value` pairs.\nMultiple values may be "
"separated by commas.",
)
scan = UUIDFilter(method="filter_scan_id")
scan__in = UUIDInFilter(method="filter_scan_id_in")
@@ -353,6 +374,12 @@ class FindingFilter(FilterSet):
},
}
@property
def qs(self):
    """Return the filtered queryset with duplicates removed.

    Filtering through the resources/tags many-to-many joins can emit the
    same finding row multiple times; de-duplicate here so every consumer
    of the filterset sees distinct findings.
    """
    return super().qs.distinct()
# Convert filter values to UUIDv7 values for use with partitioning
def filter_scan_id(self, queryset, name, value):
try:
@@ -426,6 +453,16 @@ class FindingFilter(FilterSet):
return queryset.filter(id__lte=end).filter(inserted_at__lte=value)
def filter_resource_tag(self, queryset, name, value):
    """Filter findings by resource tag `key:value` pairs (OR semantics).

    Each item in `value` is matched case-insensitively against both the
    tag key and the tag value. Results are de-duplicated because the tag
    join is many-to-many.

    Args:
        queryset: the findings queryset to narrow.
        name: the filter field name (unused, required by django-filter).
        value: list of `key:value` strings from the query parameter.

    Returns:
        The filtered, distinct queryset.
    """
    overall_query = Q()
    for key_value_pair in value:
        # partition() never raises, unlike `split(":", 1)` tuple
        # unpacking, which crashed with ValueError (-> HTTP 500) on an
        # item missing a colon. With no colon the value part is "", and
        # icontains with "" matches any tag value, i.e. key-only match.
        tag_key, _, tag_value = key_value_pair.partition(":")
        overall_query |= Q(
            resources__tags__key__icontains=tag_key,
            resources__tags__value__icontains=tag_value,
        )
    return queryset.filter(overall_query).distinct()
@staticmethod
def maybe_date_to_datetime(value):
dt = value

View File

@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
title: Prowler API
version: 1.1.0
version: 1.2.0
description: |-
Prowler API specification.
@@ -477,6 +477,51 @@ paths:
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tag_key]
schema:
type: string
- in: query
name: filter[resource_tag_key__icontains]
schema:
type: string
- in: query
name: filter[resource_tag_key__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tag_value]
schema:
type: string
- in: query
name: filter[resource_tag_value__icontains]
schema:
type: string
- in: query
name: filter[resource_tag_value__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tags]
schema:
type: array
items:
type: string
description: |-
Filter by resource tags `key:value` pairs.
Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_type]
schema:
@@ -983,6 +1028,523 @@ paths:
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tag_key]
schema:
type: string
- in: query
name: filter[resource_tag_key__icontains]
schema:
type: string
- in: query
name: filter[resource_tag_key__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tag_value]
schema:
type: string
- in: query
name: filter[resource_tag_value__icontains]
schema:
type: string
- in: query
name: filter[resource_tag_value__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tags]
schema:
type: array
items:
type: string
description: |-
Filter by resource tags `key:value` pairs.
Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_type]
schema:
type: string
- in: query
name: filter[resource_type__icontains]
schema:
type: string
- in: query
name: filter[resource_type__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_uid]
schema:
type: string
- in: query
name: filter[resource_uid__icontains]
schema:
type: string
- in: query
name: filter[resource_uid__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resources]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[scan]
schema:
type: string
format: uuid
- in: query
name: filter[scan__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- name: filter[search]
required: false
in: query
description: A search term.
schema:
type: string
- in: query
name: filter[service]
schema:
type: string
- in: query
name: filter[service__icontains]
schema:
type: string
- in: query
name: filter[service__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[severity]
schema:
type: string
enum:
- critical
- high
- informational
- low
- medium
description: |-
* `critical` - Critical
* `high` - High
* `medium` - Medium
* `low` - Low
* `informational` - Informational
- in: query
name: filter[severity__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[status]
schema:
type: string
enum:
- FAIL
- MANUAL
- MUTED
- PASS
description: |-
* `FAIL` - Fail
* `PASS` - Pass
* `MANUAL` - Manual
* `MUTED` - Muted
- in: query
name: filter[status__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[uid]
schema:
type: string
- in: query
name: filter[uid__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[updated_at]
schema:
type: string
format: date
- in: query
name: filter[updated_at__gte]
schema:
type: string
format: date-time
- in: query
name: filter[updated_at__lte]
schema:
type: string
format: date-time
- name: sort
required: false
in: query
description: '[list of fields to sort by](https://jsonapi.org/format/#fetching-sorting)'
schema:
type: array
items:
type: string
enum:
- id
- -id
- status
- -status
- severity
- -severity
- check_id
- -check_id
- inserted_at
- -inserted_at
- updated_at
- -updated_at
explode: false
tags:
- Finding
security:
- jwtAuth: []
deprecated: true
responses:
'200':
content:
application/vnd.api+json:
schema:
$ref: '#/components/schemas/FindingDynamicFilterResponse'
description: ''
/api/v1/findings/metadata:
get:
operationId: findings_metadata_retrieve
description: Fetch unique metadata values from a set of findings. This is useful
for dynamic filtering.
summary: Retrieve metadata values from findings
parameters:
- in: query
name: fields[findings-metadata]
schema:
type: array
items:
type: string
enum:
- services
- regions
- resource_types
- tags
description: endpoint return only specific fields in the response on a per-type
basis by including a fields[TYPE] query parameter.
explode: false
- in: query
name: filter[check_id]
schema:
type: string
- in: query
name: filter[check_id__icontains]
schema:
type: string
- in: query
name: filter[check_id__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[delta]
schema:
type: string
nullable: true
enum:
- changed
- new
description: |-
* `new` - New
* `changed` - Changed
- in: query
name: filter[delta__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[id]
schema:
type: string
format: uuid
- in: query
name: filter[id__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[impact]
schema:
type: string
enum:
- critical
- high
- informational
- low
- medium
description: |-
* `critical` - Critical
* `high` - High
* `medium` - Medium
* `low` - Low
* `informational` - Informational
- in: query
name: filter[impact__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[inserted_at]
schema:
type: string
format: date
- in: query
name: filter[inserted_at__date]
schema:
type: string
format: date
- in: query
name: filter[inserted_at__gte]
schema:
type: string
format: date
- in: query
name: filter[inserted_at__lte]
schema:
type: string
format: date
- in: query
name: filter[provider]
schema:
type: string
format: uuid
- in: query
name: filter[provider__in]
schema:
type: array
items:
type: string
format: uuid
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_alias]
schema:
type: string
- in: query
name: filter[provider_alias__icontains]
schema:
type: string
- in: query
name: filter[provider_alias__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[provider_type]
schema:
type: string
enum:
- aws
- azure
- gcp
- kubernetes
description: |-
* `aws` - AWS
* `azure` - Azure
* `gcp` - GCP
* `kubernetes` - Kubernetes
- in: query
name: filter[provider_type__in]
schema:
type: array
items:
type: string
enum:
- aws
- azure
- gcp
- kubernetes
description: |-
Multiple values may be separated by commas.
* `aws` - AWS
* `azure` - Azure
* `gcp` - GCP
* `kubernetes` - Kubernetes
explode: false
style: form
- in: query
name: filter[provider_uid]
schema:
type: string
- in: query
name: filter[provider_uid__icontains]
schema:
type: string
- in: query
name: filter[provider_uid__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[region]
schema:
type: string
- in: query
name: filter[region__icontains]
schema:
type: string
- in: query
name: filter[region__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_name]
schema:
type: string
- in: query
name: filter[resource_name__icontains]
schema:
type: string
- in: query
name: filter[resource_name__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tag_key]
schema:
type: string
- in: query
name: filter[resource_tag_key__icontains]
schema:
type: string
- in: query
name: filter[resource_tag_key__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tag_value]
schema:
type: string
- in: query
name: filter[resource_tag_value__icontains]
schema:
type: string
- in: query
name: filter[resource_tag_value__in]
schema:
type: array
items:
type: string
description: Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_tags]
schema:
type: array
items:
type: string
description: |-
Filter by resource tags `key:value` pairs.
Multiple values may be separated by commas.
explode: false
style: form
- in: query
name: filter[resource_type]
schema:
@@ -1168,11 +1730,11 @@ paths:
security:
- jwtAuth: []
responses:
'201':
'200':
content:
application/vnd.api+json:
schema:
$ref: '#/components/schemas/OpenApiResponseResponse'
$ref: '#/components/schemas/FindingMetadataResponse'
description: ''
/api/v1/invitations/accept:
post:
@@ -2948,9 +3510,7 @@ paths:
- name
- manage_users
- manage_account
- manage_billing
- manage_providers
- manage_integrations
- manage_scans
- permission_state
- unlimited_visibility
@@ -3068,12 +3628,8 @@ paths:
- -manage_users
- manage_account
- -manage_account
- manage_billing
- -manage_billing
- manage_providers
- -manage_providers
- manage_integrations
- -manage_integrations
- manage_scans
- -manage_scans
- permission_state
@@ -3147,9 +3703,7 @@ paths:
- name
- manage_users
- manage_account
- manage_billing
- manage_providers
- manage_integrations
- manage_scans
- permission_state
- unlimited_visibility
@@ -4825,8 +5379,8 @@ paths:
description: ''
delete:
operationId: users_destroy
description: Remove a user account from the system.
summary: Delete a user account
description: Remove the current user account from the system.
summary: Delete the user account
parameters:
- in: path
name: id
@@ -5458,6 +6012,92 @@ components:
readOnly: true
required:
- scan
FindingDynamicFilter:
type: object
required:
- type
- id
additionalProperties: false
properties:
type:
allOf:
- $ref: '#/components/schemas/FindingDynamicFilterTypeEnum'
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common attributes
and relationships.
id: {}
attributes:
type: object
properties:
services:
type: array
items:
type: string
regions:
type: array
items:
type: string
required:
- services
- regions
FindingDynamicFilterResponse:
type: object
properties:
data:
$ref: '#/components/schemas/FindingDynamicFilter'
required:
- data
FindingDynamicFilterTypeEnum:
type: string
enum:
- finding-dynamic-filters
FindingMetadata:
type: object
required:
- type
- id
additionalProperties: false
properties:
type:
allOf:
- $ref: '#/components/schemas/FindingMetadataTypeEnum'
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common attributes
and relationships.
id: {}
attributes:
type: object
properties:
services:
type: array
items:
type: string
regions:
type: array
items:
type: string
resource_types:
type: array
items:
type: string
tags:
description: Tags are described as key-value pairs.
required:
- services
- regions
- resource_types
- tags
FindingMetadataResponse:
type: object
properties:
data:
$ref: '#/components/schemas/FindingMetadata'
required:
- data
FindingMetadataTypeEnum:
type: string
enum:
- findings-metadata
FindingResponse:
type: object
properties:
@@ -5902,8 +6542,6 @@ components:
- data
description: A related resource object from type roles
title: roles
required:
- roles
InvitationUpdateResponse:
type: object
properties:
@@ -5915,7 +6553,6 @@ components:
type: object
required:
- type
- id
additionalProperties: false
properties:
type:
@@ -5924,9 +6561,6 @@ components:
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common attributes
and relationships.
id:
type: string
format: uuid
attributes:
type: object
properties:
@@ -6437,8 +7071,6 @@ components:
- data
description: A related resource object from type roles
title: roles
required:
- roles
required:
- data
PatchedProviderGroupMembershipRequest:
@@ -6850,12 +7482,8 @@ components:
type: boolean
manage_account:
type: boolean
manage_billing:
type: boolean
manage_providers:
type: boolean
manage_integrations:
type: boolean
manage_scans:
type: boolean
permission_state:
@@ -7131,37 +7759,6 @@ components:
required:
- name
- email
relationships:
type: object
properties:
roles:
type: object
properties:
data:
type: array
items:
type: object
properties:
id:
type: string
format: uuid
title: Resource Identifier
description: The identifier of the related object.
type:
type: string
enum:
- roles
title: Resource Type Name
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share
common attributes and relationships.
required:
- id
- type
required:
- data
description: A related resource object from type roles
title: roles
required:
- data
Provider:
@@ -8537,12 +9134,8 @@ components:
type: boolean
manage_account:
type: boolean
manage_billing:
type: boolean
manage_providers:
type: boolean
manage_integrations:
type: boolean
manage_scans:
type: boolean
permission_state:
@@ -8670,12 +9263,8 @@ components:
type: boolean
manage_account:
type: boolean
manage_billing:
type: boolean
manage_providers:
type: boolean
manage_integrations:
type: boolean
manage_scans:
type: boolean
permission_state:
@@ -8808,12 +9397,8 @@ components:
type: boolean
manage_account:
type: boolean
manage_billing:
type: boolean
manage_providers:
type: boolean
manage_integrations:
type: boolean
manage_scans:
type: boolean
permission_state:
@@ -9877,37 +10462,6 @@ components:
required:
- name
- email
relationships:
type: object
properties:
roles:
type: object
properties:
data:
type: array
items:
type: object
properties:
id:
type: string
format: uuid
title: Resource Identifier
description: The identifier of the related object.
type:
type: string
enum:
- roles
title: Resource Type Name
description: The [type](https://jsonapi.org/format/#document-resource-object-identification)
member is used to describe resource objects that share common
attributes and relationships.
required:
- id
- type
required:
- data
description: A related resource object from type roles
title: roles
UserUpdateResponse:
type: object
properties:

View File

@@ -261,6 +261,16 @@ class TestUserViewSet:
assert response.status_code == status.HTTP_204_NO_CONTENT
assert not User.objects.filter(id=create_test_user.id).exists()
def test_users_destroy_other_user(
    self, authenticated_client, create_test_user, users_fixture
):
    """Attempting to delete another user's account must be rejected."""
    other_user = users_fixture[2]
    url = reverse("user-detail", kwargs={"pk": str(other_user.id)})
    response = authenticated_client.delete(url)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    # The requesting user's own account is untouched by the failed call.
    assert User.objects.filter(id=create_test_user.id).exists()
def test_users_destroy_invalid_user(self, authenticated_client, create_test_user):
another_user = User.objects.create_user(
password="otherpassword", email="other@example.com"
@@ -268,7 +278,7 @@ class TestUserViewSet:
response = authenticated_client.delete(
reverse("user-detail", kwargs={"pk": another_user.id})
)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert User.objects.filter(id=another_user.id).exists()
@pytest.mark.parametrize(
@@ -2444,6 +2454,15 @@ class TestFindingViewSet:
("search", "ec2", 2),
# full text search on finding tags
("search", "value2", 2),
("resource_tag_key", "key", 2),
("resource_tag_key__in", "key,key2", 2),
("resource_tag_key__icontains", "key", 2),
("resource_tag_value", "value", 2),
("resource_tag_value__in", "value,value2", 2),
("resource_tag_value__icontains", "value", 2),
("resource_tags", "key:value", 2),
("resource_tags", "not:exists", 0),
("resource_tags", "not:exists,key:value", 2),
]
),
)
@@ -2582,30 +2601,34 @@ class TestFindingViewSet:
)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_findings_services_regions_retrieve(
self, authenticated_client, findings_fixture
):
def test_findings_metadata_retrieve(self, authenticated_client, findings_fixture):
    """/findings/metadata returns the unique services, regions, resource
    types and tags for the filtered findings.

    Note: the original block contained stripped-diff leftovers — a second
    positional `reverse("finding-findings_services_regions")` argument and a
    contradictory `type == "finding-dynamic-filters"` assertion — which are
    removed here; only the post-change behavior is asserted.
    """
    finding_1, *_ = findings_fixture
    response = authenticated_client.get(
        reverse("finding-metadata"),
        {"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d")},
    )
    data = response.json()

    expected_services = {"ec2", "s3"}
    expected_regions = {"eu-west-1", "us-east-1"}
    expected_tags = {"key": ["value"], "key2": ["value2"]}
    expected_resource_types = {"prowler-test"}

    assert data["data"]["type"] == "findings-metadata"
    assert data["data"]["id"] is None
    assert set(data["data"]["attributes"]["services"]) == expected_services
    assert set(data["data"]["attributes"]["regions"]) == expected_regions
    assert (
        set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
    )
    assert data["data"]["attributes"]["tags"] == expected_tags
def test_findings_services_regions_severity_retrieve(
def test_findings_metadata_severity_retrieve(
self, authenticated_client, findings_fixture
):
finding_1, *_ = findings_fixture
response = authenticated_client.get(
reverse("finding-findings_services_regions"),
reverse("finding-metadata"),
{
"filter[severity__in]": ["low", "medium"],
"filter[inserted_at]": finding_1.updated_at.strftime("%Y-%m-%d"),
@@ -2615,26 +2638,34 @@ class TestFindingViewSet:
expected_services = {"s3"}
expected_regions = {"eu-west-1"}
expected_tags = {"key": ["value"], "key2": ["value2"]}
expected_resource_types = {"prowler-test"}
assert data["data"]["type"] == "finding-dynamic-filters"
assert data["data"]["type"] == "findings-metadata"
assert data["data"]["id"] is None
assert set(data["data"]["attributes"]["services"]) == expected_services
assert set(data["data"]["attributes"]["regions"]) == expected_regions
assert (
set(data["data"]["attributes"]["resource_types"]) == expected_resource_types
)
assert data["data"]["attributes"]["tags"] == expected_tags
def test_findings_metadata_future_date(self, authenticated_client):
    """A future inserted_at date matches no findings, so every metadata
    collection comes back empty.

    Note: the original block contained stripped-diff leftovers — a duplicate
    pre-rename `def`/`reverse` line and a contradictory
    `type == "finding-dynamic-filters"` assertion — which are removed here.
    """
    response = authenticated_client.get(
        reverse("finding-metadata"),
        {"filter[inserted_at]": "2048-01-01"},
    )
    data = response.json()

    assert data["data"]["type"] == "findings-metadata"
    assert data["data"]["id"] is None
    assert data["data"]["attributes"]["services"] == []
    assert data["data"]["attributes"]["regions"] == []
    assert data["data"]["attributes"]["tags"] == {}
    assert data["data"]["attributes"]["resource_types"] == []
def test_findings_services_regions_invalid_date(self, authenticated_client):
def test_findings_metadata_invalid_date(self, authenticated_client):
response = authenticated_client.get(
reverse("finding-findings_services_regions"),
reverse("finding-metadata"),
{"filter[inserted_at]": "2048-01-011"},
)
assert response.json() == {

View File

@@ -917,6 +917,7 @@ class FindingSerializer(RLSSerializer):
}
# To be removed when the related endpoint is removed as well
class FindingDynamicFilterSerializer(serializers.Serializer):
services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
@@ -925,6 +926,18 @@ class FindingDynamicFilterSerializer(serializers.Serializer):
resource_name = "finding-dynamic-filters"
class FindingMetadataSerializer(serializers.Serializer):
    # Read-only response shape for the /findings/metadata endpoint: the
    # unique filter values (services, regions, resource types, tags)
    # aggregated from a filtered set of findings.
    # Distinct service names present in the filtered findings.
    services = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    # Distinct cloud regions present in the filtered findings.
    regions = serializers.ListField(child=serializers.CharField(), allow_empty=True)
    # Distinct resource type strings present in the filtered findings.
    resource_types = serializers.ListField(
        child=serializers.CharField(), allow_empty=True
    )
    # Mapping of tag key -> list of values; free-form, hence JSONField.
    tags = serializers.JSONField(help_text="Tags are described as key-value pairs.")

    class Meta:
        # JSON:API resource type emitted for this payload.
        resource_name = "findings-metadata"
# Provider secrets
class BaseWriteProviderSecretSerializer(BaseWriteSerializer):
@staticmethod

View File

@@ -4,6 +4,7 @@ from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.search import SearchQuery
from django.db import transaction
from django.db.models import Count, F, OuterRef, Prefetch, Q, Subquery, Sum
from django.db.models.functions import JSONObject
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
@@ -87,6 +88,7 @@ from api.v1.serializers import (
ComplianceOverviewFullSerializer,
ComplianceOverviewSerializer,
FindingDynamicFilterSerializer,
FindingMetadataSerializer,
FindingSerializer,
InvitationAcceptSerializer,
InvitationCreateSerializer,
@@ -192,7 +194,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.1.1"
spectacular_settings.VERSION = "1.2.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
@@ -275,8 +277,8 @@ class SchemaView(SpectacularAPIView):
),
destroy=extend_schema(
tags=["User"],
summary="Delete a user account",
description="Remove a user account from the system.",
summary="Delete the user account",
description="Remove the current user account from the system.",
),
me=extend_schema(
tags=["User"],
@@ -340,6 +342,12 @@ class UserViewSet(BaseUserViewset):
status=status.HTTP_200_OK,
)
def destroy(self, request, *args, **kwargs):
    """Delete a user account.

    Only the account of the requesting user may be deleted; any other
    target id is rejected with a validation error.
    """
    requesting_user_id = str(self.request.user.id)
    if requesting_user_id != kwargs["pk"]:
        raise ValidationError("Only the current user can be deleted.")
    return super().destroy(request, *args, **kwargs)
@extend_schema(
parameters=[
OpenApiParameter(
@@ -1044,7 +1052,7 @@ class ScanViewSet(BaseRLSViewSet):
"""
if self.request.method in SAFE_METHODS:
# No permissions required for GET requests
self.required_permissions = [Permissions.MANAGE_PROVIDERS]
self.required_permissions = []
else:
# Require permission for non-GET requests
self.required_permissions = [Permissions.MANAGE_SCANS]
@@ -1274,7 +1282,13 @@ class ResourceViewSet(BaseRLSViewSet):
tags=["Finding"],
summary="Retrieve the services and regions that are impacted by findings",
description="Fetch services and regions affected in findings.",
responses={201: OpenApiResponse(response=MembershipSerializer)},
filters=True,
deprecated=True,
),
metadata=extend_schema(
tags=["Finding"],
summary="Retrieve metadata values from findings",
description="Fetch unique metadata values from a set of findings. This is useful for dynamic filtering.",
filters=True,
),
)
@@ -1308,6 +1322,8 @@ class FindingViewSet(BaseRLSViewSet):
def get_serializer_class(self):
    """Select the serializer for the custom filter/metadata actions,
    falling back to the viewset default for everything else."""
    action_serializers = {
        "findings_services_regions": FindingDynamicFilterSerializer,
        "metadata": FindingMetadataSerializer,
    }
    selected = action_serializers.get(self.action)
    if selected is not None:
        return selected
    return super().get_serializer_class()
@@ -1376,6 +1392,51 @@ class FindingViewSet(BaseRLSViewSet):
return Response(data=serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=["get"], url_name="metadata")
def metadata(self, request):
    """Return unique metadata values (services, regions, resource types,
    tags) aggregated from the filtered findings, for dynamic filter UIs.

    Fix: the original normalized `result["regions"]` twice (copy-paste)
    while the loop below normalizes every aggregate field exactly once.
    """
    filtered_queryset = self.filter_queryset(self.get_queryset())
    result = filtered_queryset.aggregate(
        services=ArrayAgg("resources__service", flat=True, distinct=True),
        regions=ArrayAgg("resources__region", flat=True, distinct=True),
        tags=ArrayAgg(
            JSONObject(
                key=F("resources__tags__key"), value=F("resources__tags__value")
            ),
            distinct=True,
            # Exclude the null row produced when a finding has no tags.
            filter=Q(resources__tags__key__isnull=False),
        ),
        resource_types=ArrayAgg("resources__type", flat=True, distinct=True),
    )

    # ArrayAgg over an empty queryset yields None; normalize to [].
    for field in ("services", "regions", "resource_types", "tags"):
        if result[field] is None:
            result[field] = []

    # Group tag values by key: [{"key": k, "value": v}, ...] -> {k: [v, ...]}
    tags_dict = {}
    for pair in result["tags"]:
        tags_dict.setdefault(pair["key"], []).append(pair["value"])
    result["tags"] = tags_dict

    serializer = self.get_serializer(data=result)
    serializer.is_valid(raise_exception=True)
    return Response(data=serializer.data, status=status.HTTP_200_OK)
@extend_schema_view(
list=extend_schema(

View File

@@ -455,7 +455,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -476,7 +477,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -497,7 +499,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,7 +521,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -540,7 +544,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -561,7 +566,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -582,7 +588,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",

View File

@@ -455,7 +455,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.",
@@ -476,7 +477,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -497,7 +499,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -518,7 +521,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -540,7 +544,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -561,7 +566,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -582,7 +588,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -603,7 +610,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -624,7 +632,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -645,7 +654,8 @@
],
"Attributes": [
{
"Section": "2.4 Elastic File System (EFS)",
"Section": "2. Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",

View File

@@ -474,7 +474,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -495,7 +496,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -516,7 +518,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -538,7 +541,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -559,7 +563,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -580,7 +585,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -601,7 +607,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -622,7 +629,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -643,7 +651,8 @@
],
"Attributes": [
{
"Section": "2.4 Elastic File System (EFS)",
"Section": "2. Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",

View File

@@ -474,7 +474,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.",
@@ -495,7 +496,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -516,7 +518,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Amazon S3 buckets can contain sensitive data, that for security purposes should be discovered, monitored, classified and protected. Macie along with other 3rd party tools can automatically provide an inventory of Amazon S3 buckets.",
@@ -538,7 +541,8 @@
],
"Attributes": [
{
"Section": "2.1. Simple Storage Service (S3)",
"Section": "2. Storage",
"SubSection": "2.1. Simple Storage Service (S3)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon S3 provides `Block public access (bucket settings)` and `Block public access (account settings)` to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. While enabled, `Block public access (bucket settings)` prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, `Block public access (account settings)` prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.",
@@ -559,7 +563,8 @@
],
"Attributes": [
{
"Section": "2.2. Elastic Compute Cloud (EC2)",
"Section": "2. Storage",
"SubSection": "2.2. Elastic Compute Cloud (EC2)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Elastic Compute Cloud (EC2) supports encryption at rest when using the Elastic Block Store (EBS) service. While disabled by default, forcing encryption at EBS volume creation is supported.",
@@ -580,7 +585,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Amazon RDS encrypted DB instances use the industry standard AES-256 encryption algorithm to encrypt your data on the server that hosts your Amazon RDS DB instances. After your data is encrypted, Amazon RDS handles authentication of access and decryption of your data transparently with a minimal impact on performance.",
@@ -601,7 +607,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that RDS database instances have the Auto Minor Version Upgrade flag enabled in order to receive automatically minor engine upgrades during the specified maintenance window. So, RDS instances can get the new features, bug fixes, and security patches for their database engines.",
@@ -622,7 +629,8 @@
],
"Attributes": [
{
"Section": "2.3. Relational Database Service (RDS)",
"Section": "2. Storage",
"SubSection": "2.3. Relational Database Service (RDS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure and verify that RDS database instances provisioned in your AWS account do restrict unauthorized access in order to minimize security risks. To restrict access to any publicly accessible RDS database instance, you must disable the database Publicly Accessible flag and update the VPC security group associated with the instance.",
@@ -643,7 +651,8 @@
],
"Attributes": [
{
"Section": "2.4 Elastic File System (EFS)",
"Section": "2. Storage",
"SubSection": "2.4 Elastic File System (EFS)",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "EFS data should be encrypted at rest using AWS KMS (Key Management Service).",

View File

@@ -12,7 +12,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Security defaults in Azure Active Directory (Azure AD) make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security.",
@@ -34,7 +35,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; • Service Co-Administrators • Subscription Owners • Contributors",
@@ -56,7 +58,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all non-privileged users.",
@@ -76,7 +79,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Do not allow users to remember multi-factor authentication on devices.",
@@ -98,7 +102,8 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Azure Active Directory Conditional Access allows an organization to configure Named locations and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
@@ -118,7 +123,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "CAUTION: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
@@ -138,7 +144,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -158,7 +165,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -178,7 +186,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -198,7 +207,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -220,7 +230,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Require administrators or appropriately delegated users to create new tenants.",
@@ -240,7 +250,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This recommendation extends guest access review by utilizing the Azure AD Privileged Identity Management feature provided in Azure AD Premium P2. Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user.",
@@ -260,7 +270,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Azure AD is extended to include Azure AD B2B collaboration, allowing you to invite people from outside your organization to be guest users in your cloud account and sign in with their own work, school, or social identities. Guest users allow you to share your company's applications and services with users from any other organization, while maintaining control over your own corporate data. Work with external partners, large or small, even if they don't have Azure AD or an IT department. A simple invitation and redemption process lets partners use their own credentials to access your company's resources as a guest user. Guest users in every subscription should be reviewed on a regular basis to ensure that inactive and unneeded accounts are removed.",
@@ -280,7 +290,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensures that two alternate forms of identification are provided before allowing a password reset.",
@@ -300,7 +310,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Microsoft Azure provides a Global Banned Password policy that applies to Azure administrative and normal user accounts. This is not applied to user accounts that are synced from an on-premise Active Directory unless Azure AD Connect is used and you enable EnforceCloudPasswordPolicyForPasswordSyncedUsers. Please see the list in default values on the specifics of this policy. To further password security, it is recommended to further define a custom banned password policy.",
@@ -320,7 +330,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that the number of days before users are asked to re-confirm their authentication information is not set to 0.",
@@ -340,7 +350,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that users are notified on their primary and secondary emails on password resets.",
@@ -360,7 +370,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that all Global Administrators are notified if any other administrator resets their password.",
@@ -382,7 +392,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators to provide consent for applications before use.",
@@ -404,7 +414,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Allow users to provide consent for selected permissions when a request is coming from a verified publisher.",
@@ -424,7 +434,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators to provide consent for the apps before use.",
@@ -446,7 +456,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Require administrators or appropriately delegated users to register third-party applications.",
@@ -468,7 +478,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Limit guest user permissions.",
@@ -490,7 +500,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict invitations to users with specific administrative roles only.",
@@ -510,7 +520,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Restrict access to the Azure AD administration portal to administrators only. NOTE: This only affects access to the Azure AD administrator's web portal. This setting does not prohibit privileged users from using other methods such as Rest API or Powershell to obtain sensitive information from Azure AD.",
@@ -530,7 +540,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restricts group creation to administrators with permissions only.",
@@ -552,7 +562,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict security group creation to administrators only.",
@@ -572,7 +582,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict security group management to administrators only.",
@@ -594,7 +604,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Restrict Microsoft 365 group creation to administrators only.",
@@ -614,7 +624,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Joining or registering devices to the active directory should require Multi-factor authentication.",
@@ -636,7 +646,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The principle of least privilege should be followed and only necessary privileges should be assigned instead of allowing full administrative access.",
@@ -658,7 +668,7 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Resource locking is a powerful protection mechanism that can prevent inadvertent modification/deletion of resources within Azure subscriptions/Resource Groups and is a recommended NIST configuration.",
@@ -678,7 +688,7 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Users who are set as subscription owners are able to make administrative changes to the subscriptions and move them into and out of Azure Active Directories.",
@@ -700,7 +710,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -722,7 +733,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -744,7 +756,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Databases enables threat detection for the instances running your database software. This provides threat intelligence, anomaly detection, and behavior analytics in the Azure Microsoft Defender for Cloud. Instead of being enabled on services like Platform as a Service (PaaS), this implementation will run within your instances as Infrastructure as a Service (IaaS) on the Operating Systems hosting your databases.",
@@ -766,7 +779,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Azure SQL database servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -788,7 +802,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -810,7 +825,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -832,7 +848,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -854,7 +871,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -876,7 +894,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
@@ -898,7 +917,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -920,7 +940,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
@@ -942,7 +963,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
@@ -964,7 +986,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
@@ -986,7 +1009,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "None of the settings offered by ASC Default policy should be set to effect Disabled.",
@@ -1008,7 +1032,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
@@ -1030,7 +1055,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
@@ -1050,7 +1076,8 @@
"Checks": [],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
@@ -1072,7 +1099,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable security alert emails to subscription owners.",
@@ -1094,7 +1122,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
@@ -1116,7 +1145,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
@@ -1138,7 +1168,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
@@ -1160,7 +1191,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. IMPORTANT: When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 2. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
@@ -1182,7 +1214,8 @@
],
"Attributes": [
{
"Section": "2.2 Microsoft Defender for IoT",
"Section": "2. Microsoft Defender",
"SubSection": "2.2 Microsoft Defender for IoT",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
@@ -1524,7 +1557,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable auditing on SQL Servers.",
@@ -1546,7 +1580,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
@@ -1568,7 +1603,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
@@ -1590,7 +1626,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Use Azure Active Directory Authentication for authentication with SQL Database to manage credentials in a single place.",
@@ -1612,7 +1649,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable Transparent Data Encryption on every SQL server.",
@@ -1634,7 +1672,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
@@ -1656,7 +1695,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable 'Microsoft Defender for SQL' on critical SQL Servers.",
@@ -1678,7 +1718,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
@@ -1700,7 +1741,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable Vulnerability Assessment (VA) service scans for critical SQL servers and corresponding SQL databases.",
@@ -1722,7 +1764,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Configure 'Send scan reports to' with email addresses of concerned data owners/stakeholders for a critical SQL servers",
@@ -1744,7 +1787,8 @@
],
"Attributes": [
{
"Section": "4.2 SQL Server - Microsoft Defender for SQL",
"Section": "4. Database Services",
"SubSection": "4.2 SQL Server - Microsoft Defender for SQL",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable Vulnerability Assessment (VA) setting 'Also send email notifications to admins and subscription owners'.",
@@ -1766,7 +1810,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable SSL connection on PostgreSQL Servers.",
@@ -1788,7 +1833,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable log_checkpoints on PostgreSQL Servers.",
@@ -1810,7 +1856,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable log_connections on PostgreSQL Servers.",
@@ -1832,7 +1879,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable log_disconnections on PostgreSQL Servers.",
@@ -1854,7 +1902,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable connection_throttling on PostgreSQL Servers.",
@@ -1876,7 +1925,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure log_retention_days on PostgreSQL Servers is set to an appropriate value.",
@@ -1898,7 +1948,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
@@ -1918,7 +1969,8 @@
"Checks": [],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
@@ -1940,7 +1992,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable SSL connection on MYSQL Servers.",
@@ -1962,7 +2015,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure TLS version on MySQL flexible servers is set to the default value.",
@@ -1984,7 +2038,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable audit_log_enabled on MySQL Servers.",
@@ -2006,7 +2061,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Set audit_log_enabled to include CONNECTION on MySQL Servers.",
@@ -2028,7 +2084,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
@@ -2050,7 +2107,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Private endpoints limit network traffic to approved sources.",
@@ -2072,7 +2130,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Cosmos DB can use tokens or AAD for client authentication which in turn will use Azure RBAC for authorization. Using AAD is significantly more secure because AAD handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
@@ -2094,7 +2153,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
@@ -2116,7 +2176,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Prerequisite: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: 'Ensure that a 'Diagnostic Setting' exists.' The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
@@ -2138,7 +2199,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The storage account container containing the activity log export should not be publicly accessible.",
@@ -2160,7 +2222,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
@@ -2182,7 +2245,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
@@ -2204,7 +2268,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
@@ -2226,7 +2291,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
@@ -2248,7 +2314,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create Policy Assignment event.",
@@ -2270,7 +2337,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
@@ -2292,7 +2360,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
@@ -2314,7 +2383,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Network Security Group event.",
@@ -2336,7 +2406,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
@@ -2358,7 +2429,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Security Solution event.",
@@ -2380,7 +2452,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
@@ -2402,7 +2475,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the 'Delete SQL Server Firewall Rule.'",
@@ -2424,7 +2498,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
@@ -2446,7 +2521,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
@@ -2466,7 +2542,7 @@
"Checks": [],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights",
"Section": "5. Logging and Monitoring",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Resource Logs capture activity to the data access plane while the Activity log is a subscription-level log for the control plane. Resource-level diagnostic logs provide insight into operations that were performed within that resource itself; for example, reading or updating a secret from a Key Vault. Currently, 95 Azure resources support Azure Monitoring (See the more information section for a complete list), including Network Security Groups, Load Balancers, Key Vault, AD, Logic Apps, and CosmosDB. The content of these logs varies by resource type. A number of back-end services were not configured to log and store Resource Logs for certain activities or for a sufficient length. It is crucial that monitoring is correctly configured to log all relevant activities and retain those logs for a sufficient length of time. Given that the mean time to detection in an enterprise is 240 days, a minimum retention period of two years is recommended.",
@@ -2486,7 +2562,7 @@
"Checks": [],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights",
"Section": "5. Logging and Monitoring",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The use of Basic or Free SKUs in Azure whilst cost effective have significant limitations in terms of what can be monitored and what support can be realized from Microsoft. Typically, these SKUs do not have a service SLA and Microsoft will usually refuse to provide support for them. Consequently Basic/Free SKUs should never be used for production workloads.",
@@ -2508,7 +2584,8 @@
],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights",
"Section": "5. Logging and Monitoring",
"SubSection": "5.3 Configuring Application Insights",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",

View File

@@ -494,7 +494,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Security defaults in Microsoft Entra ID make it easier to be secure and help protect your organization. Security defaults contain preconfigured security settings for common attacks. Security defaults is available to everyone. The goal is to ensure that all organizations have a basic level of security enabled at no extra cost. You may turn on security defaults in the Azure portal.",
@@ -516,7 +517,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all roles, groups, and users that have write access or permissions to Azure resources. These include custom created objects or built-in roles such as; - Service Co-Administrators - Subscription Owners - Contributors",
@@ -538,7 +540,8 @@
],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable multi-factor authentication for all non-privileged users.",
@@ -558,7 +561,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.1 Security Defaults",
"Section": "1. Identity and Access Management",
"SubSection": "1.1 Security Defaults",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Do not allow users to remember multi-factor authentication on devices.",
@@ -580,7 +584,8 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Microsoft Entra ID Conditional Access allows an organization to configure `Named locations` and configure whether those locations are trusted or untrusted. These settings provide organizations the means to specify Geographical locations for use in conditional access policies, or define actual IP addresses and IP ranges and whether or not those IP addresses and/or ranges are trusted by the organization.",
@@ -600,7 +605,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "**CAUTION**: If these policies are created without first auditing and testing the result, misconfiguration can potentially lock out administrators or create undesired access issues. Conditional Access Policies can be used to block access from geographic locations that are deemed out-of-scope for your organization or application. The scope and variables for this policy should be carefully examined and defined.",
@@ -620,7 +626,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -640,7 +647,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on logins.",
@@ -660,7 +668,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "For designated users, they will be prompted to use their multi-factor authentication (MFA) process on login.",
@@ -682,7 +691,8 @@
],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "This recommendation ensures that users accessing the Windows Azure Service Management API (i.e. Azure Powershell, Azure CLI, Azure Resource Manager API, etc.) are required to use multifactor authentication (MFA) credentials when accessing resources through the Windows Azure Service Management API.",
@@ -702,7 +712,8 @@
"Checks": [],
"Attributes": [
{
"Section": "1.2 Conditional Access",
"Section": "1. Identity and Access Management",
"SubSection": "1.2 Conditional Access",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "This recommendation ensures that users accessing Microsoft Admin Portals (i.e. Microsoft 365 Admin, Microsoft 365 Defender, Exchange Admin Center, Azure Portal, etc.) are required to use multifactor authentication (MFA) credentials when logging into an Admin Portal.",
@@ -724,7 +735,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Servers enables threat detection for Servers, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -746,7 +758,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for App Service enables threat detection for App Service, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -768,7 +781,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Azure SQL Databases enables threat detection for Managed Instance Azure SQL databases, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
@@ -790,7 +804,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for SQL servers on machines enables threat detection for SQL servers on machines, providing threat intelligence, anomaly detection, and behavior analytics in Microsoft Defender for Cloud.",
@@ -812,7 +827,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Open-source relational databases enables threat detection for Open-source relational databases, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -834,7 +850,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Azure Cosmos DB scans all incoming network requests for threats to your Azure Cosmos DB resources.",
@@ -856,7 +873,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Storage enables threat detection for Storage, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -878,7 +896,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Containers enables threat detection for Container Registries including Kubernetes, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud. The following services will be enabled for container instances: - Defender agent in Azure - Azure Policy for Kubernetes - Agentless discovery for Kubernetes - Agentless container vulnerability assessment",
@@ -900,7 +919,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Turning on Microsoft Defender for Key Vault enables threat detection for Key Vault, providing threat intelligence, anomaly detection, and behavior analytics in the Microsoft Defender for Cloud.",
@@ -922,7 +942,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "[**NOTE:** As of August 1, customers with an existing subscription to Defender for DNS can continue to use the service, but new subscribers will receive alerts about suspicious DNS activity as part of Defender for Servers P2.] Microsoft Defender for DNS scans all network traffic exiting from within a subscription.",
@@ -944,7 +965,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Resource Manager scans incoming administrative requests to change your infrastructure from both CLI and the Azure portal.",
@@ -966,7 +988,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that the latest OS patches for all virtual machines are applied.",
@@ -988,7 +1011,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "The Microsoft Cloud Security Benchmark (or MCSB) is an Azure Policy Initiative containing many security policies to evaluate resource configuration against best practice recommendations. If a policy in the MCSB is set with effect type `Disabled`, it is not evaluated and may prevent administrators from being informed of valuable security recommendations.",
@@ -1010,7 +1034,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable automatic provisioning of the monitoring agent to collect security data.",
@@ -1032,7 +1057,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable automatic provisioning of vulnerability assessment for machines on both Azure and hybrid (Arc enabled) machines.",
@@ -1052,7 +1078,8 @@
"Checks": [],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Enable automatic provisioning of the Microsoft Defender for Containers components.",
@@ -1074,7 +1101,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable security alert emails to subscription owners.",
@@ -1096,7 +1124,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Microsoft Defender for Cloud emails the subscription owners whenever a high-severity alert is triggered for their subscription. You should provide a security contact email address as an additional email address.",
@@ -1118,7 +1147,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enables emailing security alerts to the subscription owner or other designated security contact.",
@@ -1140,7 +1170,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This integration setting enables Microsoft Defender for Cloud Apps (formerly 'Microsoft Cloud App Security' or 'MCAS' - see additional info) to communicate with Microsoft Defender for Cloud.",
@@ -1162,7 +1193,8 @@
],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "This integration setting enables Microsoft Defender for Endpoint (formerly 'Advanced Threat Protection' or 'ATP' or 'WDATP' - see additional info) to communicate with Microsoft Defender for Cloud. **IMPORTANT:** When enabling integration between DfE & DfC it needs to be taken into account that this will have some side effects that may be undesirable. 1. For server 2019 & above if defender is installed (default for these server SKU's) this will trigger a deployment of the new unified agent and link to any of the extended configuration in the Defender portal. 1. If the new unified agent is required for server SKU's of Win 2016 or Linux and lower there is additional integration that needs to be switched on and agents need to be aligned.",
@@ -1182,7 +1214,8 @@
"Checks": [],
"Attributes": [
{
"Section": "2.1 Microsoft Defender for Cloud",
"Section": "2. Microsoft Defender",
"SubSection": "2.1 Microsoft Defender for Cloud",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "An organization's attack surface is the collection of assets with a public network identifier or URI that an external threat actor can see or access from outside your cloud. It is the set of points on the boundary of a system, a system element, system component, or an environment where an attacker can try to enter, cause an effect on, or extract data from, that system, system element, system component, or environment. The larger the attack surface, the harder it is to protect. This tool can be configured to scan your organization's online infrastructure such as specified domains, hosts, CIDR blocks, and SSL certificates, and store them in an Inventory. Inventory items can be added, reviewed, approved, and removed, and may contain enrichments (insights) and additional information collected from the tool's different scan engines and open-source intelligence sources. A Defender EASM workspace will generate an Inventory of publicly exposed assets by crawling and scanning the internet using _Seeds_ you provide when setting up the tool. Seeds can be FQDNs, IP CIDR blocks, and WHOIS records. Defender EASM will generate Insights within 24-48 hours after Seeds are provided, and these insights include vulnerability data (CVEs), ports and protocols, and weak or expired SSL certificates that could be used by an attacker for reconnaisance or exploitation. Results are classified High/Medium/Low and some of them include proposed mitigations.",
@@ -1204,7 +1237,8 @@
],
"Attributes": [
{
"Section": "2.2 Microsoft Defender for IoT",
"Section": "2. Microsoft Defender",
"SubSection": "2.2 Microsoft Defender for IoT",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Microsoft Defender for IoT acts as a central security hub for IoT devices within your organization.",
@@ -1586,7 +1620,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable auditing on SQL Servers.",
@@ -1608,7 +1643,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure that no SQL Databases allow ingress from 0.0.0.0/0 (ANY IP).",
@@ -1630,7 +1666,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Transparent Data Encryption (TDE) with Customer-managed key support provides increased transparency and control over the TDE Protector, increased security with an HSM-backed external service, and promotion of separation of duties. With TDE, data is encrypted at rest with a symmetric key (called the database encryption key) stored in the database or data warehouse distribution. To protect this data encryption key (DEK) in the past, only a certificate that the Azure SQL Service managed could be used. Now, with Customer-managed key support for TDE, the DEK can be protected with an asymmetric key that is stored in the Azure Key Vault. The Azure Key Vault is a highly available and scalable cloud-based key store which offers central key management, leverages FIPS 140-2 Level 2 validated hardware security modules (HSMs), and allows separation of management of keys and data for additional security. Based on business needs or criticality of data/databases hosted on a SQL server, it is recommended that the TDE protector is encrypted by a key that is managed by the data owner (Customer-managed key).",
@@ -1652,7 +1689,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Use Microsoft Entra authentication for authentication with SQL Database to manage credentials in a single place.",
@@ -1674,7 +1712,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable Transparent Data Encryption on every SQL server.",
@@ -1696,7 +1735,8 @@
],
"Attributes": [
{
"Section": "4.1 SQL Server - Auditing",
"Section": "4. Database Services",
"SubSection": "4.1 SQL Server - Auditing",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "SQL Server Audit Retention should be configured to be greater than 90 days.",
@@ -1718,7 +1758,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `SSL connection` on `PostgreSQL` Servers.",
@@ -1740,7 +1781,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `log_checkpoints` on `PostgreSQL Servers`.",
@@ -1762,7 +1804,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `log_connections` on `PostgreSQL Servers`.",
@@ -1784,7 +1827,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `log_disconnections` on `PostgreSQL Servers`.",
@@ -1806,7 +1850,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `connection_throttling` on `PostgreSQL Servers`.",
@@ -1828,7 +1873,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `log_retention_days` on `PostgreSQL Servers` is set to an appropriate value.",
@@ -1850,7 +1896,8 @@
],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Disable access from Azure services to PostgreSQL Database Server.",
@@ -1870,7 +1917,8 @@
"Checks": [],
"Attributes": [
{
"Section": "4.3 PostgreSQL Database Server. Storage Accounts",
"Section": "4. Database Services",
"SubSection": "4.3 PostgreSQL Database Server. Storage Accounts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Azure Database for PostgreSQL servers should be created with 'infrastructure double encryption' enabled.",
@@ -1892,7 +1940,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable `SSL connection` on `MYSQL` Servers.",
@@ -1914,7 +1963,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `TLS version` on `MySQL flexible` servers is set to use TLS version 1.2 or higher.",
@@ -1936,7 +1986,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable audit_log_enabled on MySQL Servers.",
@@ -1958,7 +2009,8 @@
],
"Attributes": [
{
"Section": "4.4 MySQL Database",
"Section": "4. Database Services",
"SubSection": "4.4 MySQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Set `audit_log_enabled` to include CONNECTION on MySQL Servers.",
@@ -1980,7 +2032,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Limiting your Cosmos DB to only communicate on whitelisted networks lowers its attack footprint.",
@@ -2002,7 +2055,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Private endpoints limit network traffic to approved sources.",
@@ -2024,7 +2078,8 @@
],
"Attributes": [
{
"Section": "4.5 Cosmos DB",
"Section": "4. Database Services",
"SubSection": "4.5 Cosmos DB",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Cosmos DB can use tokens or Entra ID for client authentication which in turn will use Azure RBAC for authorization. Using Entra ID is significantly more secure because Entra ID handles the credentials and allows for MFA and centralized management, and the Azure RBAC better integrated with the rest of Azure.",
@@ -2086,7 +2141,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "Enable Diagnostic settings for exporting activity logs. Diagnostic settings are available for each individual resource within a subscription. Settings should be configured for all appropriate resources for your environment.",
@@ -2108,7 +2164,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "**Prerequisite**: A Diagnostic Setting must exist. If a Diagnostic Setting does not exist, the navigation and options within this recommendation will not be available. Please review the recommendation at the beginning of this subsection titled: Ensure that a 'Diagnostic Setting' exists. The diagnostic setting should be configured to log the appropriate activities from the control/management plane.",
@@ -2130,7 +2187,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Storage accounts with the activity log exports can be configured to use Customer Managed Keys (CMK).",
@@ -2152,7 +2210,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enable AuditEvent logging for key vault instances to ensure interactions with key vaults are logged and available.",
@@ -2174,7 +2233,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Ensure that network flow logs are captured and fed into a central log analytics workspace.",
@@ -2196,7 +2256,8 @@
],
"Attributes": [
{
"Section": "5.1 Configuring Diagnostic Settings",
"Section": "5. Logging and Monitoring",
"SubSection": "5.1 Configuring Diagnostic Settings",
"Profile": "Level 2",
"AssessmentStatus": "Manual",
"Description": "Enable AppServiceHTTPLogs diagnostic log category for Azure App Service instances to ensure all http requests are captured and centrally logged.",
@@ -2218,7 +2279,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create Policy Assignment event.",
@@ -2240,7 +2302,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Policy Assignment event.",
@@ -2262,7 +2325,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an Activity Log Alert for the Create or Update Network Security Group event.",
@@ -2284,7 +2348,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Network Security Group event.",
@@ -2306,7 +2371,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Security Solution event.",
@@ -2328,7 +2394,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Security Solution event.",
@@ -2350,7 +2417,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update SQL Server Firewall Rule event.",
@@ -2372,7 +2440,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete SQL Server Firewall Rule.",
@@ -2394,7 +2463,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Create or Update Public IP Addresses rule.",
@@ -2416,7 +2486,8 @@
],
"Attributes": [
{
"Section": "5.2 Monitoring using Activity Log Alerts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.2 Monitoring using Activity Log Alerts",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Create an activity log alert for the Delete Public IP Address rule.",
@@ -2438,7 +2509,8 @@
],
"Attributes": [
{
"Section": "5.3 Configuring Application Insights. Storage Accounts",
"Section": "5. Logging and Monitoring",
"SubSection": "5.3 Configuring Application Insights. Storage Accounts",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "Application Insights within Azure act as an Application Performance Monitoring solution providing valuable data into how well an application performs and additional information when performing incident response. The types of log data collected include application metrics, telemetry data, and application trace logging data providing organizations with detailed information about application activity and application transactions. Both data sets help organizations adopt a proactive and retroactive means to handle security and performance related metrics within their modern applications.",
@@ -3043,7 +3115,9 @@
{
"Id": "9.4",
"Description": "Ensure that Register with Entra ID is enabled on App Service",
"Checks": [],
"Checks": [
"app_register_with_identity"
],
"Attributes": [
{
"Section": "9. AppService",

File diff suppressed because one or more lines are too long

View File

@@ -1292,7 +1292,8 @@
"Checks": [],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances. This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1313,7 +1314,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
@@ -1334,7 +1336,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1355,7 +1358,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are: - `TERSE` - `DEFAULT` - `VERBOSE` `TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information. `VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error. Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1376,7 +1380,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1397,7 +1402,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are: - `none` - `ddl` - `mod` - `all` The value `ddl` logs all data definition statements. The value `mod` logs all ddl statements, plus data-modifying statements. The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included. A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1418,7 +1424,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet. In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC). Limiting network access to your database will limit potential attacks.",
@@ -1439,7 +1446,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1460,7 +1468,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1481,7 +1490,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1502,7 +1512,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1523,7 +1534,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`. Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1544,7 +1556,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1565,7 +1578,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
@@ -1586,7 +1600,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1607,7 +1622,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1628,7 +1644,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
@@ -1649,7 +1666,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.",
@@ -1670,7 +1688,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.",

View File

@@ -1330,7 +1330,8 @@
"Checks": [],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Manual",
"Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances.This recommendation is applicable only for MySQL Instances. PostgreSQL does not offer any setting for No Password from the cloud console.",
@@ -1352,7 +1353,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `skip_show_database` database flag for Cloud SQL Mysql instance to `on`",
@@ -1374,7 +1376,8 @@
],
"Attributes": [
{
"Section": "6.1. MySQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.1. MySQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.",
@@ -1396,7 +1399,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are:- `TERSE`- `DEFAULT`- `VERBOSE``TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information.`VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error.Ensure an appropriate value is set to 'DEFAULT' or stricter.",
@@ -1418,7 +1422,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.",
@@ -1440,7 +1445,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.",
@@ -1462,7 +1468,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 2",
"AssessmentStatus": "Automated",
"Description": "The value of `log_statement` flag determined the SQL statements that are logged. Valid values are:- `none`- `ddl`- `mod`- `all`The value `ddl` logs all data definition statements.The value `mod` logs all ddl statements, plus data-modifying statements.The statements are logged after a basic parsing is done and statement type is determined, thus this does not logs statements with errors. When using extended query protocol, logging occurs after an Execute message is received and values of the Bind parameters are included.A value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.",
@@ -1484,7 +1491,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_messages` flag defines the minimum message severity level that is considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.",
@@ -1506,7 +1514,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_error_statement` flag defines the minimum message severity level that are considered as an error statement. Messages for error statements are logged with the SQL statement. Valid values include (from lowest to highest severity) `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.Each severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.",
@@ -1528,7 +1537,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.",
@@ -1550,7 +1560,8 @@
],
"Attributes": [
{
"Section": "6.2. PostgreSQL Database",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.2. PostgreSQL Database",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.",
@@ -1572,7 +1583,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`",
@@ -1594,7 +1606,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.This flag is deprecated for all SQL Server versions in GCP. Going forward, you can't set its value to on. However, if you have this flag enabled, we strongly recommend that you either remove the flag from your database or set it to off. For cross-database access, use the [Microsoft tutorial for signing stored procedures with a certificate](https://learn.microsoft.com/en-us/sql/relational-databases/tutorial-signing-stored-procedures-with-a-certificate?view=sql-server-ver16).",
@@ -1616,7 +1629,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.",
@@ -1638,7 +1652,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.",
@@ -1660,7 +1675,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.",
@@ -1682,7 +1698,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.",
@@ -1704,7 +1721,8 @@
],
"Attributes": [
{
"Section": "6.3. SQL Server",
"Section": "6. Cloud SQL Database Services",
"SubSection": "6.3. SQL Server",
"Profile": "Level 1",
"AssessmentStatus": "Automated",
"Description": "It is recommended not to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `on`.",

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ from prowler.lib.logger import logger
timestamp = datetime.today()
timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
prowler_version = "5.1.0"
prowler_version = "5.1.5"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://prowler.com/wp-content/uploads/logo-html.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"

View File

@@ -83,6 +83,7 @@ class CIS_Requirement_Attribute(BaseModel):
"""CIS Requirement Attribute"""
Section: str
SubSection: Optional[str]
Profile: CIS_Requirement_Attribute_Profile
AssessmentStatus: CIS_Requirement_Attribute_AssessmentStatus
Description: str

View File

@@ -48,6 +48,7 @@ class AWSCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -78,6 +79,7 @@ class AWSCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,

View File

@@ -48,6 +48,7 @@ class AzureCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -79,6 +80,7 @@ class AzureCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,

View File

@@ -48,6 +48,7 @@ class GCPCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -78,6 +79,7 @@ class GCPCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,

View File

@@ -50,6 +50,7 @@ class KubernetesCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,
@@ -81,6 +82,7 @@ class KubernetesCIS(ComplianceOutput):
Requirements_Id=requirement.Id,
Requirements_Description=requirement.Description,
Requirements_Attributes_Section=attribute.Section,
Requirements_Attributes_SubSection=attribute.SubSection,
Requirements_Attributes_Profile=attribute.Profile,
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus,
Requirements_Attributes_Description=attribute.Description,

View File

@@ -1,3 +1,5 @@
from typing import Optional
from pydantic import BaseModel
@@ -14,6 +16,7 @@ class AWSCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
@@ -44,6 +47,7 @@ class AzureCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
@@ -75,6 +79,7 @@ class GCPCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str
@@ -105,6 +110,7 @@ class KubernetesCISModel(BaseModel):
Requirements_Id: str
Requirements_Description: str
Requirements_Attributes_Section: str
Requirements_Attributes_SubSection: Optional[str]
Requirements_Attributes_Profile: str
Requirements_Attributes_AssessmentStatus: str
Requirements_Attributes_Description: str

View File

@@ -1,4 +1,5 @@
import os
from datetime import datetime
from typing import List
from py_ocsf_models.events.base_event import SeverityID, StatusID
@@ -68,7 +69,11 @@ class OCSF(Output):
activity_name=finding_activity.name,
finding_info=FindingInformation(
created_time_dt=finding.timestamp,
created_time=int(finding.timestamp.timestamp()),
created_time=(
int(finding.timestamp.timestamp())
if isinstance(finding.timestamp, datetime)
else finding.timestamp
),
desc=finding.metadata.Description,
title=finding.metadata.CheckTitle,
uid=finding.uid,
@@ -77,7 +82,11 @@ class OCSF(Output):
types=finding.metadata.CheckType,
),
time_dt=finding.timestamp,
time=int(finding.timestamp.timestamp()),
time=(
int(finding.timestamp.timestamp())
if isinstance(finding.timestamp, datetime)
else finding.timestamp
),
remediation=Remediation(
desc=finding.metadata.Remediation.Recommendation.Text,
references=list(

View File

@@ -1,5 +1,6 @@
from typing import Optional
from botocore.exceptions import ClientError
from pydantic import BaseModel
from prowler.lib.logger import logger
@@ -7,7 +8,6 @@ from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.lib.service.service import AWSService
################## ApiGatewayV2
class ApiGatewayV2(AWSService):
def __init__(self, provider):
# Call AWSService's __init__
@@ -71,6 +71,15 @@ class ApiGatewayV2(AWSService):
tags=[stage.get("Tags")],
)
)
except ClientError as error:
if error.response["Error"]["Code"] == "NotFoundException":
logger.warning(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
else:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
except Exception as error:
logger.error(
f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"

View File

@@ -1,3 +1,4 @@
import hashlib
import json
from prowler.lib.check.models import Check, Check_Report_AWS
@@ -28,11 +29,19 @@ class awslambda_function_no_secrets_in_variables(Check):
data=json.dumps(function.environment, indent=2),
excluded_secrets=secrets_ignore_patterns,
)
original_env_vars = {}
for name, value in function.environment.items():
original_env_vars.update(
{
hashlib.sha1( # nosec B324 SHA1 is used here for non-security-critical unique identifiers
value.encode("utf-8")
).hexdigest(): name
}
)
if detect_secrets_output:
environment_variable_names = list(function.environment.keys())
secrets_string = ", ".join(
[
f"{secret['type']} in variable {environment_variable_names[int(secret['line_number']) - 2]}"
f"{secret['type']} in variable {original_env_vars[secret['hashed_secret']]}"
for secret in detect_secrets_output
]
)

View File

@@ -65,6 +65,17 @@ class DocumentDB(AWSService):
def _list_tags_for_resource(self):
logger.info("DocumentDB - List Tags...")
try:
for cluster_arn, cluster in self.db_clusters.items():
try:
regional_client = self.regional_clients[cluster.region]
response = regional_client.list_tags_for_resource(
ResourceName=cluster_arn
)["TagList"]
cluster.tags = response
except Exception as error:
logger.error(
f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
for instance_arn, instance in self.db_instances.items():
try:
regional_client = self.regional_clients[instance.region]

View File

@@ -1,3 +1,4 @@
import hashlib
from json import dumps
from prowler.lib.check.models import Check, Check_Report_AWS
@@ -25,8 +26,16 @@ class ecs_task_definitions_no_environment_secrets(Check):
if container.environment:
dump_env_vars = {}
original_env_vars = {}
for env_var in container.environment:
dump_env_vars.update({env_var.name: env_var.value})
original_env_vars.update(
{
hashlib.sha1( # nosec B324 SHA1 is used here for non-security-critical unique identifiers
env_var.value.encode("utf-8")
).hexdigest(): env_var.name
}
)
env_data = dumps(dump_env_vars, indent=2)
detect_secrets_output = detect_secrets_scan(
@@ -35,7 +44,7 @@ class ecs_task_definitions_no_environment_secrets(Check):
if detect_secrets_output:
secrets_string = ", ".join(
[
f"{secret['type']} on line {secret['line_number']}"
f"{secret['type']} on the environment variable {original_env_vars[secret['hashed_secret']]}"
for secret in detect_secrets_output
]
)

View File

@@ -12,6 +12,8 @@ class sqlserver_tde_encryption_enabled(Check):
)
if len(databases) > 0:
for database in databases:
if database.name.lower() == "master":
continue
report = Check_Report_Azure(self.metadata())
report.subscription = subscription
report.resource_name = database.name

View File

@@ -23,7 +23,7 @@ packages = [
{include = "dashboard"}
]
readme = "README.md"
version = "5.1.0"
version = "5.1.5"
[tool.poetry.dependencies]
alive-progress = "3.2.0"

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -42,6 +42,10 @@ class TestKubernetesCIS:
output_data.Requirements_Attributes_Section
== CIS_1_8_KUBERNETES.Requirements[0].Attributes[0].Section
)
assert (
output_data.Requirements_Attributes_SubSection
== CIS_1_8_KUBERNETES.Requirements[0].Attributes[0].SubSection
)
assert (
output_data.Requirements_Attributes_Profile
== CIS_1_8_KUBERNETES.Requirements[0].Attributes[0].Profile
@@ -105,6 +109,10 @@ class TestKubernetesCIS:
output_data_manual.Requirements_Attributes_Section
== CIS_1_8_KUBERNETES.Requirements[1].Attributes[0].Section
)
assert (
output_data.Requirements_Attributes_SubSection
== CIS_1_8_KUBERNETES.Requirements[0].Attributes[0].SubSection
)
assert (
output_data_manual.Requirements_Attributes_Profile
== CIS_1_8_KUBERNETES.Requirements[1].Attributes[0].Profile
@@ -173,5 +181,5 @@ class TestKubernetesCIS:
mock_file.seek(0)
content = mock_file.read()
expected_csv = f"PROVIDER;DESCRIPTION;CONTEXT;NAMESPACE;ASSESSMENTDATE;REQUIREMENTS_ID;REQUIREMENTS_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_SECTION;REQUIREMENTS_ATTRIBUTES_PROFILE;REQUIREMENTS_ATTRIBUTES_ASSESSMENTSTATUS;REQUIREMENTS_ATTRIBUTES_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_RATIONALESTATEMENT;REQUIREMENTS_ATTRIBUTES_IMPACTSTATEMENT;REQUIREMENTS_ATTRIBUTES_REMEDIATIONPROCEDURE;REQUIREMENTS_ATTRIBUTES_AUDITPROCEDURE;REQUIREMENTS_ATTRIBUTES_ADDITIONALINFORMATION;REQUIREMENTS_ATTRIBUTES_REFERENCES;REQUIREMENTS_ATTRIBUTES_DEFAULTVALUE;STATUS;STATUSEXTENDED;RESOURCEID;RESOURCENAME;CHECKID;MUTED\r\nkubernetes;This CIS Kubernetes Benchmark provides prescriptive guidance for establishing a secure configuration posture for Kubernetes v1.27.;test-cluster;test-namespace;{datetime.now()};1.1.3;Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive;1.1 Control Plane Node Configuration Files;Level 1 - Master Node;Automated;Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.;The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.;;Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml ```;Run the below command (based on the file location on your system) on the Control Plane node. 
For example, ``` stat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml ``` Verify that the permissions are `600` or more restrictive.;;https://kubernetes.io/docs/admin/kube-apiserver/;By default, the `kube-controller-manager.yaml` file has permissions of `640`.;PASS;;;;test-check-id;False\r\nkubernetes;This CIS Kubernetes Benchmark provides prescriptive guidance for establishing a secure configuration posture for Kubernetes v1.27.;;;{datetime.now()};1.1.4;Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive;1.1 Control Plane Node Configuration Files;Level 1 - Master Node;Automated;Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.;The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.;;Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml ```;Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml ``` Verify that the permissions are `600` or more restrictive.;;https://kubernetes.io/docs/admin/kube-apiserver/;By default, the `kube-controller-manager.yaml` file has permissions of `640`.;MANUAL;Manual check;manual_check;Manual check;manual;False\r\n"
expected_csv = f"PROVIDER;DESCRIPTION;CONTEXT;NAMESPACE;ASSESSMENTDATE;REQUIREMENTS_ID;REQUIREMENTS_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_SECTION;REQUIREMENTS_ATTRIBUTES_SUBSECTION;REQUIREMENTS_ATTRIBUTES_PROFILE;REQUIREMENTS_ATTRIBUTES_ASSESSMENTSTATUS;REQUIREMENTS_ATTRIBUTES_DESCRIPTION;REQUIREMENTS_ATTRIBUTES_RATIONALESTATEMENT;REQUIREMENTS_ATTRIBUTES_IMPACTSTATEMENT;REQUIREMENTS_ATTRIBUTES_REMEDIATIONPROCEDURE;REQUIREMENTS_ATTRIBUTES_AUDITPROCEDURE;REQUIREMENTS_ATTRIBUTES_ADDITIONALINFORMATION;REQUIREMENTS_ATTRIBUTES_REFERENCES;REQUIREMENTS_ATTRIBUTES_DEFAULTVALUE;STATUS;STATUSEXTENDED;RESOURCEID;RESOURCENAME;CHECKID;MUTED\r\nkubernetes;This CIS Kubernetes Benchmark provides prescriptive guidance for establishing a secure configuration posture for Kubernetes v1.27.;test-cluster;test-namespace;{datetime.now()};1.1.3;Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive;1. Control Plane;1.1 Control Plane Node Configuration Files;Level 1 - Master Node;Automated;Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.;The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.;;Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml ```;Run the below command (based on the file location on your system) on the Control Plane node. 
For example, ``` stat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml ``` Verify that the permissions are `600` or more restrictive.;;https://kubernetes.io/docs/admin/kube-apiserver/;By default, the `kube-controller-manager.yaml` file has permissions of `640`.;PASS;;;;test-check-id;False\r\nkubernetes;This CIS Kubernetes Benchmark provides prescriptive guidance for establishing a secure configuration posture for Kubernetes v1.27.;;;{datetime.now()};1.1.4;Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive;1.1 Control Plane Node Configuration Files;;Level 1 - Master Node;Automated;Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.;The controller manager pod specification file controls various parameters that set the behavior of the Controller Manager on the master node. You should restrict its file permissions to maintain the integrity of the file. The file should be writable by only the administrators on the system.;;Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` chmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml ```;Run the below command (based on the file location on your system) on the Control Plane node. For example, ``` stat -c %a /etc/kubernetes/manifests/kube-controller-manager.yaml ``` Verify that the permissions are `600` or more restrictive.;;https://kubernetes.io/docs/admin/kube-apiserver/;By default, the `kube-controller-manager.yaml` file has permissions of `640`.;MANUAL;Manual check;manual_check;Manual check;manual;False\r\n"
assert content == expected_csv

View File

@@ -28,7 +28,8 @@ CIS_1_4_AWS = Compliance(
Description="Ensure MFA Delete is enabled on S3 buckets",
Attributes=[
CIS_Requirement_Attribute(
Section="2.1. Simple Storage Service (S3)",
Section="2. Storage",
SubSection="2.1. Simple Storage Service (S3)",
Profile="Level 1",
AssessmentStatus="Automated",
Description="Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -47,7 +48,8 @@ CIS_1_4_AWS = Compliance(
Description="Ensure MFA Delete is enabled on S3 buckets",
Attributes=[
CIS_Requirement_Attribute(
Section="2.1. Simple Storage Service (S3)",
Section="2. Storage",
SubSection="2.1. Simple Storage Service (S3)",
Profile="Level 1",
AssessmentStatus="Automated",
Description="Once MFA Delete is enabled on your sensitive and classified S3 bucket it requires the user to have two forms of authentication.",
@@ -75,7 +77,8 @@ CIS_2_0_AZURE = Compliance(
Description="Ensure That Microsoft Defender for Databases Is Set To 'On'",
Attributes=[
CIS_Requirement_Attribute(
Section="2.1 Microsoft Defender for Cloud",
Section="2. Defender",
SubSection="2.1 Microsoft Defender for Cloud",
Profile="Level 2",
AssessmentStatus="Manual",
Description="Turning on Microsoft Defender for Databases enables threat detection for the instances running your database software. This provides threat intelligence, anomaly detection, and behavior analytics in the Azure Microsoft Defender for Cloud. Instead of being enabled on services like Platform as a Service (PaaS), this implementation will run within your instances as Infrastructure as a Service (IaaS) on the Operating Systems hosting your databases.",
@@ -95,7 +98,8 @@ CIS_2_0_AZURE = Compliance(
Description="Ensure That Microsoft Defender for Databases Is Set To 'On'",
Attributes=[
CIS_Requirement_Attribute(
Section="2.1 Microsoft Defender for Cloud",
Section="2. Defender",
SubSection="2.1 Microsoft Defender for Cloud",
Profile="Level 2",
AssessmentStatus="Manual",
Description="Turning on Microsoft Defender for Databases enables threat detection for the instances running your database software. This provides threat intelligence, anomaly detection, and behavior analytics in the Azure Microsoft Defender for Cloud. Instead of being enabled on services like Platform as a Service (PaaS), this implementation will run within your instances as Infrastructure as a Service (IaaS) on the Operating Systems hosting your databases.",
@@ -124,7 +128,8 @@ CIS_2_0_GCP = Compliance(
Description="Ensure That Microsoft Defender for Databases Is Set To 'On'",
Attributes=[
CIS_Requirement_Attribute(
Section="2. Logging and Monitoring",
Section="2. Logging",
SubSection="2.1. Logging and Monitoring",
Profile="Level 1",
AssessmentStatus="Automated",
Description="GCP Cloud Asset Inventory is services that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource.",
@@ -143,7 +148,7 @@ CIS_2_0_GCP = Compliance(
Description="Ensure That Microsoft Defender for Databases Is Set To 'On'",
Attributes=[
CIS_Requirement_Attribute(
Section="2. Logging and Monitoring",
Section="2. Logging",
Profile="Level 1",
AssessmentStatus="Automated",
Description="GCP Cloud Asset Inventory is services that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource.",
@@ -171,7 +176,8 @@ CIS_1_8_KUBERNETES = Compliance(
Description="Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive",
Attributes=[
CIS_Requirement_Attribute(
Section="1.1 Control Plane Node Configuration Files",
Section="1. Control Plane",
SubSection="1.1 Control Plane Node Configuration Files",
Profile="Level 1 - Master Node",
AssessmentStatus="Automated",
Description="Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.",

View File

@@ -1,4 +1,5 @@
from datetime import datetime
from typing import Union
from prowler.config.config import prowler_version
from prowler.lib.check.models import CheckMetadata, Code, Recommendation, Remediation
@@ -19,7 +20,7 @@ def generate_finding_output(
resource_name: str = "",
resource_tags: dict = {},
compliance: dict = {"test-compliance": "test-compliance"},
timestamp: datetime = None,
timestamp: Union[int, datetime] = None,
provider: str = "aws",
partition: str = "aws",
description: str = "check description",

View File

@@ -1,5 +1,5 @@
import json
from datetime import datetime
from datetime import datetime, timezone
from io import StringIO
import requests
@@ -36,7 +36,15 @@ class TestOCSF:
muted=False,
region=AWS_REGION_EU_WEST_1,
resource_tags={"Name": "test", "Environment": "dev"},
)
),
# Test with int timestamp (UNIX timestamp)
generate_finding_output(
status="FAIL",
severity="medium",
muted=False,
region=AWS_REGION_EU_WEST_1,
timestamp=1619600000,
),
]
ocsf = OCSF(findings)
@@ -101,6 +109,14 @@ class TestOCSF:
"compliance": findings[0].compliance,
}
# Test with int timestamp (UNIX timestamp)
output_data = ocsf.data[1]
assert output_data.time == 1619600000
assert output_data.time_dt == datetime.fromtimestamp(
1619600000, tz=timezone.utc
)
def test_validate_ocsf(self):
mock_file = StringIO()
findings = [

View File

@@ -25,6 +25,16 @@ def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "GetAuthorizers":
return {"Items": [{"AuthorizerId": "authorizer-id", "Name": "test-authorizer"}]}
elif operation_name == "GetStages":
if kwarg["ApiId"] == "not-found-api":
raise botocore.exceptions.ClientError(
{
"Error": {
"Code": "NotFoundException",
"Message": "API not found",
}
},
"GetStages",
)
return {
"Items": [
{
@@ -120,3 +130,24 @@ class Test_ApiGatewayV2_Service:
aws_provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
apigatewayv2 = ApiGatewayV2(aws_provider)
assert apigatewayv2.apis[0].stages[0].logging is True
# Test ApiGatewayV2 Get Stages with NotFoundException
# Regression test: a NotFoundException raised by GetStages must be logged
# as a warning instead of aborting service collection.
@mock_aws
@patch("prowler.providers.aws.services.apigatewayv2.apigatewayv2_service.logger")
def test_get_stages_not_found_exception(self, mock_logger):
    """_get_stages handles NotFoundException by emitting a single warning."""
    # Create one (moto-mocked) HTTP API so the service has an API to inspect.
    apigw_client = client("apigatewayv2", region_name=AWS_REGION_US_EAST_1)
    apigw_client.create_api(Name="test-api", ProtocolType="HTTP")

    provider = set_mocked_aws_provider([AWS_REGION_US_EAST_1])
    apigatewayv2 = ApiGatewayV2(provider)

    # Point the collected API at the ID that the mocked API call rejects
    # with NotFoundException (presumably via the module-level
    # mock_make_api_call patch — not visible here; TODO confirm), then
    # re-run stage collection to exercise the exception path.
    apigatewayv2.apis[0].id = "not-found-api"
    apigatewayv2._get_stages()

    # The exception must be swallowed: exactly one warning mentioning it.
    mock_logger.warning.assert_called_once()
    assert "NotFoundException" in mock_logger.warning.call_args[0][0]

View File

@@ -179,7 +179,7 @@ class Test_DocumentDB_Service:
parameter_group="default.docdb3.6",
deletion_protection=True,
region=AWS_REGION_US_EAST_1,
tags=[],
tags=[{"Key": "environment", "Value": "test"}],
)
}

View File

@@ -132,7 +132,7 @@ class Test_ecs_task_definitions_no_environment_secrets:
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"Potential secrets found in ECS task definition {TASK_NAME} with revision {TASK_REVISION}: Secrets in container test-container -> Secret Keyword on line 2."
== f"Potential secrets found in ECS task definition {TASK_NAME} with revision {TASK_REVISION}: Secrets in container test-container -> Secret Keyword on the environment variable DB_PASSWORD."
)
assert result[0].resource_id == f"{TASK_NAME}:{TASK_REVISION}"
assert result[0].resource_arn == task_arn

View File

@@ -137,7 +137,10 @@ class Test_sqs_queues_not_publicly_accessible:
)
with mock.patch(
"prowler.providers.aws.services.sqs.sqs_service.SQS",
sqs_client,
new=sqs_client,
), mock.patch(
"prowler.providers.aws.services.sqs.sqs_client.sqs_client",
new=sqs_client,
):
from prowler.providers.aws.services.sqs.sqs_queues_not_publicly_accessible.sqs_queues_not_publicly_accessible import (
sqs_queues_not_publicly_accessible,

View File

@@ -177,3 +177,68 @@ class Test_sqlserver_tde_encryption_enabled:
assert result[0].resource_name == database_name
assert result[0].resource_id == database_id
assert result[0].location == "location"
def test_sql_servers_database_encryption_disabled_on_master_db(self):
    """TDE disabled on the system MASTER database must not produce a finding.

    The server exposes two databases: MASTER with TDE disabled and a user
    database with TDE enabled. Only the user database is reported, and it
    PASSes — the MASTER database is expected to be skipped by the check.
    """
    # NOTE(review): the class itself (not an instance) is used as the mocked
    # client, matching the convention of the sibling tests in this file.
    sqlserver_client = mock.MagicMock
    server_name = "SQL Server Name"
    server_id = str(uuid4())

    # System database with TDE explicitly disabled.
    master_db = Database(
        id=str(uuid4()),
        name="MASTER",
        type="type",
        location="location",
        managed_by="managed_by",
        tde_encryption=TransparentDataEncryption(status="Disabled"),
    )

    # Regular database with TDE enabled — the one expected in the results.
    user_db_name = "Database Name"
    user_db_id = str(uuid4())
    user_db = Database(
        id=user_db_id,
        name=user_db_name,
        type="type",
        location="location",
        managed_by="managed_by",
        tde_encryption=TransparentDataEncryption(status="Enabled"),
    )

    sqlserver_client.sql_servers = {
        AZURE_SUBSCRIPTION_ID: [
            Server(
                id=server_id,
                name=server_name,
                public_network_access="",
                minimal_tls_version="",
                administrators=None,
                auditing_policies=None,
                firewall_rules=None,
                databases=[master_db, user_db],
                encryption_protector=None,
                location="location",
            )
        ]
    }

    with mock.patch(
        "prowler.providers.common.provider.Provider.get_global_provider",
        return_value=set_mocked_azure_provider(),
    ), mock.patch(
        "prowler.providers.azure.services.sqlserver.sqlserver_tde_encryption_enabled.sqlserver_tde_encryption_enabled.sqlserver_client",
        new=sqlserver_client,
    ):
        # Import inside the patch context so the check binds to the mock.
        from prowler.providers.azure.services.sqlserver.sqlserver_tde_encryption_enabled.sqlserver_tde_encryption_enabled import (
            sqlserver_tde_encryption_enabled,
        )

        check = sqlserver_tde_encryption_enabled()
        result = check.execute()

        # Exactly one finding: the MASTER database is ignored.
        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == f"Database {user_db_name} from SQL Server {server_name} from subscription {AZURE_SUBSCRIPTION_ID} has TDE enabled"
        )
        assert result[0].subscription == AZURE_SUBSCRIPTION_ID
        assert result[0].resource_name == user_db_name
        assert result[0].resource_id == user_db_id
        assert result[0].location == "location"

View File

@@ -1,13 +1,18 @@
from re import search
from unittest import mock
from tests.providers.gcp.gcp_fixtures import GCP_PROJECT_ID, set_mocked_gcp_provider
from tests.providers.gcp.gcp_fixtures import (
GCP_EU1_LOCATION,
GCP_PROJECT_ID,
set_mocked_gcp_provider,
)
class Test_dns_dnssec_disabled:
def test_dns_no_managed_zones(self):
dns_client = mock.MagicMock
dns_client.managed_zones = []
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
@@ -51,6 +56,7 @@ class Test_dns_dnssec_disabled:
dns_client = mock.MagicMock
dns_client.project_ids = [GCP_PROJECT_ID]
dns_client.managed_zones = [managed_zone]
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
@@ -101,6 +107,7 @@ class Test_dns_dnssec_disabled:
dns_client = mock.MagicMock
dns_client.project_ids = [GCP_PROJECT_ID]
dns_client.managed_zones = [managed_zone]
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",

View File

@@ -1,13 +1,18 @@
from re import search
from unittest import mock
from tests.providers.gcp.gcp_fixtures import GCP_PROJECT_ID, set_mocked_gcp_provider
from tests.providers.gcp.gcp_fixtures import (
GCP_EU1_LOCATION,
GCP_PROJECT_ID,
set_mocked_gcp_provider,
)
class Test_dns_rsasha1_in_use_to_key_sign_in_dnssec:
def test_dns_no_managed_zones(self):
dns_client = mock.MagicMock
dns_client.managed_zones = []
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
@@ -51,6 +56,7 @@ class Test_dns_rsasha1_in_use_to_key_sign_in_dnssec:
dns_client = mock.MagicMock
dns_client.project_ids = [GCP_PROJECT_ID]
dns_client.managed_zones = [managed_zone]
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
@@ -101,6 +107,7 @@ class Test_dns_rsasha1_in_use_to_key_sign_in_dnssec:
dns_client = mock.MagicMock
dns_client.project_ids = [GCP_PROJECT_ID]
dns_client.managed_zones = [managed_zone]
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",

View File

@@ -1,13 +1,18 @@
from re import search
from unittest import mock
from tests.providers.gcp.gcp_fixtures import GCP_PROJECT_ID, set_mocked_gcp_provider
from tests.providers.gcp.gcp_fixtures import (
GCP_EU1_LOCATION,
GCP_PROJECT_ID,
set_mocked_gcp_provider,
)
class Test_dns_rsasha1_in_use_to_zone_sign_in_dnssec:
def test_dns_no_managed_zones(self):
dns_client = mock.MagicMock
dns_client.managed_zones = []
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
@@ -51,6 +56,7 @@ class Test_dns_rsasha1_in_use_to_zone_sign_in_dnssec:
dns_client = mock.MagicMock
dns_client.project_ids = [GCP_PROJECT_ID]
dns_client.managed_zones = [managed_zone]
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",
@@ -101,6 +107,7 @@ class Test_dns_rsasha1_in_use_to_zone_sign_in_dnssec:
dns_client = mock.MagicMock
dns_client.project_ids = [GCP_PROJECT_ID]
dns_client.managed_zones = [managed_zone]
dns_client.region = GCP_EU1_LOCATION
with mock.patch(
"prowler.providers.common.provider.Provider.get_global_provider",

View File

@@ -28,9 +28,7 @@ export const getFindings = async ({
if (sort) url.searchParams.append("sort", sort);
Object.entries(filters).forEach(([key, value]) => {
if (key !== "filter[search]") {
url.searchParams.append(key, String(value));
}
url.searchParams.append(key, String(value));
});
try {
@@ -51,7 +49,7 @@ export const getFindings = async ({
}
};
export const getServicesRegions = async ({
export const getMetadataInfo = async ({
query = "",
sort = "",
filters = {},
@@ -59,13 +57,18 @@ export const getServicesRegions = async ({
const session = await auth();
const keyServer = process.env.API_BASE_URL;
const url = new URL(`${keyServer}/findings/findings_services_regions`);
const url = new URL(`${keyServer}/findings/metadata`);
if (query) url.searchParams.append("filter[search]", query);
if (sort) url.searchParams.append("sort", sort);
Object.entries(filters).forEach(([key, value]) => {
if (key !== "filter[search]") {
// Define filters to exclude
const excludedFilters = ["region__in", "service__in", "resource_type__in"];
if (
key !== "filter[search]" &&
!excludedFilters.some((filter) => key.includes(filter))
) {
url.searchParams.append(key, String(value));
}
});

View File

@@ -158,10 +158,10 @@ export const updateProviderGroup = async (
const providersJson = formData.get("providers") as string;
const rolesJson = formData.get("roles") as string;
const providers = providersJson ? JSON.parse(providersJson) : [];
const roles = rolesJson ? JSON.parse(rolesJson) : [];
const providers = providersJson ? JSON.parse(providersJson) : null;
const roles = rolesJson ? JSON.parse(rolesJson) : null;
const payload: ManageGroupPayload = {
const payload: Partial<ManageGroupPayload> = {
data: {
type: "provider-groups",
id: providerGroupId,
@@ -170,14 +170,15 @@ export const updateProviderGroup = async (
},
};
// Add relationships only if there are items
if (providers.length >= 0) {
payload.data.relationships!.providers = { data: providers };
// Add relationships only if changes are detected
if (providers) {
payload.data!.relationships!.providers = { data: providers };
}
if (roles.length >= 0) {
payload.data.relationships!.roles = { data: roles };
if (roles) {
payload.data!.relationships!.roles = { data: roles };
}
try {
const url = `${keyServer}/provider-groups/${providerGroupId}`;
const response = await fetch(url, {
@@ -191,9 +192,6 @@ export const updateProviderGroup = async (
});
if (!response.ok) {
const errorResponse = await response.json();
// eslint-disable-next-line no-console
console.error("Error response:", errorResponse);
throw new Error(
`Failed to update provider group: ${response.status} ${response.statusText}`,
);
@@ -203,8 +201,6 @@ export const updateProviderGroup = async (
revalidatePath("/manage-groups");
return parseStringify(data);
} catch (error) {
// eslint-disable-next-line no-console
console.error("Unexpected error:", error);
return {
error: getErrorMessage(error),
};

View File

@@ -81,25 +81,31 @@ export const addRole = async (formData: FormData) => {
const name = formData.get("name") as string;
const groups = formData.getAll("groups[]") as string[];
// Prepare base payload
const payload: any = {
data: {
type: "roles",
attributes: {
name,
manage_users: formData.get("manage_users") === "true",
manage_account: formData.get("manage_account") === "true",
manage_billing: formData.get("manage_billing") === "true",
manage_providers: formData.get("manage_providers") === "true",
manage_integrations: formData.get("manage_integrations") === "true",
manage_scans: formData.get("manage_scans") === "true",
manage_account: formData.get("manage_account") === "true",
// TODO: Add back when we have integrations ready
// manage_integrations: formData.get("manage_integrations") === "true",
unlimited_visibility: formData.get("unlimited_visibility") === "true",
},
relationships: {},
},
};
// Add relationships only if there are items
// Conditionally include manage_billing for cloud environment
if (process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true") {
payload.data.attributes.manage_billing =
formData.get("manage_billing") === "true";
}
// Add provider groups relationships only if there are items
if (groups.length > 0) {
payload.data.relationships.provider_groups = {
data: groups.map((groupId: string) => ({
@@ -147,19 +153,26 @@ export const updateRole = async (formData: FormData, roleId: string) => {
type: "roles",
id: roleId,
attributes: {
...(name && { name }),
...(name && { name }), // Include name only if provided
manage_users: formData.get("manage_users") === "true",
manage_account: formData.get("manage_account") === "true",
manage_billing: formData.get("manage_billing") === "true",
manage_providers: formData.get("manage_providers") === "true",
manage_integrations: formData.get("manage_integrations") === "true",
manage_account: formData.get("manage_account") === "true",
manage_scans: formData.get("manage_scans") === "true",
// TODO: Add back when we have integrations ready
// manage_integrations: formData.get("manage_integrations") === "true",
unlimited_visibility: formData.get("unlimited_visibility") === "true",
},
relationships: {},
},
};
// Conditionally include manage_billing for cloud environments
if (process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true") {
payload.data.attributes.manage_billing =
formData.get("manage_billing") === "true";
}
// Add provider groups relationships only if there are items
if (groups.length > 0) {
payload.data.relationships.provider_groups = {
data: groups.map((groupId: string) => ({
@@ -182,6 +195,7 @@ export const updateRole = async (formData: FormData, roleId: string) => {
},
body,
});
const data = await response.json();
revalidatePath("/roles");
return data;

View File

@@ -1,7 +1,7 @@
import { Spacer } from "@nextui-org/react";
import React, { Suspense } from "react";
import { getFindings, getServicesRegions } from "@/actions/findings";
import { getFindings, getMetadataInfo } from "@/actions/findings";
import { getProviders } from "@/actions/providers";
import { getScans } from "@/actions/scans";
import { filterFindings } from "@/components/filters/data-filters";
@@ -35,7 +35,6 @@ export default async function Findings({
// Extract all filter parameters and combine with default filters
const defaultFilters = {
"filter[status__in]": "FAIL, PASS",
"filter[delta__in]": "new",
};
const filters: Record<string, string> = {
@@ -47,15 +46,17 @@ export default async function Findings({
const query = filters["filter[search]"] || "";
const servicesRegionsData = await getServicesRegions({
const metadataInfoData = await getMetadataInfo({
query,
sort: encodedSort,
filters,
});
// Extract unique regions and services from the new endpoint
const uniqueRegions = servicesRegionsData?.data?.attributes?.regions || [];
const uniqueServices = servicesRegionsData?.data?.attributes?.services || [];
const uniqueRegions = metadataInfoData?.data?.attributes?.regions || [];
const uniqueServices = metadataInfoData?.data?.attributes?.services || [];
const uniqueResourceTypes =
metadataInfoData?.data?.attributes?.resource_types || [];
// Get findings data
const providersData = await getProviders({});
const scansData = await getScans({});
@@ -72,7 +73,7 @@ export default async function Findings({
// Extract scan UUIDs with "completed" state and more than one resource
const completedScans = scansData?.data
?.filter(
(scan: any) =>
(scan: ScanProps) =>
scan.attributes.state === "completed" &&
scan.attributes.unique_resource_count > 1,
)
@@ -104,6 +105,11 @@ export default async function Findings({
labelCheckboxGroup: "Services",
values: uniqueServices,
},
{
key: "resource_type__in",
labelCheckboxGroup: "Resource Type",
values: uniqueResourceTypes,
},
{
key: "provider_uid__in",
labelCheckboxGroup: "Provider UID",

View File

@@ -1,6 +1,6 @@
import { Alert, cn } from "@nextui-org/react";
import React from "react";
import { InfoIcon } from "@/components/icons";
import {
UpdateViaCredentialsForm,
UpdateViaRoleForm,
@@ -17,33 +17,23 @@ export default function UpdateCredentialsPage({ searchParams }: Props) {
{searchParams.type === "aws" && !searchParams.via && (
<>
<div className="flex flex-col gap-4">
<p className="text-sm text-default-500">
If the provider was set up with static credentials, updates must
use static credentials. If it was set up with a role, updates must
use a role.
<p className="text-sm text-default-700">
To update provider credentials,{" "}
<strong>
the same type that was originally configured must be used.
</strong>
</p>
<Alert
color="warning"
variant="faded"
classNames={{
base: cn([
"border-1 border-default-200 dark:border-default-100",
"gap-x-4",
]),
}}
description={
<>
To update provider credentials,{" "}
<strong>
you must use the same type that was originally configured.
</strong>{" "}
</>
}
/>
<p className="text-sm text-default-500">
To switch from static credentials to a role (or vice versa), you
need to delete the provider and set it up again.
<div className="flex items-center rounded-lg border border-system-warning bg-system-warning-medium p-4 text-sm dark:text-default-300">
<InfoIcon className="mr-2 inline h-4 w-4 flex-shrink-0" />
<p>
If the provider was configured with static credentials, updates
must also use static credentials. If it was configured with a
role, updates must use a role.
</p>
</div>
<p className="text-sm text-default-700">
To switch from static credentials to a role (or vice versa), the
provider must be deleted and set up again.
</p>
<SelectViaAWS initialVia={searchParams.via} />
</div>

View File

@@ -66,17 +66,18 @@ export default async function Scans({
<>
<Spacer y={8} />
<NoProvidersConnected />
<Spacer y={4} />
</>
) : (
<>
<LaunchScanWorkflow providers={providerInfo} />
<Spacer y={4} />
<ScanWarningBar />
<Spacer y={8} />
</>
)}
<div className="grid grid-cols-12 items-start gap-4">
<div className="col-span-12">
<ScanWarningBar />
<Spacer y={4} />
<div className="flex flex-row items-center justify-between">
<DataTableFilterCustom filters={filterScans || []} />
<ButtonRefreshData

View File

@@ -50,10 +50,12 @@ export const EditGroupForm = ({
try {
const updatedFields: Partial<FormValues> = {};
// Detect changes in the name
if (values.name !== providerGroupData.name) {
updatedFields.name = values.name;
}
// Detect changes in providers
if (
JSON.stringify(values.providers) !==
JSON.stringify(providerGroupData.providers)
@@ -61,12 +63,14 @@ export const EditGroupForm = ({
updatedFields.providers = values.providers;
}
// Detect changes in roles
if (
JSON.stringify(values.roles) !== JSON.stringify(providerGroupData.roles)
) {
updatedFields.roles = values.roles;
}
// If no changes, notify the user and exit
if (Object.keys(updatedFields).length === 0) {
toast({
title: "No changes detected",
@@ -75,6 +79,7 @@ export const EditGroupForm = ({
return;
}
// Create FormData dynamically
const formData = new FormData();
if (updatedFields.name) {
formData.append("name", updatedFields.name);
@@ -94,6 +99,7 @@ export const EditGroupForm = ({
formData.append("roles", JSON.stringify(rolesData));
}
// Call the update action
const data = await updateProviderGroup(providerGroupId, formData);
if (data?.errors && data.errors.length > 0) {

View File

@@ -70,7 +70,7 @@ export const ColumnProviders: ColumnDef<ProviderProps>[] = [
const {
attributes: { uid },
} = getProviderData(row);
return <SnippetId className="h-7 max-w-48" entityId={uid} />;
return <SnippetId className="h-7" entityId={uid} />;
},
},
{

View File

@@ -22,6 +22,37 @@ import { RadioGroupProvider } from "../../radio-group-provider";
export type FormValues = z.infer<typeof addProviderFormSchema>;
// Helper function for labels and placeholders
const getProviderFieldDetails = (providerType?: string) => {
  // Label/placeholder pairs for the provider-UID input, keyed by provider type.
  const fieldDetailsByProvider: Record<
    string,
    { label: string; placeholder: string }
  > = {
    aws: {
      label: "Account ID",
      placeholder: "123456...",
    },
    gcp: {
      label: "Project ID",
      placeholder: "project_id...",
    },
    azure: {
      label: "Subscription ID",
      placeholder: "fc94207a-d396-4a14-a7fd-12a...",
    },
    kubernetes: {
      label: "Kubernetes Context",
      placeholder: "context_name....",
    },
  };

  // Unknown or missing provider types fall back to a generic prompt.
  return (
    fieldDetailsByProvider[providerType ?? ""] ?? {
      label: "Provider UID",
      placeholder: "Enter the provider UID",
    }
  );
};
export const ConnectAccountForm = () => {
const { toast } = useToast();
const [prevStep, setPrevStep] = useState(1);
@@ -39,6 +70,8 @@ export const ConnectAccountForm = () => {
});
const providerType = form.watch("providerType");
const providerFieldDetails = getProviderFieldDetails(providerType);
const isLoading = form.formState.isSubmitting;
const onSubmitClient = async (values: FormValues) => {
@@ -107,7 +140,12 @@ export const ConnectAccountForm = () => {
}
};
const handleBackStep = () => setPrevStep((prev) => prev - 1);
const handleBackStep = () => {
setPrevStep((prev) => prev - 1);
// Reset the providerUid and providerAlias fields when going back
form.setValue("providerUid", "");
form.setValue("providerAlias", "");
};
useEffect(() => {
if (providerType) {
@@ -144,9 +182,9 @@ export const ConnectAccountForm = () => {
control={form.control}
name="providerUid"
type="text"
label="Provider UID"
label={providerFieldDetails.label}
labelPlacement="inside"
placeholder="Enter the provider UID"
placeholder={providerFieldDetails.placeholder}
variant="bordered"
isRequired
isInvalid={!!form.formState.errors.providerUid}

View File

@@ -1,8 +1,9 @@
"use client";
import { zodResolver } from "@hookform/resolvers/zod";
import { Checkbox, Divider } from "@nextui-org/react";
import { SaveIcon } from "lucide-react";
import { Checkbox, Divider, Tooltip } from "@nextui-org/react";
import clsx from "clsx";
import { InfoIcon, SaveIcon } from "lucide-react";
import { useRouter } from "next/navigation";
import { useEffect } from "react";
import { Controller, useForm } from "react-hook-form";
@@ -16,6 +17,7 @@ import {
CustomInput,
} from "@/components/ui/custom";
import { Form } from "@/components/ui/form";
import { permissionFormFields } from "@/lib";
import { addRoleFormSchema, ApiError } from "@/types";
type FormValues = z.infer<typeof addRoleFormSchema>;
@@ -33,13 +35,13 @@ export const AddRoleForm = ({
defaultValues: {
name: "",
manage_users: false,
manage_account: false,
manage_billing: false,
manage_providers: false,
manage_integrations: false,
manage_scans: false,
unlimited_visibility: false,
groups: [],
...(process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true" && {
manage_billing: false,
}),
},
});
@@ -64,7 +66,7 @@ export const AddRoleForm = ({
"manage_account",
"manage_billing",
"manage_providers",
"manage_integrations",
// "manage_integrations",
"manage_scans",
"unlimited_visibility",
];
@@ -79,18 +81,22 @@ export const AddRoleForm = ({
const onSubmitClient = async (values: FormValues) => {
const formData = new FormData();
formData.append("name", values.name);
formData.append("manage_users", String(values.manage_users));
formData.append("manage_account", String(values.manage_account));
formData.append("manage_billing", String(values.manage_billing));
formData.append("manage_providers", String(values.manage_providers));
formData.append("manage_integrations", String(values.manage_integrations));
formData.append("manage_scans", String(values.manage_scans));
formData.append("manage_account", String(values.manage_account));
formData.append(
"unlimited_visibility",
String(values.unlimited_visibility),
);
// Conditionally append manage_account and manage_billing
if (process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true") {
formData.append("manage_billing", String(values.manage_billing));
}
if (values.groups && values.groups.length > 0) {
values.groups.forEach((group) => {
formData.append("groups[]", group);
@@ -134,21 +140,6 @@ export const AddRoleForm = ({
}
};
const permissions = [
{ field: "manage_users", label: "Invite and Manage Users" },
...(process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true"
? [
{ field: "manage_account", label: "Manage Account" },
{ field: "manage_billing", label: "Manage Billing" },
]
: []),
{ field: "manage_providers", label: "Manage Cloud Providers" },
// TODO: Add back when we have integrations ready
// { field: "manage_integrations", label: "Manage Integrations" },
{ field: "manage_scans", label: "Manage Scans" },
{ field: "unlimited_visibility", label: "Unlimited Visibility" },
];
return (
<Form {...form}>
<form
@@ -172,7 +163,7 @@ export const AddRoleForm = ({
{/* Select All Checkbox */}
<Checkbox
isSelected={permissions.every((perm) =>
isSelected={permissionFormFields.every((perm) =>
form.watch(perm.field as keyof FormValues),
)}
onChange={(e) => onSelectAllChange(e.target.checked)}
@@ -186,19 +177,37 @@ export const AddRoleForm = ({
{/* Permissions Grid */}
<div className="grid grid-cols-2 gap-4">
{permissions.map(({ field, label }) => (
<Checkbox
key={field}
{...form.register(field as keyof FormValues)}
isSelected={!!form.watch(field as keyof FormValues)}
classNames={{
label: "text-small",
wrapper: "checkbox-update",
}}
>
{label}
</Checkbox>
))}
{permissionFormFields
.filter(
(permission) =>
permission.field !== "manage_billing" ||
process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true",
)
.map(({ field, label, description }) => (
<div key={field} className="flex items-center gap-2">
<Checkbox
{...form.register(field as keyof FormValues)}
isSelected={!!form.watch(field as keyof FormValues)}
classNames={{
label: "text-small",
wrapper: "checkbox-update",
}}
>
{label}
</Checkbox>
<Tooltip content={description} placement="right">
<div className="flex w-fit items-center justify-center">
<InfoIcon
className={clsx(
"cursor-pointer text-default-400 group-data-[selected=true]:text-foreground",
)}
aria-hidden={"true"}
width={16}
/>
</div>
</Tooltip>
</div>
))}
</div>
</div>
<Divider className="my-4" />

View File

@@ -1,8 +1,9 @@
"use client";
import { zodResolver } from "@hookform/resolvers/zod";
import { Checkbox, Divider } from "@nextui-org/react";
import { SaveIcon } from "lucide-react";
import { Checkbox, Divider, Tooltip } from "@nextui-org/react";
import { clsx } from "clsx";
import { InfoIcon, SaveIcon } from "lucide-react";
import { useRouter } from "next/navigation";
import { useEffect } from "react";
import { Controller, useForm } from "react-hook-form";
@@ -16,6 +17,7 @@ import {
CustomInput,
} from "@/components/ui/custom";
import { Form } from "@/components/ui/form";
import { permissionFormFields } from "@/lib";
import { ApiError, editRoleFormSchema } from "@/types";
type FormValues = z.infer<typeof editRoleFormSchema>;
@@ -95,16 +97,21 @@ export const EditRoleForm = ({
}
updatedFields.manage_users = values.manage_users;
updatedFields.manage_account = values.manage_account;
updatedFields.manage_billing = values.manage_billing;
updatedFields.manage_providers = values.manage_providers;
updatedFields.manage_integrations = values.manage_integrations;
updatedFields.manage_account = values.manage_account;
// updatedFields.manage_integrations = values.manage_integrations;
updatedFields.manage_scans = values.manage_scans;
updatedFields.unlimited_visibility = values.unlimited_visibility;
if (process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true") {
updatedFields.manage_billing = values.manage_billing;
}
if (
JSON.stringify(values.groups) !==
JSON.stringify(roleData.data.relationships?.provider_groups?.data)
JSON.stringify(
roleData.data.relationships?.provider_groups?.data.map((g) => g.id),
)
) {
updatedFields.groups = values.groups;
}
@@ -157,21 +164,6 @@ export const EditRoleForm = ({
}
};
const permissions = [
{ field: "manage_users", label: "Invite and Manage Users" },
...(process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true"
? [
{ field: "manage_account", label: "Manage Account" },
{ field: "manage_billing", label: "Manage Billing" },
]
: []),
{ field: "manage_providers", label: "Manage Cloud Providers" },
// TODO: Add back when we have integrations ready
// { field: "manage_integrations", label: "Manage Integrations" },
{ field: "manage_scans", label: "Manage Scans" },
{ field: "unlimited_visibility", label: "Unlimited Visibility" },
];
return (
<Form {...form}>
<form
@@ -195,7 +187,7 @@ export const EditRoleForm = ({
{/* Select All Checkbox */}
<Checkbox
isSelected={permissions.every((perm) =>
isSelected={permissionFormFields.every((perm) =>
form.watch(perm.field as keyof FormValues),
)}
onChange={(e) => onSelectAllChange(e.target.checked)}
@@ -209,19 +201,37 @@ export const EditRoleForm = ({
{/* Permissions Grid */}
<div className="grid grid-cols-2 gap-4">
{permissions.map(({ field, label }) => (
<Checkbox
key={field}
{...form.register(field as keyof FormValues)}
isSelected={!!form.watch(field as keyof FormValues)}
classNames={{
label: "text-small",
wrapper: "checkbox-update",
}}
>
{label}
</Checkbox>
))}
{permissionFormFields
.filter(
(permission) =>
permission.field !== "manage_billing" ||
process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true",
)
.map(({ field, label, description }) => (
<div key={field} className="flex items-center gap-2">
<Checkbox
{...form.register(field as keyof FormValues)}
isSelected={!!form.watch(field as keyof FormValues)}
classNames={{
label: "text-small",
wrapper: "checkbox-update",
}}
>
{label}
</Checkbox>
<Tooltip content={description} placement="right">
<div className="flex w-fit items-center justify-center">
<InfoIcon
className={clsx(
"cursor-pointer text-default-400 group-data-[selected=true]:text-foreground",
)}
aria-hidden={"true"}
width={16}
/>
</div>
</Tooltip>
</div>
))}
</div>
</div>
<Divider className="my-4" />

View File

@@ -1,19 +1,18 @@
import { Alert, cn } from "@nextui-org/react";
"use client";
import { InfoIcon } from "../icons";
export const ScanWarningBar = () => {
return (
<Alert
color="warning"
title="Waiting for Your Scan to Show Up?"
description="Your scan is being processed and may take a few minutes to appear on the table. It will show up shortly."
variant="faded"
isClosable
classNames={{
base: cn([
"border-1 border-default-200 dark:border-default-100",
"gap-x-4",
]),
}}
/>
<div className="flex items-center rounded-lg border border-system-warning bg-system-warning-medium p-4 text-sm dark:text-default-300">
<InfoIcon className="mr-4 inline h-4 w-4 flex-shrink-0" />
<div className="flex flex-col gap-1">
<strong>Waiting for Your Scan to Show Up?</strong>
<p>
It may take a few minutes for the scan to appear on the table and be
displayed.
</p>
</div>
</div>
);
};

View File

@@ -31,66 +31,50 @@ export const CustomDropdownSelection: React.FC<
const [selectedValues, setSelectedValues] = useState<Set<string>>(
new Set(selectedKeys),
);
const allValues = values.map((item) => item.id);
const memoizedValues = useMemo(() => values, [values]);
const allValues = useMemo(() => values.map((item) => item.id), [values]);
// Update the internal state when selectedKeys changes from props
// Update internal state when selectedKeys changes
useEffect(() => {
const newSelection = new Set(selectedKeys);
if (
JSON.stringify(Array.from(selectedValues)) !==
JSON.stringify(Array.from(newSelection))
) {
if (selectedKeys.length === allValues.length) {
newSelection.add("all");
}
setSelectedValues(newSelection);
if (selectedKeys.length === allValues.length) {
newSelection.add("all");
}
}, [selectedKeys, allValues.length, selectedValues]);
setSelectedValues(newSelection);
}, [selectedKeys, allValues]);
const onSelectionChange = useCallback(
(keys: string[]) => {
setSelectedValues((prevSelected) => {
const newSelection = new Set(keys);
const newSelection = new Set(keys);
// If all values are selected and "all" is not included,
// add "all" automatically
if (
newSelection.size === allValues.length &&
!newSelection.has("all")
) {
return new Set(["all", ...allValues]);
} else if (prevSelected.has("all")) {
// If "all" was previously selected, remove it
if (newSelection.has("all")) {
// Handle "Select All" behavior
if (newSelection.size === allValues.length + 1) {
setSelectedValues(new Set(["all", ...allValues]));
onChange(name, allValues); // Exclude "all" in the callback
} else {
newSelection.delete("all");
return new Set(allValues.filter((key) => newSelection.has(key)));
setSelectedValues(newSelection);
onChange(name, Array.from(newSelection));
}
return newSelection;
});
// Notify the change without including "all"
const selectedValues = keys.filter((key) => key !== "all");
onChange(name, selectedValues);
} else {
setSelectedValues(newSelection);
onChange(name, Array.from(newSelection));
}
},
[allValues, name, onChange],
);
const handleSelectAllClick = useCallback(() => {
setSelectedValues((prevSelected: Set<string>) => {
const newSelection: Set<string> = prevSelected.has("all")
? new Set()
: new Set(["all", ...allValues]);
// Notify the change without including "all"
const selectedValues = Array.from(newSelection).filter(
(key) => key !== "all",
);
onChange(name, selectedValues);
return newSelection;
});
}, [allValues, name, onChange]);
if (selectedValues.has("all")) {
setSelectedValues(new Set());
onChange(name, []);
} else {
const newSelection = new Set(["all", ...allValues]);
setSelectedValues(newSelection);
onChange(name, allValues);
}
}, [allValues, name, onChange, selectedValues]);
return (
<div className="relative flex w-full flex-col gap-2">
@@ -119,7 +103,8 @@ export const CustomDropdownSelection: React.FC<
wrapper: "checkbox-update",
}}
value="all"
onClick={handleSelectAllClick}
isSelected={selectedValues.has("all")}
onChange={handleSelectAllClick}
>
Select All
</Checkbox>
@@ -128,7 +113,7 @@ export const CustomDropdownSelection: React.FC<
hideScrollBar
className="flex max-h-96 max-w-56 flex-col gap-y-2 py-2"
>
{memoizedValues.map(({ id, name }) => (
{values.map(({ id, name }) => (
<Checkbox
classNames={{
label: "text-small font-normal",

View File

@@ -1,4 +1,4 @@
import { Snippet } from "@nextui-org/react";
import { Snippet, Tooltip } from "@nextui-org/react";
import React from "react";
import { CopyIcon, DoneIcon, IdIcon } from "@/components/icons";
@@ -10,7 +10,7 @@ interface SnippetIdProps {
export const SnippetId: React.FC<SnippetIdProps> = ({ entityId, ...props }) => {
return (
<Snippet
className="flex h-4 items-center py-0"
className="flex h-6 items-center py-0"
color="default"
size="sm"
variant="flat"
@@ -22,9 +22,11 @@ export const SnippetId: React.FC<SnippetIdProps> = ({ entityId, ...props }) => {
>
<p className="flex items-center space-x-2">
<IdIcon size={18} />
<span className="no-scrollbar w-14 overflow-hidden overflow-x-scroll text-ellipsis whitespace-nowrap text-xs">
{entityId}
</span>
<Tooltip content={entityId} placement="top">
<span className="no-scrollbar w-24 overflow-hidden overflow-x-scroll text-ellipsis whitespace-nowrap text-xs">
{entityId}
</span>
</Tooltip>
</p>
</Snippet>
);

View File

@@ -1,5 +1,5 @@
import { getTask } from "@/actions/task";
import { MetaDataProps } from "@/types";
import { MetaDataProps, PermissionInfo } from "@/types";
export async function checkTaskStatus(
taskId: string,
@@ -183,3 +183,45 @@ export const regions = [
{ key: "us-west2", label: "GCP - US West (Los Angeles)" },
{ key: "us-east4", label: "GCP - US East (Northern Virginia)" },
];
/**
 * Role-permission checkboxes rendered by the add/edit role forms.
 *
 * Each entry maps a boolean field of the role form schema to its
 * checkbox label and the tooltip description shown next to it.
 * `manage_billing` is filtered out by the forms unless
 * NEXT_PUBLIC_IS_CLOUD_ENV is "true".
 */
export const permissionFormFields: PermissionInfo[] = [
  {
    field: "manage_users",
    label: "Invite and Manage Users",
    description: "Allows inviting new users and managing existing user details",
  },
  {
    field: "manage_account",
    label: "Manage Account",
    description: "Provides access to account settings and RBAC configuration",
  },
  {
    field: "unlimited_visibility",
    label: "Unlimited Visibility",
    description:
      // Fixed agreement error: "its related resources" -> "their related resources".
      "Provides complete visibility across all the providers and their related resources",
  },
  {
    field: "manage_providers",
    label: "Manage Cloud Providers",
    description:
      "Allows configuration and management of cloud provider connections",
  },
  // TODO: Add back when we have integrations ready
  // {
  //   field: "manage_integrations",
  //   label: "Manage Integrations",
  //   description:
  //     "Controls the setup and management of third-party integrations",
  // },
  {
    field: "manage_scans",
    label: "Manage Scans",
    // Fixed duplicated word: was "configuring scans security scans".
    description: "Allows launching and configuring security scans",
  },
  {
    field: "manage_billing",
    label: "Manage Billing",
    description: "Provides access to billing settings and invoices",
  },
];

6739
ui/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
{
"dependencies": {
"@hookform/resolvers": "^3.9.0",
"@nextui-org/react": "^2.6.11",
"@nextui-org/react": "2.4.8",
"@nextui-org/system": "2.2.1",
"@nextui-org/theme": "2.2.5",
"@radix-ui/react-alert-dialog": "^1.1.1",
@@ -89,5 +89,8 @@
"format:write": "./node_modules/.bin/prettier --config .prettierrc.json --write ./app",
"prepare": "husky"
},
"overrides": {
"@react-types/shared": "3.26.0"
},
"version": "0.0.1"
}

View File

@@ -122,6 +122,7 @@ module.exports = {
"0px -6px 24px #FFFFFF, 0px 7px 16px rgba(104, 132, 157, 0.5)",
up: "0.3rem 0.3rem 0.6rem #c8d0e7, -0.2rem -0.2rem 0.5rem #fff",
down: "inset 0.2rem 0.2rem 0.5rem #c8d0e7, inset -0.2rem -0.2rem 0.5rem #fff",
box: "rgba(0, 0, 0, 0.05) 0px 0px 0px 1px",
},
animation: {
"fade-in": "fade-in 200ms ease-out 0s 1 normal forwards running",

View File

@@ -26,6 +26,11 @@ export type NextUIColors =
| "danger"
| "default";
/**
 * Describes one role-permission entry used to build the permission
 * checkboxes in the role forms (see `permissionFormFields`).
 */
export interface PermissionInfo {
  /** Boolean field name in the role form schema, e.g. "manage_users". */
  field: string;
  /** Human-readable checkbox label. */
  label: string;
  /** Longer explanation shown in the tooltip next to the checkbox. */
  description: string;
}
export interface FindingsByStatusData {
data: {
type: "findings-overview";

View File

@@ -6,7 +6,7 @@ export const addRoleFormSchema = z.object({
manage_account: z.boolean().default(false),
manage_billing: z.boolean().default(false),
manage_providers: z.boolean().default(false),
manage_integrations: z.boolean().default(false),
// manage_integrations: z.boolean().default(false),
manage_scans: z.boolean().default(false),
unlimited_visibility: z.boolean().default(false),
groups: z.array(z.string()).optional(),
@@ -18,7 +18,7 @@ export const editRoleFormSchema = z.object({
manage_account: z.boolean().default(false),
manage_billing: z.boolean().default(false),
manage_providers: z.boolean().default(false),
manage_integrations: z.boolean().default(false),
// manage_integrations: z.boolean().default(false),
manage_scans: z.boolean().default(false),
unlimited_visibility: z.boolean().default(false),
groups: z.array(z.string()).optional(),