Compare commits

..

30 Commits

Author SHA1 Message Date
pedrooot 44150687e1 chore(changelog): update with latest changes 2026-04-16 17:35:22 +02:00
pedrooot 9843d850e1 fix(ui): disable Next.js 16 Server Function arg logging to stop leaking sign-in credentials 2026-04-16 17:32:38 +02:00
Alejandro Bailo 489c6c1073 fix: CHANGELOG minor issue (#10758) 2026-04-16 17:07:22 +02:00
Adrián Peña b08b072288 fix(api): exclude muted findings from pass_count, fail_count and manual_count (#10753) 2026-04-16 15:56:08 +02:00
Josema Camacho ca29e354b6 chore(deps): bump msgraph-sdk to 1.55.0 and azure-mgmt-resource to 24.0.0, remove marshmallow (#10733) 2026-04-16 15:34:28 +02:00
Alejandro Bailo 85a3927950 fix(ui): upgrade React 19.2.5 and Next.js 16.2.3 to mitigate CVE-2026-23869 (#10752) 2026-04-16 15:24:10 +02:00
Rubén De la Torre Vico 04fe3f65e0 chore(deps): enable Dependabot pre-commit ecosystem and bump hooks (#10732) 2026-04-16 13:38:11 +02:00
Andoni Alonso 297c9d0734 fix(sdk): move #10726 changelog entry to unreleased version (#10728) 2026-04-16 13:10:00 +02:00
Erich Blume a2a1a73749 fix(image): --registry-list crashes with AttributeError on global_provider (#10691)
Co-authored-by: Andoni A. <14891798+andoniaf@users.noreply.github.com>
2026-04-16 13:02:25 +02:00
lydiavilchez 08fbe17e29 fix(googleworkspace): treat secure Google defaults as PASS for Drive checks (#10727) 2026-04-16 13:01:55 +02:00
lydiavilchez d920f78059 fix(googleworkspace): treat secure Google defaults as PASS for Calendar checks (#10726) 2026-04-16 12:51:40 +02:00
Pepe Fagoaga 12bf3d5e70 fix(db): add missing tenant_id filter in queries (#10722) 2026-04-16 11:55:38 +02:00
Adrián Peña 4002c28b5d fix(api): add fallback handling for missing resources in findings (#10708) 2026-04-16 11:45:06 +02:00
Andoni Alonso 2439f54280 fix(sdk): allow account-scoped tokens in Cloudflare connection test (#10723) 2026-04-16 11:38:15 +02:00
Prowler Bot b0e59156e6 chore(ui): Bump version to v5.25.0 (#10711)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:14:46 +02:00
Prowler Bot f013bd4a53 docs: Update version to v5.24.0 (#10714)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:14:17 +02:00
Prowler Bot 6ad15f900f chore(release): Bump version to v5.25.0 (#10710)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:14:06 +02:00
Prowler Bot 1784bf38ab chore(api): Bump version to v1.26.0 (#10715)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2026-04-15 20:13:33 +02:00
Pepe Fagoaga ba5b23245f chore: review changelog for v5.24 (#10707) 2026-04-15 18:05:55 +02:00
Daniel Barranquero 43913b1592 feat(aws): support excluding regions from scans via CLI, env var, and config (#10688) 2026-04-15 17:59:46 +02:00
Alan Buscaglia 9e31160887 fix(ui): improve attack paths scan table UX and fix info banner variant (#10704)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2026-04-15 17:33:29 +02:00
Pepe Fagoaga 9a0c73256e chore: delete .opencode (#10702) 2026-04-15 15:10:40 +02:00
Alejandro Bailo 2a160a10df refactor(ui): remove legacy side drawers and clean code (#10692) 2026-04-15 13:55:57 +02:00
Alan Buscaglia 8d8bee165b feat(ui): improve attack paths scan selection UX (#10685) 2026-04-15 13:54:25 +02:00
Alan Buscaglia 606efec9f8 fix(ui): keep update credentials wizard open (#10675) 2026-04-15 13:50:20 +02:00
Alan Buscaglia d5354e8b1d feat(ui): add syntax highlighting to finding groups remediation code (#10698) 2026-04-15 12:58:35 +02:00
Rubén De la Torre Vico a96e5890dc docs: replace Excalidraw diagrams with Mermaid and fix architecture connections (#10697) 2026-04-15 12:51:29 +02:00
Pepe Fagoaga bb81c5dd2d docs: add contextual menu for copy and issue/feat (#10699) 2026-04-15 12:50:29 +02:00
Daniel Barranquero c3acb818d9 fix(vercel): handle team-scoped firewall config responses (#10695) 2026-04-15 11:59:20 +02:00
Andoni Alonso e6fc59267b docs: add Finding Groups documentation page (#10696)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2026-04-15 11:58:39 +02:00
138 changed files with 7185 additions and 5288 deletions
+1 -1
View File
@@ -145,7 +145,7 @@ SENTRY_RELEASE=local
NEXT_PUBLIC_SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}
#### Prowler release version ####
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.24.0
NEXT_PUBLIC_PROWLER_RELEASE_VERSION=v5.25.0
# Social login credentials
SOCIAL_GOOGLE_OAUTH_CALLBACK_URL="${AUTH_URL}/api/auth/callback/google"
+12
View File
@@ -66,6 +66,18 @@ updates:
cooldown:
default-days: 7
- package-ecosystem: "pre-commit"
directory: "/"
schedule:
interval: "monthly"
open-pull-requests-limit: 25
target-branch: master
labels:
- "dependencies"
- "pre-commit"
cooldown:
default-days: 7
# Dependabot Updates are temporary disabled - 2025/04/15
# v4.6
# - package-ecosystem: "pip"
+1
View File
@@ -84,6 +84,7 @@ continue.json
.continuerc.json
# AI Coding Assistants - OpenCode
.opencode/
opencode.json
# AI Coding Assistants - GitHub Copilot
+9 -9
View File
@@ -1,7 +1,7 @@
repos:
## GENERAL
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
rev: v6.0.0
hooks:
- id: check-merge-conflict
- id: check-yaml
@@ -16,7 +16,7 @@ repos:
## TOML
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
rev: v2.13.0
rev: v2.16.0
hooks:
- id: pretty-format-toml
args: [--autofix]
@@ -24,21 +24,21 @@ repos:
## GITHUB ACTIONS
- repo: https://github.com/zizmorcore/zizmor-pre-commit
rev: v1.6.0
rev: v1.24.1
hooks:
- id: zizmor
files: ^\.github/
## BASH
- repo: https://github.com/koalaman/shellcheck-precommit
rev: v0.10.0
rev: v0.11.0
hooks:
- id: shellcheck
exclude: contrib
## PYTHON
- repo: https://github.com/myint/autoflake
rev: v2.3.1
rev: v2.3.3
hooks:
- id: autoflake
exclude: ^skills/
@@ -50,20 +50,20 @@ repos:
]
- repo: https://github.com/pycqa/isort
rev: 5.13.2
rev: 8.0.1
hooks:
- id: isort
exclude: ^skills/
args: ["--profile", "black"]
- repo: https://github.com/psf/black
rev: 24.4.2
rev: 26.3.1
hooks:
- id: black
exclude: ^skills/
- repo: https://github.com/pycqa/flake8
rev: 7.0.0
rev: 7.3.0
hooks:
- id: flake8
exclude: (contrib|^skills/)
@@ -93,7 +93,7 @@ repos:
pass_filenames: false
- repo: https://github.com/hadolint/hadolint
rev: v2.13.0-beta
rev: v2.14.0
hooks:
- id: hadolint
args: ["--ignore=DL3013"]
+11 -1
View File
@@ -2,7 +2,16 @@
All notable changes to the **Prowler API** are documented in this file.
## [1.25.0] (Prowler UNRELEASED)
## [1.25.1] (Prowler v5.24.1)
### 🐞 Fixed
- Attack Paths: Missing `tenant_id` filter while getting related findings after scan completes [(#10722)](https://github.com/prowler-cloud/prowler/pull/10722)
- Finding group counters `pass_count`, `fail_count` and `manual_count` now exclude muted findings [(#10753)](https://github.com/prowler-cloud/prowler/pull/10753)
---
## [1.25.0] (Prowler v5.24.0)
### 🔄 Changed
@@ -13,6 +22,7 @@ All notable changes to the **Prowler API** are documented in this file.
- Worker-beat race condition on cold start: replaced `sleep 15` with API service healthcheck dependency (Docker Compose) and init containers (Helm), aligned Gunicorn default port to `8080` [(#10603)](https://github.com/prowler-cloud/prowler/pull/10603)
- API container startup crash on Linux due to root-owned bind-mount preventing JWT key generation [(#10646)](https://github.com/prowler-cloud/prowler/pull/10646)
- Finding group resources endpoints now include findings without associated resources (orphan IaC findings) as simulated resource rows, and return one row per finding when multiple findings share a resource [(#10708)](https://github.com/prowler-cloud/prowler/pull/10708)
### 🔐 Security
+1 -1
View File
@@ -50,7 +50,7 @@ name = "prowler-api"
package-mode = false
# Needed for the SDK compatibility
requires-python = ">=3.11,<3.13"
version = "1.25.0"
version = "1.26.0"
[project.scripts]
celery = "src.backend.config.settings.celery"
+1 -1
View File
@@ -1,7 +1,7 @@
openapi: 3.0.3
info:
title: Prowler API
version: 1.25.0
version: 1.26.0
description: |-
Prowler API specification.
+32 -1
View File
@@ -57,6 +57,7 @@ from api.models import (
ProviderGroupMembership,
ProviderSecret,
Resource,
ResourceFindingMapping,
Role,
RoleProviderGroupRelationship,
SAMLConfiguration,
@@ -15465,7 +15466,7 @@ class TestFindingGroupViewSet:
attrs = data[0]["attributes"]
assert attrs["status"] == "FAIL"
assert attrs["muted"] is True
assert attrs["fail_count"] == 2
assert attrs["fail_count"] == 0
assert attrs["fail_muted_count"] == 2
assert attrs["pass_muted_count"] == 0
assert attrs["manual_muted_count"] == 0
@@ -16030,6 +16031,36 @@ class TestFindingGroupViewSet:
# s3_bucket_public_access has 2 findings with 2 different resources
assert len(data) == 2
def test_resources_id_matches_resource_id_for_mapped_findings(
self, authenticated_client, finding_groups_fixture
):
"""Findings with a resource expose the resource id as row id (hot path contract)."""
response = authenticated_client.get(
reverse(
"finding-group-resources", kwargs={"pk": "s3_bucket_public_access"}
),
{"filter[inserted_at]": TODAY},
)
assert response.status_code == status.HTTP_200_OK
data = response.json()["data"]
assert data, "expected resources in response"
resource_ids = set(
ResourceFindingMapping.objects.filter(
finding__check_id="s3_bucket_public_access",
).values_list("resource_id", flat=True)
)
finding_ids = set(
Finding.objects.filter(
check_id="s3_bucket_public_access",
).values_list("id", flat=True)
)
returned_ids = {item["id"] for item in data}
assert returned_ids <= {str(rid) for rid in resource_ids}
assert returned_ids.isdisjoint({str(fid) for fid in finding_ids})
def test_resources_fields(self, authenticated_client, finding_groups_fixture):
"""Test resource fields (uid, name, service, region, type) have valid values."""
response = authenticated_client.get(
+3 -2
View File
@@ -4225,10 +4225,11 @@ class FindingGroupResourceSerializer(BaseSerializerV1):
Serializer for Finding Group Resources - resources within a finding group.
Returns individual resources with their current status, severity,
and timing information.
and timing information. Orphan findings (without any resource) expose the
finding id as `id` so the row stays identifiable in the UI.
"""
id = serializers.UUIDField(source="resource_id")
id = serializers.UUIDField(source="row_id")
resource = serializers.SerializerMethodField()
provider = serializers.SerializerMethodField()
finding_id = serializers.UUIDField()
+270 -58
View File
@@ -35,11 +35,13 @@ from django.db.models import (
CharField,
Count,
DecimalField,
Exists,
ExpressionWrapper,
F,
IntegerField,
Max,
Min,
OuterRef,
Prefetch,
Q,
QuerySet,
@@ -415,7 +417,7 @@ class SchemaView(SpectacularAPIView):
def get(self, request, *args, **kwargs):
spectacular_settings.TITLE = "Prowler API"
spectacular_settings.VERSION = "1.25.0"
spectacular_settings.VERSION = "1.26.0"
spectacular_settings.DESCRIPTION = (
"Prowler API specification.\n\nThis file is auto-generated."
)
@@ -7125,17 +7127,16 @@ class FindingGroupViewSet(BaseRLSViewSet):
output_field=IntegerField(),
)
# `pass_count`, `fail_count` and `manual_count` count *every* finding
# for the check (muted or not) so the aggregated `status` reflects the
# underlying check outcome regardless of mute state. Whether the group
# is actionable is signalled by the orthogonal `muted` flag below.
# `pass_count`, `fail_count` and `manual_count` only count non-muted
# findings. Muted findings are tracked separately via the
# `*_muted_count` fields.
return (
queryset.values("check_id")
.annotate(
severity_order=Max(severity_case),
pass_count=Count("id", filter=Q(status="PASS")),
fail_count=Count("id", filter=Q(status="FAIL")),
manual_count=Count("id", filter=Q(status="MANUAL")),
pass_count=Count("id", filter=Q(status="PASS", muted=False)),
fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
manual_count=Count("id", filter=Q(status="MANUAL", muted=False)),
pass_muted_count=Count("id", filter=Q(status="PASS", muted=True)),
fail_muted_count=Count("id", filter=Q(status="FAIL", muted=True)),
manual_muted_count=Count("id", filter=Q(status="MANUAL", muted=True)),
@@ -7280,12 +7281,14 @@ class FindingGroupViewSet(BaseRLSViewSet):
# finding-level aggregation path.
row.pop("nonmuted_count", None)
# Compute aggregated status. Counts are inclusive of muted findings,
# so the underlying check outcome surfaces even when the group is
# fully muted.
if row.get("fail_count", 0) > 0:
# Compute aggregated status from non-muted counts first, then
# fall back to muted counts so fully-muted groups still reflect
# the underlying check outcome.
total_fail = row.get("fail_count", 0) + row.get("fail_muted_count", 0)
total_pass = row.get("pass_count", 0) + row.get("pass_muted_count", 0)
if total_fail > 0:
row["status"] = "FAIL"
elif row.get("pass_count", 0) > 0:
elif total_pass > 0:
row["status"] = "PASS"
else:
row["status"] = "MANUAL"
@@ -7385,9 +7388,12 @@ class FindingGroupViewSet(BaseRLSViewSet):
if computed_params.get("status") or computed_params.getlist("status__in"):
queryset = queryset.annotate(
total_fail=F("fail_count") + F("fail_muted_count"),
total_pass=F("pass_count") + F("pass_muted_count"),
).annotate(
aggregated_status=Case(
When(fail_count__gt=0, then=Value("FAIL")),
When(pass_count__gt=0, then=Value("PASS")),
When(total_fail__gt=0, then=Value("FAIL")),
When(total_pass__gt=0, then=Value("PASS")),
default=Value("MANUAL"),
output_field=CharField(),
)
@@ -7578,6 +7584,53 @@ class FindingGroupViewSet(BaseRLSViewSet):
.order_by(*ordering)
)
def _orphan_findings_queryset(self, filtered_queryset, finding_ids=None):
"""Findings in the filtered set with no ResourceFindingMapping entries."""
orphan_qs = filtered_queryset.filter(
~Exists(ResourceFindingMapping.objects.filter(finding_id=OuterRef("pk")))
)
if finding_ids is not None:
orphan_qs = orphan_qs.filter(id__in=finding_ids)
return orphan_qs
def _has_orphan_findings(self, filtered_queryset) -> bool:
"""Return True if any finding in the filtered set has no resource mapping."""
return self._orphan_findings_queryset(filtered_queryset).exists()
def _orphan_aggregation_values(self, orphan_queryset):
"""Raw rows for orphan findings; resource payload synthesized from metadata.
check_metadata is stored with lowercase keys (see
`prowler.lib.outputs.finding.Finding.get_metadata`) and
`Finding.resource_groups` is already denormalized at ingest time.
"""
return orphan_queryset.annotate(
_provider_type=F("scan__provider__provider"),
_provider_uid=F("scan__provider__uid"),
_provider_alias=F("scan__provider__alias"),
_svc=KeyTextTransform("servicename", "check_metadata"),
_region=KeyTextTransform("region", "check_metadata"),
_rtype=KeyTextTransform("resourcetype", "check_metadata"),
_rgroup=F("resource_groups"),
).values(
"id",
"uid",
"status",
"severity",
"delta",
"muted",
"muted_reason",
"first_seen_at",
"inserted_at",
"_provider_type",
"_provider_uid",
"_provider_alias",
"_svc",
"_region",
"_rtype",
"_rgroup",
)
def _post_process_resources(self, resource_data):
"""Convert resource aggregation rows to API output."""
results = []
@@ -7599,9 +7652,13 @@ class FindingGroupViewSet(BaseRLSViewSet):
else:
delta = None
resource_id = row["resource_id"]
finding_id = str(row["finding_id"]) if row.get("finding_id") else None
results.append(
{
"resource_id": row["resource_id"],
"row_id": resource_id,
"resource_id": resource_id,
"resource_uid": row["resource_uid"],
"resource_name": row["resource_name"],
"resource_service": row["resource_service"],
@@ -7620,9 +7677,46 @@ class FindingGroupViewSet(BaseRLSViewSet):
"muted": bool(row.get("muted", False)),
"muted_reason": row.get("muted_reason"),
"resource_group": row.get("resource_group", ""),
"finding_id": (
str(row["finding_id"]) if row.get("finding_id") else None
),
"finding_id": finding_id,
}
)
return results
def _post_process_orphans(self, orphan_rows):
"""Convert orphan finding rows into the same API shape as mapping rows."""
results = []
for row in orphan_rows:
status_val = row["status"]
status = status_val if status_val in ("FAIL", "PASS") else "MANUAL"
muted = bool(row["muted"])
delta_val = row.get("delta")
delta = delta_val if delta_val in ("new", "changed") and not muted else None
finding_id = str(row["id"])
results.append(
{
"row_id": finding_id,
"resource_id": None,
"resource_uid": row["uid"],
"resource_name": row["uid"],
"resource_service": row["_svc"] or "",
"resource_region": row["_region"] or "",
"resource_type": row["_rtype"] or "",
"provider_type": row["_provider_type"],
"provider_uid": row["_provider_uid"],
"provider_alias": row["_provider_alias"],
"status": status,
"severity": row["severity"],
"delta": delta,
"first_seen_at": row["first_seen_at"],
"last_seen_at": row["inserted_at"],
"muted": muted,
"muted_reason": row.get("muted_reason"),
"resource_group": row["_rgroup"] or "",
"finding_id": finding_id,
}
)
@@ -7683,16 +7777,14 @@ class FindingGroupViewSet(BaseRLSViewSet):
sort_param, self._FINDING_GROUP_SORT_MAP
)
if ordering:
# status_order is annotated on demand so groups can be sorted by
# their aggregated status (FAIL > PASS > MANUAL), mirroring the
# priority used in _post_process_aggregation. Counts are
# inclusive of muted findings, so the underlying check outcome
# surfaces even for fully muted groups.
if any(field.lstrip("-") == "status_order" for field in ordering):
aggregated_queryset = aggregated_queryset.annotate(
total_fail_for_sort=F("fail_count") + F("fail_muted_count"),
total_pass_for_sort=F("pass_count") + F("pass_muted_count"),
).annotate(
status_order=Case(
When(fail_count__gt=0, then=Value(3)),
When(pass_count__gt=0, then=Value(2)),
When(total_fail_for_sort__gt=0, then=Value(3)),
When(total_pass_for_sort__gt=0, then=Value(2)),
default=Value(1),
output_field=IntegerField(),
)
@@ -7731,41 +7823,64 @@ class FindingGroupViewSet(BaseRLSViewSet):
def _paginated_resource_response(
self, request, filtered_queryset, resource_ids, tenant_id
):
"""Paginate and return resources.
"""Paginate and return resources, appending orphan findings when present.
Without sort: paginate lightweight resource IDs first, aggregate only the page.
With sort: build a lightweight ordering subquery (resource_id + sort keys),
paginate that, then aggregate full details only for the page.
Hot path (no orphans, or resource filter applied): resources come from
ResourceFindingMapping aggregation. Untouched pre-existing behaviour.
Orphan fallback: findings without a mapping (e.g. IaC) are appended
after mapping rows as synthesised resource-like rows so they remain
visible in the UI without paying the aggregation cost on the hot path.
"""
sort_param = request.query_params.get("sort")
ordering = None
if sort_param:
ordering = self._validate_sort_fields(sort_param, self._RESOURCE_SORT_MAP)
if ordering:
if "resource_id" not in {field.lstrip("-") for field in ordering}:
ordering.append("resource_id")
validated = self._validate_sort_fields(sort_param, self._RESOURCE_SORT_MAP)
ordering = validated if validated else None
# Phase 1: lightweight aggregation with only sort keys, paginate
ordering_qs = self._build_resource_ordering_queryset(
filtered_queryset,
resource_ids=resource_ids,
tenant_id=tenant_id,
ordering=ordering,
)
page = self.paginate_queryset(ordering_qs)
if page is not None:
page_ids = [row["resource_id"] for row in page]
resource_data = self._build_resource_aggregation(
filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
)
# Re-sort to match the page ordering
id_order = {rid: idx for idx, rid in enumerate(page_ids)}
results = self._post_process_resources(resource_data)
results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
serializer = FindingGroupResourceSerializer(results, many=True)
return self.get_paginated_response(serializer.data)
# Resource filters can only match findings with resources; skip orphan
# detection entirely when they are present.
if resource_ids is not None:
return self._mapping_paginated_response(
request, filtered_queryset, resource_ids, tenant_id, ordering
)
page_ids = [row["resource_id"] for row in ordering_qs]
has_mappings = self._build_resource_mapping_queryset(
filtered_queryset, resource_ids=None, tenant_id=tenant_id
).exists()
if has_mappings:
# Normal or mixed group: serve only resource-mapped rows.
# TODO: Orphan findings in mixed groups are intentionally excluded
# until the ephemeral resources strategy is decided. When resolved,
# route mixed groups to _combined_paginated_response instead.
return self._mapping_paginated_response(
request, filtered_queryset, resource_ids, tenant_id, ordering
)
# Pure orphan group (e.g. IaC): synthesize resource-like rows.
return self._combined_paginated_response(
request, filtered_queryset, tenant_id, ordering
)
def _mapping_paginated_response(
self, request, filtered_queryset, resource_ids, tenant_id, ordering
):
"""Mapping-only paginated response (original fast path)."""
if ordering:
if "resource_id" not in {field.lstrip("-") for field in ordering}:
ordering.append("resource_id")
# Phase 1: lightweight aggregation with only sort keys, paginate
ordering_qs = self._build_resource_ordering_queryset(
filtered_queryset,
resource_ids=resource_ids,
tenant_id=tenant_id,
ordering=ordering,
)
page = self.paginate_queryset(ordering_qs)
if page is not None:
page_ids = [row["resource_id"] for row in page]
resource_data = self._build_resource_aggregation(
filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
)
@@ -7773,10 +7888,18 @@ class FindingGroupViewSet(BaseRLSViewSet):
results = self._post_process_resources(resource_data)
results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
serializer = FindingGroupResourceSerializer(results, many=True)
return Response(serializer.data)
return self.get_paginated_response(serializer.data)
page_ids = [row["resource_id"] for row in ordering_qs]
resource_data = self._build_resource_aggregation(
filtered_queryset, resource_ids=page_ids, tenant_id=tenant_id
)
id_order = {rid: idx for idx, rid in enumerate(page_ids)}
results = self._post_process_resources(resource_data)
results.sort(key=lambda r: id_order.get(r["resource_id"], 0))
serializer = FindingGroupResourceSerializer(results, many=True)
return Response(serializer.data)
# No sort (or only empty sort fragments): paginate lightweight resource IDs
# first, aggregate only the page.
mapping_qs = self._build_resource_mapping_queryset(
filtered_queryset, resource_ids=resource_ids, tenant_id=tenant_id
)
@@ -7804,6 +7927,95 @@ class FindingGroupViewSet(BaseRLSViewSet):
serializer = FindingGroupResourceSerializer(results, many=True)
return Response(serializer.data)
def _combined_paginated_response(
self, request, filtered_queryset, tenant_id, ordering
):
"""Mapping rows + orphan findings appended at end.
Orphans sit after mapping rows regardless of sort. This keeps the
mapping-only code path intact for checks that have no orphans (the
common case) and avoids paying UNION/coalesce costs there.
"""
mapping_qs = self._build_resource_mapping_queryset(
filtered_queryset, resource_ids=None, tenant_id=tenant_id
)
mapping_count = mapping_qs.values("resource_id").distinct().count()
orphan_ids = list(
self._orphan_findings_queryset(filtered_queryset)
.order_by("id")
.values_list("id", flat=True)
)
orphan_count = len(orphan_ids)
total = mapping_count + orphan_count
# Paginate a simple [0..total) index sequence so DRF produces proper
# links/meta; then slice mapping / orphan sources accordingly.
page = self.paginate_queryset(range(total))
page_indices = list(page) if page is not None else list(range(total))
mapping_indices = [i for i in page_indices if i < mapping_count]
orphan_positions = [
i - mapping_count for i in page_indices if i >= mapping_count
]
mapping_results = []
if mapping_indices:
start = mapping_indices[0]
stop = mapping_indices[-1] + 1
if ordering:
ordering_fields = list(ordering)
if "resource_id" not in {
field.lstrip("-") for field in ordering_fields
}:
ordering_fields.append("resource_id")
ordered_qs = self._build_resource_ordering_queryset(
filtered_queryset,
resource_ids=None,
tenant_id=tenant_id,
ordering=ordering_fields,
)
slice_rids = [row["resource_id"] for row in ordered_qs[start:stop]]
else:
slice_rids = list(
mapping_qs.values_list("resource_id", flat=True)
.distinct()
.order_by("resource_id")[start:stop]
)
if slice_rids:
resource_data = self._build_resource_aggregation(
filtered_queryset,
resource_ids=slice_rids,
tenant_id=tenant_id,
)
rows_by_rid = {row["resource_id"]: row for row in resource_data}
ordered_rows = [
rows_by_rid[rid] for rid in slice_rids if rid in rows_by_rid
]
mapping_results = self._post_process_resources(ordered_rows)
orphan_results = []
if orphan_positions:
slice_fids = [orphan_ids[pos] for pos in orphan_positions]
raw_rows = list(
self._orphan_aggregation_values(
self._orphan_findings_queryset(
filtered_queryset, finding_ids=slice_fids
)
)
)
rows_by_fid = {row["id"]: row for row in raw_rows}
ordered_rows = [
rows_by_fid[fid] for fid in slice_fids if fid in rows_by_fid
]
orphan_results = self._post_process_orphans(ordered_rows)
results = mapping_results + orphan_results
serializer = FindingGroupResourceSerializer(results, many=True)
if page is not None:
return self.get_paginated_response(serializer.data)
return Response(serializer.data)
def list(self, request, *args, **kwargs):
"""
List finding groups with aggregation and filtering.
@@ -248,7 +248,9 @@ def _fetch_findings_batch(
with rls_transaction(tenant_id, using=READ_REPLICA_ALIAS):
# Use `all_objects` to get `Findings` even on soft-deleted `Providers`
# But even the provider is already validated as active in this context
qs = FindingModel.all_objects.filter(scan_id=scan_id).order_by("id")
qs = FindingModel.all_objects.filter(
tenant_id=tenant_id, scan_id=scan_id
).order_by("id")
if after_id is not None:
qs = qs.filter(id__gt=after_id)
+6 -8
View File
@@ -1804,11 +1804,9 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
)
# Aggregate findings by check_id for this scan.
# `pass_count`, `fail_count` and `manual_count` count *every* finding
# in this group, regardless of mute state, so the aggregated `status`
# always reflects the underlying check outcome (FAIL > PASS > MANUAL)
# even when the group is fully muted. The orthogonal `muted` flag is
# what tells whether the group has any actionable (non-muted) findings.
# `pass_count`, `fail_count` and `manual_count` only count non-muted
# findings. Muted findings are tracked separately via the
# `*_muted_count` fields.
aggregated = (
Finding.objects.filter(
tenant_id=tenant_id,
@@ -1817,9 +1815,9 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
.values("check_id")
.annotate(
severity_order=Max(severity_case),
pass_count=Count("id", filter=Q(status="PASS")),
fail_count=Count("id", filter=Q(status="FAIL")),
manual_count=Count("id", filter=Q(status="MANUAL")),
pass_count=Count("id", filter=Q(status="PASS", muted=False)),
fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
manual_count=Count("id", filter=Q(status="MANUAL", muted=False)),
pass_muted_count=Count("id", filter=Q(status="PASS", muted=True)),
fail_muted_count=Count("id", filter=Q(status="FAIL", muted=True)),
manual_muted_count=Count("id", filter=Q(status="MANUAL", muted=True)),
@@ -15,8 +15,7 @@ This document describes the internal architecture of Prowler Lighthouse AI, enab
Lighthouse AI operates as a Langchain-based agent that connects Large Language Models (LLMs) with Prowler security data through the Model Context Protocol (MCP).
<img className="block dark:hidden" src="/images/lighthouse-architecture-light.png" alt="Prowler Lighthouse Architecture" />
<img className="hidden dark:block" src="/images/lighthouse-architecture-dark.png" alt="Prowler Lighthouse Architecture" />
![Prowler Lighthouse Architecture](/images/lighthouse-architecture.png)
### Three-Tier Architecture
+19
View File
@@ -12,6 +12,24 @@
"dark": "/images/prowler-logo-white.png",
"light": "/images/prowler-logo-black.png"
},
"contextual": {
"options": [
"copy",
"view",
{
"title": "Request a feature",
"description": "Open a feature request on GitHub",
"icon": "plus",
"href": "https://github.com/prowler-cloud/prowler/issues/new?template=feature-request.yml"
},
{
"title": "Report an issue",
"description": "Open a bug report on GitHub",
"icon": "bug",
"href": "https://github.com/prowler-cloud/prowler/issues/new?template=bug_report.yml"
}
]
},
"navigation": {
"tabs": [
{
@@ -133,6 +151,7 @@
]
},
"user-guide/tutorials/prowler-app-attack-paths",
"user-guide/tutorials/prowler-app-finding-groups",
"user-guide/tutorials/prowler-cloud-public-ips",
{
"group": "Tutorials",
@@ -121,8 +121,8 @@ To update the environment file:
Edit the `.env` file and change version values:
```env
PROWLER_UI_VERSION="5.23.0"
PROWLER_API_VERSION="5.23.0"
PROWLER_UI_VERSION="5.24.0"
PROWLER_API_VERSION="5.24.0"
```
<Note>
@@ -59,6 +59,10 @@ Prowler Lighthouse AI is powerful, but there are limitations:
- **NextJS session dependence**: If your Prowler application session expires or logs out, Lighthouse AI will error out. Refresh and log back in to continue.
- **Response quality**: The response quality depends on the selected LLM provider and model. Choose models with strong tool-calling capabilities for best results. We recommend `gpt-5` model from OpenAI.
## Architecture
![Prowler Lighthouse Architecture](/images/lighthouse-architecture.png)
## Extending Lighthouse AI
Lighthouse AI retrieves data through Prowler MCP. To add new capabilities, extend the Prowler MCP Server with additional tools and Lighthouse AI discovers them automatically.
@@ -46,8 +46,7 @@ Search and retrieve official Prowler documentation:
The following diagram illustrates the Prowler MCP Server architecture and its integration points:
<img className="block dark:hidden" src="/images/prowler_mcp_schema_light.png" alt="Prowler MCP Server Schema" />
<img className="hidden dark:block" src="/images/prowler_mcp_schema_dark.png" alt="Prowler MCP Server Schema" />
![Prowler MCP Server Schema](/images/prowler_mcp_schema.png)
The architecture shows how AI assistants connect through the MCP protocol to access Prowler's three main components:
- Prowler Cloud/App for security operations
Binary file not shown.

After

Width:  |  Height:  |  Size: 755 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 146 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 340 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 410 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 267 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 265 KiB

+37
View File
@@ -0,0 +1,37 @@
flowchart TB
browser([Browser])
subgraph NEXTJS["Next.js Server"]
route["API Route<br/>(auth + context assembly)"]
agent["LangChain Agent"]
subgraph TOOLS["Agent Tools"]
metatools["Meta-tools<br/>describe_tool / execute_tool / load_skill"]
end
mcpclient["MCP Client<br/>(HTTP transport)"]
end
llm["LLM Provider<br/>(OpenAI / Bedrock / OpenAI-compatible)"]
subgraph MCP["Prowler MCP Server"]
app_tools["prowler_app_* tools<br/>(auth required)"]
hub_tools["prowler_hub_* tools<br/>(no auth)"]
docs_tools["prowler_docs_* tools<br/>(no auth)"]
end
api["Prowler API"]
hub["hub.prowler.com"]
docs["docs.prowler.com<br/>(Mintlify)"]
browser <-->|SSE stream| route
route --> agent
agent <-->|LLM API| llm
agent --> metatools
metatools --> mcpclient
mcpclient -->|MCP HTTP · Bearer token<br/>for prowler_app_* only| app_tools
mcpclient -->|MCP HTTP| hub_tools
mcpclient -->|MCP HTTP| docs_tools
app_tools -->|REST| api
hub_tools -->|REST| hub
docs_tools -->|REST| docs
Binary file not shown.

After

Width:  |  Height:  |  Size: 286 KiB

@@ -23,6 +23,8 @@ flowchart TB
user --> ui
user --> cli
ui -->|REST| api
ui -->|MCP HTTP| mcp
mcp -->|REST| api
api --> pg
api --> valkey
beat -->|enqueue jobs| valkey
@@ -31,7 +33,5 @@ flowchart TB
worker -->|Attack Paths| neo4j
worker -->|invokes| sdk
cli --> sdk
api -. AI tools .-> mcp
mcp -. context .-> api
sdk --> providers
Binary file not shown.

Before

Width:  |  Height:  |  Size: 268 KiB

After

Width:  |  Height:  |  Size: 348 KiB

+29
View File
@@ -0,0 +1,29 @@
flowchart LR
subgraph HOSTS["MCP Hosts"]
chat["Chat Interfaces<br/>(Claude Desktop, LobeChat)"]
ide["IDEs and Code Editors<br/>(Claude Code, Cursor)"]
apps["Other AI Applications<br/>(5ire, custom agents)"]
end
subgraph MCP["Prowler MCP Server"]
app_tools["prowler_app_* tools<br/>(JWT or API key auth)<br/>Findings · Providers · Scans<br/>Resources · Muting · Compliance<br/>Attack Paths"]
hub_tools["prowler_hub_* tools<br/>(no auth)<br/>Checks Catalog · Check Code<br/>Fixers · Compliance Frameworks"]
docs_tools["prowler_docs_* tools<br/>(no auth)<br/>Search · Document Retrieval"]
end
api["Prowler API<br/>(REST)"]
hub["hub.prowler.com<br/>(REST)"]
docs["docs.prowler.com<br/>(Mintlify)"]
chat -->|STDIO or HTTP| app_tools
chat -->|STDIO or HTTP| hub_tools
chat -->|STDIO or HTTP| docs_tools
ide -->|STDIO or HTTP| app_tools
ide -->|STDIO or HTTP| hub_tools
ide -->|STDIO or HTTP| docs_tools
apps -->|STDIO or HTTP| app_tools
apps -->|STDIO or HTTP| hub_tools
apps -->|STDIO or HTTP| docs_tools
app_tools -->|REST| api
hub_tools -->|REST| hub
docs_tools -->|REST| docs
Binary file not shown.

After

Width:  |  Height:  |  Size: 371 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 328 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 332 KiB

@@ -33,6 +33,41 @@ To scan a particular AWS region with Prowler, use:
prowler aws -f/--region eu-west-1 us-east-1
```
### Excluding Specific Regions
To scan all supported AWS regions except a specific subset, use the `--excluded-region` flag:
```console
prowler aws --excluded-region eu-west-1 me-south-1
```
You can also configure the exclusion list with the `PROWLER_AWS_DISALLOWED_REGIONS` environment variable as a comma-separated list:
```console
export PROWLER_AWS_DISALLOWED_REGIONS="eu-west-1,me-south-1"
prowler aws
```
Or with the AWS provider configuration in `config.yaml`:
```yaml
aws:
disallowed_regions:
- eu-west-1
- me-south-1
```
When more than one source is set, precedence is:
1. `--excluded-region`
2. `PROWLER_AWS_DISALLOWED_REGIONS`
3. `aws.disallowed_regions` in `config.yaml`
<Note>
For self-hosted App or API-triggered scans, set `PROWLER_AWS_DISALLOWED_REGIONS` in the runtime environment of the backend scan containers such as `api` and `worker`. The `ui` container does not enforce AWS region selection.
</Note>
### AWS Credentials Configuration
For details on configuring AWS credentials, refer to the following [Botocore](https://github.com/boto/botocore) [file](https://github.com/boto/botocore/blob/22a19ea7c4c2c4dd7df4ab8c32733cba0c7597a4/botocore/data/partitions.json).
@@ -0,0 +1,119 @@
---
title: 'Finding Groups'
description: 'Organize and triage security findings by check to reduce noise and prioritize remediation effectively.'
---
import { VersionBadge } from "/snippets/version-badge.mdx"
<VersionBadge version="5.23.0" />
Finding Groups transforms security findings triage by grouping them by check instead of displaying a flat list. This dramatically reduces noise and enables faster, more effective prioritization.
## Triage Challenges with Flat Finding Lists
A real cloud environment produces thousands of findings per scan. A flat list makes it impossible to triage effectively:
- **Signal buried in noise**: the same misconfiguration repeated across 200 resources shows up as 200 rows, burying the signal in repetitive data
- **Prioritization guesswork**: without grouping, understanding which issues affect the most resources requires manual counting and correlation
- **Tedious muting**: muting a false positive globally requires manually acting on each individual finding across the list
- **Lost context**: when investigating a single resource, related findings are scattered across the same flat list, making it hard to see the full picture
## How Finding Groups Addresses These Challenges
Finding Groups addresses these challenges by intelligently grouping findings by check.
### Grouped View at a Glance
Each row represents a single check title with key information immediately visible:
- **Severity** indicator for quick risk assessment
- **Impacted providers** showing which cloud platforms are affected
- **X of Y impacted resources** counter displaying how many resources fail this check
For example, `Vercel project has the Web Application Firewall enabled` across every affected project collapses to a single row — not one per project. Sort or filter by severity, provider, or status at the group level to triage top-down instead of drowning in per-resource rows.
![Finding Groups list view](/images/finding-groups-list.png)
### Expanding Groups for Details
Expand any group inline to see the failing resources with detailed information:
| Column | Description |
|--------|-------------|
| **UID** | Unique identifier for the resource |
| **Service** | The cloud service the resource belongs to |
| **Region** | Geographic region where the resource is deployed |
| **Severity** | Risk level of the finding |
| **Provider** | Cloud provider (AWS, Azure, GCP, Kubernetes, etc.) |
| **Last Seen** | When the finding was last detected |
| **Failing For** | Duration the resource has been in a failing state |
![Finding Groups expanded view](/images/finding-groups-expanded.png)
### Resource Detail Drawer
Select any resource to open the detail drawer with full finding context:
- **Risk**: the security risk associated with this finding
- **Description**: detailed explanation of what was detected
- **Status Extended**: additional status information and context
- **Remediation**: step-by-step guidance to resolve the issue
- **View in Prowler Hub**: direct link to explore the check in Prowler Hub
- **Analyze This Finding With Lighthouse AI**: one-click AI-powered analysis for deeper insights
![Finding Groups resource detail drawer](/images/finding-groups-drawer.png)
### Bulk Actions
Bulk-mute an entire group instead of chasing duplicates across the list. This is especially useful for:
- Known false positives that appear across many resources
- Findings in development or test environments
- Accepted risks that have been documented and approved
<Warning>
Muting findings does not resolve underlying security issues. Review each finding carefully before muting to ensure it represents an acceptable risk or has been properly addressed.
</Warning>
## Other Findings for This Resource
Inside the resource detail drawer, the **Other Findings For This Resource** tab lists every finding that hits the same resource — passing, failing, and muted — alongside the one currently being reviewed.
![Other Findings For This Resource tab](/images/finding-groups-other-findings.png)
### Why This Matters
When reviewing "WAF not enabled" on a Vercel project, the tab immediately shows:
- Skew protection status
- Rate limiting configuration
- IP blocking settings
- Custom firewall rules
- Password protection findings
All for that same project, without navigating back to the main list and filtering by resource UID.
### Complete Context Within the Drawer
Pair the Other Findings tab with:
- **Scans tab**: scan history for this resource
- **Events tab**: changes and events over time
This provides full context without leaving the drawer.
## Best Practices
1. **Start with high severity groups**: focus on critical and high severity groups first for maximum impact.
2. **Use filters strategically**: filter by provider or status at the group level to narrow the triage scope.
3. **Leverage bulk mute**: when a finding represents a confirmed false positive, mute the entire group at once.
4. **Check related findings**: review the Other Findings tab to understand the full security posture of a resource.
5. **Track failure duration**: use the "Failing For" column to prioritize long-standing issues that may indicate systemic problems.
## Getting Started
1. Navigate to the **Findings** section in Prowler Cloud/App.
2. Toggle to the **Grouped View** to see findings organized by check.
3. Select any group row to expand and see affected resources.
4. Select a resource to open the detail drawer with full context.
5. Use the **Other Findings For This Resource** tab to see all findings for that resource.
@@ -25,8 +25,7 @@ Behind the scenes, Lighthouse AI works as follows:
Lighthouse AI supports multiple LLM providers including OpenAI, Amazon Bedrock, and OpenAI-compatible services. For configuration details, see [Using Multiple LLM Providers with Lighthouse](/user-guide/tutorials/prowler-app-lighthouse-multi-llm).
</Note>
<img className="block dark:hidden" src="/images/lighthouse-architecture-light.png" alt="Prowler Lighthouse Architecture" />
<img className="hidden dark:block" src="/images/lighthouse-architecture-dark.png" alt="Prowler Lighthouse Architecture" />
![Prowler Lighthouse Architecture](/images/lighthouse-architecture.png)
<Note>
Generated
+74 -20
View File
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.3.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.
[[package]]
name = "about-time"
@@ -1267,19 +1267,19 @@ typing-extensions = ">=4.6.0"
[[package]]
name = "azure-mgmt-resource"
version = "23.3.0"
version = "24.0.0"
description = "Microsoft Azure Resource Management Client Library for Python"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "azure_mgmt_resource-23.3.0-py3-none-any.whl", hash = "sha256:ab216ee28e29db6654b989746e0c85a1181f66653929d2cb6e48fba66d9af323"},
{file = "azure_mgmt_resource-23.3.0.tar.gz", hash = "sha256:fc4f1fd8b6aad23f8af4ed1f913df5f5c92df117449dc354fea6802a2829fea4"},
{file = "azure_mgmt_resource-24.0.0-py3-none-any.whl", hash = "sha256:27b32cd223e2784269f5a0db3c282042886ee4072d79cedc638438ece7cd0df4"},
{file = "azure_mgmt_resource-24.0.0.tar.gz", hash = "sha256:cf6b8995fcdd407ac9ff1dd474087129429a1d90dbb1ac77f97c19b96237b265"},
]
[package.dependencies]
azure-common = ">=1.1"
azure-mgmt-core = ">=1.3.2"
azure-mgmt-core = ">=1.5.0"
isodate = ">=0.6.1"
typing-extensions = ">=4.6.0"
@@ -1425,6 +1425,64 @@ typing-extensions = ">=4.6.0"
[package.extras]
aio = ["azure-core[aio] (>=1.30.0)"]
[[package]]
name = "backports-datetime-fromisoformat"
version = "2.0.3"
description = "Backport of Python 3.11's datetime.fromisoformat"
optional = false
python-versions = ">3"
groups = ["dev"]
markers = "python_version == \"3.10\""
files = [
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f681f638f10588fa3c101ee9ae2b63d3734713202ddfcfb6ec6cea0778a29d4"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:cd681460e9142f1249408e5aee6d178c6d89b49e06d44913c8fdfb6defda8d1c"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:ee68bc8735ae5058695b76d3bb2aee1d137c052a11c8303f1e966aa23b72b65b"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8273fe7932db65d952a43e238318966eab9e49e8dd546550a41df12175cc2be4"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39d57ea50aa5a524bb239688adc1d1d824c31b6094ebd39aa164d6cadb85de22"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ac6272f87693e78209dc72e84cf9ab58052027733cd0721c55356d3c881791cf"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:44c497a71f80cd2bcfc26faae8857cf8e79388e3d5fbf79d2354b8c360547d58"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:6335a4c9e8af329cb1ded5ab41a666e1448116161905a94e054f205aa6d263bc"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2e4b66e017253cdbe5a1de49e0eecff3f66cd72bcb1229d7db6e6b1832c0443"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:43e2d648e150777e13bbc2549cc960373e37bf65bd8a5d2e0cef40e16e5d8dd0"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:4ce6326fd86d5bae37813c7bf1543bae9e4c215ec6f5afe4c518be2635e2e005"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7c8fac333bf860208fd522a5394369ee3c790d0aa4311f515fcc4b6c5ef8d75"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24a4da5ab3aa0cc293dc0662a0c6d1da1a011dc1edcbc3122a288cfed13a0b45"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58ea11e3bf912bd0a36b0519eae2c5b560b3cb972ea756e66b73fb9be460af01"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8a375c7dbee4734318714a799b6c697223e4bbb57232af37fbfff88fb48a14c6"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:ac677b1664c4585c2e014739f6678137c8336815406052349c85898206ec7061"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66ce47ee1ba91e146149cf40565c3d750ea1be94faf660ca733d8601e0848147"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8b7e069910a66b3bba61df35b5f879e5253ff0821a70375b9daf06444d046fa4"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:a3b5d1d04a9e0f7b15aa1e647c750631a873b298cdd1255687bb68779fe8eb35"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1b95986430e789c076610aea704db20874f0781b8624f648ca9fb6ef67c6e1"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffe5f793db59e2f1d45ec35a1cf51404fdd69df9f6952a0c87c3060af4c00e32"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:620e8e73bd2595dfff1b4d256a12b67fce90ece3de87b38e1dde46b910f46f4d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4cf9c0a985d68476c1cabd6385c691201dda2337d7453fb4da9679ce9f23f4e7"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:d144868a73002e6e2e6fef72333e7b0129cecdd121aa8f1edba7107fd067255d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e81b26497a17c29595bc7df20bc6a872ceea5f8c9d6537283945d4b6396aec10"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:5ba00ead8d9d82fd6123eb4891c566d30a293454e54e32ff7ead7644f5f7e575"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:24d574cb4072e1640b00864e94c4c89858033936ece3fc0e1c6f7179f120d0a8"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9735695a66aad654500b0193525e590c693ab3368478ce07b34b443a1ea5e824"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63d39709e17eb72685d052ac82acf0763e047f57c86af1b791505b1fec96915d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:1ea2cc84224937d6b9b4c07f5cb7c667f2bde28c255645ba27f8a675a7af8234"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4024e6d35a9fdc1b3fd6ac7a673bd16cb176c7e0b952af6428b7129a70f72cce"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5e2dcc94dc9c9ab8704409d86fcb5236316e9dcef6feed8162287634e3568f4c"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fa2de871801d824c255fac7e5e7e50f2be6c9c376fd9268b40c54b5e9da91f42"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:1314d4923c1509aa9696712a7bc0c7160d3b7acf72adafbbe6c558d523f5d491"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b750ecba3a8815ad8bc48311552f3f8ab99dd2326d29df7ff670d9c49321f48f"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d5117dce805d8a2f78baeddc8c6127281fa0a5e2c40c6dd992ba6b2b367876"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb35f607bd1cbe37b896379d5f5ed4dc298b536f4b959cb63180e05cacc0539d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:61c74710900602637d2d145dda9720c94e303380803bf68811b2a151deec75c2"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ece59af54ebf67ecbfbbf3ca9066f5687879e36527ad69d8b6e3ac565d565a62"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:d0a7c5f875068efe106f62233bc712d50db4d07c13c7db570175c7857a7b5dbd"},
{file = "backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90e202e72a3d5aae673fcc8c9a4267d56b2f532beeb9173361293625fe4d2039"},
{file = "backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2df98ef1b76f5a58bb493dda552259ba60c3a37557d848e039524203951c9f06"},
{file = "backports_datetime_fromisoformat-2.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7100adcda5e818b5a894ad0626e38118bb896a347f40ebed8981155675b9ba7b"},
{file = "backports_datetime_fromisoformat-2.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e410383f5d6a449a529d074e88af8bc80020bb42b402265f9c02c8358c11da5"},
{file = "backports_datetime_fromisoformat-2.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2797593760da6bcc32c4a13fa825af183cd4bfd333c60b3dbf84711afca26ef"},
{file = "backports_datetime_fromisoformat-2.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35a144fd681a0bea1013ccc4cd3fd4dc758ea17ee23dca019c02b82ec46fc0c4"},
{file = "backports_datetime_fromisoformat-2.0.3.tar.gz", hash = "sha256:b58edc8f517b66b397abc250ecc737969486703a66eb97e01e6d51291b1a139d"},
]
[[package]]
name = "bandit"
version = "1.8.3"
@@ -3350,23 +3408,19 @@ files = [
[[package]]
name = "marshmallow"
version = "3.26.2"
version = "4.3.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.10"
groups = ["dev"]
files = [
{file = "marshmallow-3.26.2-py3-none-any.whl", hash = "sha256:013fa8a3c4c276c24d26d84ce934dc964e2aa794345a0f8c7e5a7191482c8a73"},
{file = "marshmallow-3.26.2.tar.gz", hash = "sha256:bbe2adb5a03e6e3571b573f42527c6fe926e17467833660bebd11593ab8dfd57"},
{file = "marshmallow-4.3.0-py3-none-any.whl", hash = "sha256:46c4fe6984707e3cbd485dfebbf0a59874f58d695aad05c1668d15e8c6e13b46"},
{file = "marshmallow-4.3.0.tar.gz", hash = "sha256:fb43c53b3fe240b8f6af37223d6ef1636f927ad9bea8ab323afad95dff090880"},
]
[package.dependencies]
packaging = ">=17.0"
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"]
tests = ["pytest", "simplejson"]
backports-datetime-fromisoformat = {version = "*", markers = "python_version < \"3.11\""}
typing-extensions = {version = "*", markers = "python_version < \"3.11\""}
[[package]]
name = "mccabe"
@@ -3662,14 +3716,14 @@ dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"]
[[package]]
name = "msgraph-sdk"
version = "1.23.0"
version = "1.55.0"
description = "The Microsoft Graph Python SDK"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "msgraph_sdk-1.23.0-py3-none-any.whl", hash = "sha256:58e0047b4ca59fd82022c02cd73fec0170a3d84f3b76721e3db2a0314df9a58a"},
{file = "msgraph_sdk-1.23.0.tar.gz", hash = "sha256:6dd1ba9a46f5f0ce8599fd9610133adbd9d1493941438b5d3632fce9e55ed607"},
{file = "msgraph_sdk-1.55.0-py3-none-any.whl", hash = "sha256:c8e68ebc4b88af5111de312e7fa910a4e76ddf48a4534feadb1fb8a411c48cfc"},
{file = "msgraph_sdk-1.55.0.tar.gz", hash = "sha256:6df691a31954a050d26b8a678968017e157d940fb377f2a8a4e17a9741b98756"},
]
[package.dependencies]
@@ -6681,4 +6735,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "786921163bb46716defae1d9de1df001af2abf17edd3061165638707bcd28ce4"
content-hash = "09ce4507a464b318702ed8c6a738f3bb1bc4cc6ff5a50a9c2884f560af9ab034"
+35 -11
View File
@@ -2,7 +2,25 @@
All notable changes to the **Prowler SDK** are documented in this file.
## [5.24.0] (Prowler UNRELEASED)
## [5.25.0] (Prowler UNRELEASED)
### 🔄 Changed
- Bumped `msgraph-sdk` from 1.23.0 to 1.55.0 and `azure-mgmt-resource` from 23.3.0 to 24.0.0, removing `marshmallow` as it is a transitive dev-only dependency [(#10733)](https://github.com/prowler-cloud/prowler/pull/10733)
---
## [5.24.1] (Prowler UNRELEASED)
### 🐞 Fixed
- Cloudflare account-scoped API tokens failing connection test in the App with `CloudflareUserTokenRequiredError` [(#10723)](https://github.com/prowler-cloud/prowler/pull/10723)
- `prowler image --registry` failing with `ImageNoImagesProvidedError` due to registry arguments not being forwarded to `ImageProvider` in `init_global_provider` [(#10470)](https://github.com/prowler-cloud/prowler/pull/10470)
- Google Workspace Calendar checks false FAIL on unconfigured settings with secure Google defaults [(#10726)](https://github.com/prowler-cloud/prowler/pull/10726)
---
## [5.24.0] (Prowler v5.24.0)
### 🚀 Added
@@ -13,16 +31,23 @@ All notable changes to the **Prowler SDK** are documented in this file.
- `iam_role_access_not_stale_to_bedrock` and `iam_user_access_not_stale_to_bedrock` checks for AWS provider [(#10536)](https://github.com/prowler-cloud/prowler/pull/10536)
- `iam_policy_no_wildcard_marketplace_subscribe` and `iam_inline_policy_no_wildcard_marketplace_subscribe` checks for AWS provider [(#10525)](https://github.com/prowler-cloud/prowler/pull/10525)
- `bedrock_vpc_endpoints_configured` check for AWS provider [(#10591)](https://github.com/prowler-cloud/prowler/pull/10591)
- `exchange_organization_delicensing_resiliency_enabled` check for m365 provider [(#10608)](https://github.com/prowler-cloud/prowler/pull/10608)
- `exchange_organization_delicensing_resiliency_enabled` check for M365 provider [(#10608)](https://github.com/prowler-cloud/prowler/pull/10608)
- `entra_conditional_access_policy_mfa_enforced_for_guest_users` check for M365 provider [(#10616)](https://github.com/prowler-cloud/prowler/pull/10616)
- `entra_conditional_access_policy_corporate_device_sign_in_frequency_enforced` check for m365 provider [(#10618)](https://github.com/prowler-cloud/prowler/pull/10618)
- `entra_conditional_access_policy_block_unknown_device_platforms` check for m365 provider [(#10615)](https://github.com/prowler-cloud/prowler/pull/10615)
- `entra_conditional_access_policy_corporate_device_sign_in_frequency_enforced` check for M365 provider [(#10618)](https://github.com/prowler-cloud/prowler/pull/10618)
- `entra_conditional_access_policy_block_unknown_device_platforms` check for M365 provider [(#10615)](https://github.com/prowler-cloud/prowler/pull/10615)
- `--excluded-region` CLI flag, `PROWLER_AWS_DISALLOWED_REGIONS` environment variable, and `aws.disallowed_regions` config entry to skip specific AWS regions during scans [(#10688)](https://github.com/prowler-cloud/prowler/pull/10688)
### 🔄 Changed
- Bump Poetry to `2.3.4` and consolidate SDK workflows onto the `setup-python-poetry` composite action with opt-in lockfile regeneration [(#10681)](https://github.com/prowler-cloud/prowler/pull/10681)
- Normalize Conditional Access platform values in Entra models and simplify platform-based checks [(#10635)](https://github.com/prowler-cloud/prowler/pull/10635)
### 🐞 Fixed
- `prowler image --registry-list` crashes with `AttributeError` because `ImageProvider.__init__` returns early before registering the global provider [(#10691)](https://github.com/prowler-cloud/prowler/pull/10691)
- Vercel firewall config handling for team-scoped projects and current API response shapes [(#10695)](https://github.com/prowler-cloud/prowler/pull/10695)
- Google Workspace Drive checks false FAIL on unconfigured settings with secure Google defaults [(#10727)](https://github.com/prowler-cloud/prowler/pull/10727)
---
## [5.23.0] (Prowler v5.23.0)
@@ -72,7 +97,6 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Oracle Cloud `kms_key_rotation_enabled` now checks current key version age to avoid false positives on vaults without auto-rotation support [(#10450)](https://github.com/prowler-cloud/prowler/pull/10450)
- OCI filestorage, blockstorage, KMS, and compute services now honor `--region` for scanning outside the tenancy home region [(#10472)](https://github.com/prowler-cloud/prowler/pull/10472)
- OCI provider now supports multi-region filtering via `--region` [(#10473)](https://github.com/prowler-cloud/prowler/pull/10473)
- `prowler image --registry` failing with `ImageNoImagesProvidedError` due to registry arguments not being forwarded to `ImageProvider` in `init_global_provider` [(#10470)](https://github.com/prowler-cloud/prowler/pull/10470)
- OCI multi-region support for identity client configuration in blockstorage, identity, and filestorage services [(#10520)](https://github.com/prowler-cloud/prowler/pull/10520)
- Google Workspace Calendar checks now filter for customer-level policies only, skipping OU and group overrides that could produce incorrect audit results [(#10658)](https://github.com/prowler-cloud/prowler/pull/10658)
@@ -787,7 +811,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- S3 `test_connection` uses AWS S3 API `HeadBucket` instead of `GetBucketLocation` [(#8456)](https://github.com/prowler-cloud/prowler/pull/8456)
- Add more validations to Azure Storage models when some values are None to avoid serialization issues [(#8325)](https://github.com/prowler-cloud/prowler/pull/8325)
- `sns_topics_not_publicly_accessible` false positive with `aws:SourceArn` conditions [(#8326)](https://github.com/prowler-cloud/prowler/issues/8326)
- Remove typo from description req 1.2.3 - Prowler ThreatScore m365 [(#8384)](https://github.com/prowler-cloud/prowler/pull/8384)
- Remove typo from description req 1.2.3 - Prowler ThreatScore M365 [(#8384)](https://github.com/prowler-cloud/prowler/pull/8384)
- Way of counting FAILED/PASS reqs from `kisa_isms_p_2023_aws` table [(#8382)](https://github.com/prowler-cloud/prowler/pull/8382)
- Use default tenant domain instead of first domain in list for Azure and M365 providers [(#8402)](https://github.com/prowler-cloud/prowler/pull/8402)
- Avoid multiple module error calls in M365 provider [(#8353)](https://github.com/prowler-cloud/prowler/pull/8353)
@@ -828,7 +852,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Title & description wording for `iam_user_accesskey_unused` check for AWS provider [(#8233)](https://github.com/prowler-cloud/prowler/pull/8233)
- Add GitHub provider to lateral panel in documentation and change -h environment variable output [(#8246)](https://github.com/prowler-cloud/prowler/pull/8246)
- Show `m365_identity_type` and `m365_identity_id` in cloud reports [(#8247)](https://github.com/prowler-cloud/prowler/pull/8247)
- Show `M365_identity_type` and `M365_identity_id` in cloud reports [(#8247)](https://github.com/prowler-cloud/prowler/pull/8247)
- Ensure `is_service_role` only returns `True` for service roles [(#8274)](https://github.com/prowler-cloud/prowler/pull/8274)
- Update DynamoDB check metadata to fix broken link [(#8273)](https://github.com/prowler-cloud/prowler/pull/8273)
- Show correct count of findings in Dashboard Security Posture page [(#8270)](https://github.com/prowler-cloud/prowler/pull/8270)
@@ -950,9 +974,9 @@ All notable changes to the **Prowler SDK** are documented in this file.
### Fixed
- `m365_powershell test_credentials` to use sanitized credentials [(#7761)](https://github.com/prowler-cloud/prowler/pull/7761)
- `M365_powershell test_credentials` to use sanitized credentials [(#7761)](https://github.com/prowler-cloud/prowler/pull/7761)
- `admincenter_users_admins_reduced_license_footprint` check logic to pass when admin user has no license [(#7779)](https://github.com/prowler-cloud/prowler/pull/7779)
- `m365_powershell` to close the PowerShell sessions in msgraph services [(#7816)](https://github.com/prowler-cloud/prowler/pull/7816)
- `M365_powershell` to close the PowerShell sessions in msgraph services [(#7816)](https://github.com/prowler-cloud/prowler/pull/7816)
- `defender_ensure_notify_alerts_severity_is_high`check to accept high or lower severity [(#7862)](https://github.com/prowler-cloud/prowler/pull/7862)
- Replace `Directory.Read.All` permission with `Domain.Read.All` which is more restrictive [(#7888)](https://github.com/prowler-cloud/prowler/pull/7888)
- Split calls to list Azure Functions attributes [(#7778)](https://github.com/prowler-cloud/prowler/pull/7778)
@@ -1026,7 +1050,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- New check `teams_meeting_chat_anonymous_users_disabled` [(#7579)](https://github.com/prowler-cloud/prowler/pull/7579)
- Prowler Threat Score Compliance Framework [(#7603)](https://github.com/prowler-cloud/prowler/pull/7603)
- Documentation for M365 provider [(#7622)](https://github.com/prowler-cloud/prowler/pull/7622)
- Support for m365 provider in Prowler Dashboard [(#7633)](https://github.com/prowler-cloud/prowler/pull/7633)
- Support for M365 provider in Prowler Dashboard [(#7633)](https://github.com/prowler-cloud/prowler/pull/7633)
- New check for Modern Authentication enabled for Exchange Online in M365 [(#7636)](https://github.com/prowler-cloud/prowler/pull/7636)
- New check `sharepoint_onedrive_sync_restricted_unmanaged_devices` [(#7589)](https://github.com/prowler-cloud/prowler/pull/7589)
- New check for Additional Storage restricted for Exchange in M365 [(#7638)](https://github.com/prowler-cloud/prowler/pull/7638)
@@ -1036,7 +1060,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- New check for MailTips full enabled for Exchange in M365 [(#7637)](https://github.com/prowler-cloud/prowler/pull/7637)
- New check for Comprehensive Attachments Filter Applied for Defender in M365 [(#7661)](https://github.com/prowler-cloud/prowler/pull/7661)
- Modified check `exchange_mailbox_properties_auditing_enabled` to make it configurable [(#7662)](https://github.com/prowler-cloud/prowler/pull/7662)
- snapshots to m365 documentation [(#7673)](https://github.com/prowler-cloud/prowler/pull/7673)
- snapshots to M365 documentation [(#7673)](https://github.com/prowler-cloud/prowler/pull/7673)
- support for static credentials for sending findings to Amazon S3 and AWS Security Hub [(#7322)](https://github.com/prowler-cloud/prowler/pull/7322)
- Prowler ThreatScore for M365 provider [(#7692)](https://github.com/prowler-cloud/prowler/pull/7692)
- Microsoft User and User Credential auth to reports [(#7681)](https://github.com/prowler-cloud/prowler/pull/7681)
+11 -3
View File
@@ -69,11 +69,11 @@ from prowler.lib.outputs.compliance.cis.cis_gcp import GCPCIS
from prowler.lib.outputs.compliance.cis.cis_github import GithubCIS
from prowler.lib.outputs.compliance.cis.cis_googleworkspace import GoogleWorkspaceCIS
from prowler.lib.outputs.compliance.cis.cis_kubernetes import KubernetesCIS
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
from prowler.lib.outputs.compliance.cis.cis_oraclecloud import OracleCloudCIS
from prowler.lib.outputs.compliance.cisa_scuba.cisa_scuba_googleworkspace import (
GoogleWorkspaceCISASCuBA,
)
from prowler.lib.outputs.compliance.cis.cis_m365 import M365CIS
from prowler.lib.outputs.compliance.cis.cis_oraclecloud import OracleCloudCIS
from prowler.lib.outputs.compliance.compliance import display_compliance_table
from prowler.lib.outputs.compliance.csa.csa_alibabacloud import AlibabaCloudCSA
from prowler.lib.outputs.compliance.csa.csa_aws import AWSCSA
@@ -293,6 +293,10 @@ def prowler():
if not args.only_logs:
global_provider.print_credentials()
# --registry-list: listing already printed during provider init, exit
if getattr(global_provider, "_listing_only", False):
sys.exit()
# Skip service and check loading for external-tool providers
if provider not in EXTERNAL_TOOL_PROVIDERS:
# Import custom checks from folder
@@ -1311,8 +1315,12 @@ def prowler():
global_provider.identity.audited_regions,
)
if not global_provider.identity.audited_regions
else global_provider.identity.audited_regions
else set(global_provider.identity.audited_regions)
)
if global_provider._enabled_regions is not None:
security_hub_regions = security_hub_regions.intersection(
global_provider._enabled_regions
)
security_hub = SecurityHub(
aws_account_id=global_provider.identity.account,
+1 -1
View File
@@ -38,7 +38,7 @@ class _MutableTimestamp:
timestamp = _MutableTimestamp(datetime.today())
timestamp_utc = _MutableTimestamp(datetime.now(timezone.utc))
prowler_version = "5.24.0"
prowler_version = "5.25.0"
html_logo_url = "https://github.com/prowler-cloud/prowler/"
square_logo_img = "https://raw.githubusercontent.com/prowler-cloud/prowler/dc7d2d5aeb92fdf12e8604f42ef6472cd3e8e889/docs/img/prowler-logo-black.png"
aws_logo = "https://user-images.githubusercontent.com/38561120/235953920-3e3fba08-0795-41dc-b480-9bea57db9f2e.png"
+4
View File
@@ -3,6 +3,10 @@ aws:
# AWS Global Configuration
# aws.mute_non_default_regions --> Set to True to muted failed findings in non-default regions for AccessAnalyzer, GuardDuty, SecurityHub, DRS and Config
mute_non_default_regions: False
# aws.disallowed_regions --> List of AWS regions to exclude from the scan.
# Also settable via the PROWLER_AWS_DISALLOWED_REGIONS environment variable or
# the --excluded-region CLI flag. Precedence: CLI > env var > config file.
# disallowed_regions: []
# If you want to mute failed findings only in specific regions, create a file with the following syntax and run it with `prowler aws -w mutelist.yaml`:
# Mutelist:
# Accounts:
+136 -41
View File
@@ -111,6 +111,7 @@ class AwsProvider(Provider):
mfa: bool = False,
profile: str = None,
regions: set = set(),
excluded_regions: set = None,
organizations_role_arn: str = None,
scan_unused_services: bool = False,
resource_tags: list[str] = [],
@@ -136,6 +137,10 @@ class AwsProvider(Provider):
- mfa: A boolean indicating whether MFA is enabled.
- profile: The name of the AWS CLI profile to use.
- regions: A set of regions to audit.
- excluded_regions: A set of regions to skip during the scan. Applied
on top of `regions` and of the account's enabled regions. Also
settable via the PROWLER_AWS_DISALLOWED_REGIONS environment variable
or the `disallowed_regions` key in the provider config file.
- organizations_role_arn: The ARN of the AWS Organizations IAM role to assume.
- scan_unused_services: A boolean indicating whether to scan unused services. False by default.
- resource_tags: A list of tags to filter the resources to audit.
@@ -190,6 +195,33 @@ class AwsProvider(Provider):
logger.info("Initializing AWS provider ...")
# Load provider config early because provider-level settings can affect
# bootstrap region selection before the scan starts.
if config_content is not None:
self._audit_config = config_content
else:
if not config_path:
config_path = default_config_file_path
self._audit_config = load_and_validate_config_file(self._type, config_path)
excluded_regions = self.resolve_excluded_regions(
excluded_regions, self._audit_config
)
# Normalize excluded_regions and prune the include-list up front so
# every downstream consumer (identity, STS region, service/region
# enumeration) sees an already-filtered view.
if excluded_regions and regions:
regions = set(regions) - excluded_regions
if not regions:
raise AWSArgumentTypeValidationError(
message=(
"All requested AWS regions are excluded by the "
"disallowed regions configuration."
),
file=pathlib.Path(__file__).name,
)
######## AWS Session
logger.info("Generating original session ...")
@@ -215,7 +247,7 @@ class AwsProvider(Provider):
# After the session is created, validate it
logger.info("Validating credentials ...")
sts_region = get_aws_region_for_sts(
self.session.current_session.region_name, regions
self.session.current_session.region_name, regions, excluded_regions
)
# Validate the credentials
@@ -229,7 +261,9 @@ class AwsProvider(Provider):
######## AWS Provider Identity
# Get profile region
profile_region = self.get_profile_region(self._session.current_session)
profile_region = self.get_profile_region(
self._session.current_session, excluded_regions
)
# Set identity
self._identity = self.set_identity(
@@ -332,7 +366,26 @@ class AwsProvider(Provider):
)
########
# Parse Scan Tags
# Get Enabled Regions
self._enabled_regions = self.get_aws_enabled_regions(
self._session.current_session
)
# Apply the exclusion to the account's enabled regions. This is the
# gate used by generate_regional_clients, so skipped regions never get
# a boto3 client created for them and cannot stall the scan.
if excluded_regions:
if self._enabled_regions is not None:
self._enabled_regions = self._enabled_regions - excluded_regions
if self._identity.audited_regions:
self._identity.audited_regions = (
set(self._identity.audited_regions) - excluded_regions
)
logger.info(f"Excluding AWS regions from scan: {sorted(excluded_regions)}")
self._excluded_regions = excluded_regions
# Parse Scan Tags after region exclusions are applied so tag discovery
# also skips disallowed regions.
if resource_tags:
self._audit_resources = self.get_tagged_resources(resource_tags)
@@ -340,22 +393,9 @@ class AwsProvider(Provider):
if resource_arn:
self._audit_resources = resource_arn
# Get Enabled Regions
self._enabled_regions = self.get_aws_enabled_regions(
self._session.current_session
)
# Set ignore unused services
self._scan_unused_services = scan_unused_services
# Audit Config
if config_content:
self._audit_config = config_content
else:
if not config_path:
config_path = default_config_file_path
self._audit_config = load_and_validate_config_file(self._type, config_path)
# Fixer Config
self._fixer_config = fixer_config
@@ -468,12 +508,53 @@ class AwsProvider(Provider):
)
@staticmethod
def get_profile_region(session: Session):
profile_region = AWS_REGION_US_EAST_1
if session.region_name:
profile_region = session.region_name
def resolve_excluded_regions(
excluded_regions: set | list | tuple | None,
audit_config: dict | None,
) -> set[str]:
"""Resolve AWS region exclusions with precedence arg > env > config."""
if excluded_regions is not None:
raw_regions = excluded_regions
else:
raw_regions = Provider.get_excluded_regions_from_env()
if not raw_regions and isinstance(audit_config, dict):
raw_regions = audit_config.get("disallowed_regions") or []
return profile_region
return {str(region).strip() for region in raw_regions if str(region).strip()}
@staticmethod
def get_bootstrap_region_candidates(session_region: str | None) -> tuple[str, ...]:
"""Return safe fallback regions for bootstrap AWS calls."""
if session_region:
if session_region.startswith("cn-"):
return ("cn-north-1", "cn-northwest-1")
if session_region.startswith("us-gov-"):
return ("us-gov-east-1", "us-gov-west-1")
if session_region.startswith("eusc-"):
return ("eusc-de-east-1",)
if session_region.startswith("us-iso"):
return (session_region,)
return (AWS_STS_GLOBAL_ENDPOINT_REGION, "us-east-2", "us-west-2", "eu-west-1")
@staticmethod
def get_profile_region(
    session: Session, excluded_regions: set[str] | None = None
) -> str:
    """Return the region to use for bootstrap clients for this session.

    The session's own region wins unless it is excluded; otherwise the
    first non-excluded bootstrap candidate is used. As a last resort the
    session region (or us-east-1) is returned even if excluded.
    """
    skipped = set(excluded_regions or ())
    configured = session.region_name
    if configured and configured not in skipped:
        return configured
    for candidate in AwsProvider.get_bootstrap_region_candidates(configured):
        if candidate in skipped:
            continue
        if configured and configured != candidate:
            logger.info(
                f"Configured AWS profile region {configured} is excluded; using {candidate} for bootstrap clients."
            )
        return candidate
    # Every candidate was excluded: fall back to the configured region or
    # the default bootstrap region.
    return configured or AWS_REGION_US_EAST_1
@staticmethod
def set_identity(
@@ -701,12 +782,15 @@ class AwsProvider(Provider):
Caller Identity ARN: arn:aws:iam::123456789012:user/prowler
```
"""
# Beautify audited regions, set "all" if there is no filter region
regions = (
", ".join(self._identity.audited_regions)
if self._identity.audited_regions is not None
else "all"
)
# Beautify audited regions. If the scan includes all regions but some
# are explicitly excluded, reflect that in the banner instead of
# showing the misleading "all" label.
if self._identity.audited_regions:
regions = ", ".join(sorted(self._identity.audited_regions))
elif getattr(self, "_excluded_regions", None):
regions = f"all except {', '.join(sorted(self._excluded_regions))}"
else:
regions = "all"
# Beautify audited profile, set "default" if there is no profile set
profile = (
self._identity.profile if self._identity.profile is not None else "default"
@@ -745,6 +829,8 @@ class AwsProvider(Provider):
service_regions = AwsProvider.get_available_aws_service_regions(
service, self._identity.partition, self._identity.audited_regions
)
if getattr(self, "_excluded_regions", None):
service_regions = service_regions - self._excluded_regions
# Get the regions enabled for the account and get the intersection with the service available regions
if self._enabled_regions is not None:
@@ -962,6 +1048,8 @@ class AwsProvider(Provider):
service_regions = AwsProvider.get_available_aws_service_regions(
service, self._identity.partition, self._identity.audited_regions
)
if getattr(self, "_excluded_regions", None):
service_regions = service_regions - self._excluded_regions
default_region = self.get_global_region()
# global region of the partition when all regions are audited and there is no profile region
if self._identity.profile_region in service_regions:
@@ -1565,13 +1653,19 @@ def read_aws_regions_file() -> dict:
# TODO: This can be moved to another class since it doesn't need self
def get_aws_region_for_sts(session_region: str, regions: set[str]) -> str:
def get_aws_region_for_sts(
session_region: str,
regions: set[str],
excluded_regions: set[str] | None = None,
) -> str:
"""
Get the AWS region for the STS Assume Role operation.
Args:
- session_region (str): The region configured in the AWS session.
- regions (set[str]): The regions passed with the -f/--region/--filter-region option.
- excluded_regions (set[str] | None): Regions that should be avoided for
bootstrap calls when possible.
Returns:
str: The AWS region for the STS Assume Role operation
@@ -1579,20 +1673,21 @@ def get_aws_region_for_sts(session_region: str, regions: set[str]) -> str:
Example:
aws_region = get_aws_region_for_sts(session_region, regions)
"""
# If there is no region passed with -f/--region/--filter-region
if regions is None or len(regions) == 0:
# If you have a region configured in your AWS config or credentials file
if session_region is not None:
aws_region = session_region
else:
# If there is no region set passed with -f/--region
# we use the Global STS Endpoint Region, us-east-1
aws_region = AWS_STS_GLOBAL_ENDPOINT_REGION
else:
# Get the first region passed to the -f/--region
aws_region = list(regions)[0]
excluded_regions = set(excluded_regions or ())
return aws_region
if regions:
for region in regions:
if region not in excluded_regions:
return region
if session_region and session_region not in excluded_regions:
return session_region
for region in AwsProvider.get_bootstrap_region_candidates(session_region):
if region not in excluded_regions:
return region
return session_region or AWS_STS_GLOBAL_ENDPOINT_REGION
# TODO: this duplicates the provider arguments validation library
@@ -66,6 +66,16 @@ def init_parser(self):
help="AWS region names to run Prowler against",
choices=AwsProvider.get_regions(partition=None),
)
aws_regions_subparser.add_argument(
"--excluded-region",
"--excluded-regions",
nargs="+",
help=(
"AWS region names to exclude from the scan. Overrides the "
"PROWLER_AWS_DISALLOWED_REGIONS environment variable when set."
),
choices=AwsProvider.get_regions(partition=None),
)
# AWS Organizations
aws_orgs_subparser = aws_parser.add_argument_group("AWS Organizations")
aws_orgs_subparser.add_argument(
@@ -30,10 +30,12 @@ def quick_inventory(provider: AwsProvider, args):
ec2_client = provider.session.current_session.client(
"ec2", region_name=provider.identity.profile_region
)
excluded_regions = getattr(provider, "_excluded_regions", set())
# Get all the available regions
provider.identity.audited_regions = [
region["RegionName"]
for region in ec2_client.describe_regions()["Regions"]
if region["RegionName"] not in excluded_regions
]
with alive_bar(
@@ -332,19 +332,16 @@ class CloudflareProvider(Provider):
return
except PermissionDeniedError as error:
error_str = str(error)
# Check for user-level authentication required (code 9109)
if "9109" in error_str:
logger.error(f"CloudflareUserTokenRequiredError: {error}")
raise CloudflareUserTokenRequiredError(
file=os.path.basename(__file__),
)
# Check for invalid API key or email (code 9103) - comes as 403
if "9103" in error_str or "Unknown X-Auth-Key" in error_str:
logger.error(f"CloudflareInvalidAPIKeyError: {error}")
raise CloudflareInvalidAPIKeyError(
file=os.path.basename(__file__),
)
# For other permission errors, try accounts.list() as fallback
# For permission errors (including 9109 account-scoped tokens),
# try accounts.list() as fallback before failing.
# Error 9109 means the token is account-scoped, not user-level,
# which is valid for scanning — only fail if accounts.list() also fails.
logger.warning(
f"Unable to retrieve Cloudflare user info: {error}. "
"Trying accounts.list() as fallback."
+19
View File
@@ -1,4 +1,5 @@
import importlib
import os
import pkgutil
import sys
from abc import ABC, abstractmethod
@@ -135,6 +136,18 @@ class Provider(ABC):
"""
return set()
@staticmethod
def get_excluded_regions_from_env() -> set:
"""Parse the PROWLER_AWS_DISALLOWED_REGIONS environment variable.
The variable is a comma-separated list of region identifiers to skip
during scans (e.g. "me-south-1, ap-east-1"). Whitespace around entries
is tolerated and empty entries are dropped. Returns an empty set when
the variable is unset or contains no usable values.
"""
raw = os.environ.get("PROWLER_AWS_DISALLOWED_REGIONS", "")
return {region.strip() for region in raw.split(",") if region.strip()}
@staticmethod
def get_global_provider() -> "Provider":
return Provider._global
@@ -160,6 +173,11 @@ class Provider(ABC):
if not isinstance(Provider._global, provider_class):
if "aws" in provider_class_name.lower():
excluded_regions = (
set(arguments.excluded_region)
if getattr(arguments, "excluded_region", None)
else None
)
provider_class(
retries_max_attempts=arguments.aws_retries_max_attempts,
role_arn=arguments.role,
@@ -169,6 +187,7 @@ class Provider(ABC):
mfa=arguments.mfa,
profile=arguments.profile,
regions=set(arguments.region) if arguments.region else None,
excluded_regions=excluded_regions,
organizations_role_arn=arguments.organizations_role,
scan_unused_services=arguments.scan_unused_services,
resource_tags=arguments.resource_tag,
@@ -35,21 +35,20 @@ class calendar_external_invitations_warning(Check):
f"External invitation warnings for Google Calendar are enabled "
f"in domain {calendar_client.provider.identity.domain}."
)
elif warning_enabled is None:
report.status = "PASS"
report.status_extended = (
f"External invitation warnings for Google Calendar use Google's "
f"secure default configuration (enabled) "
f"in domain {calendar_client.provider.identity.domain}."
)
else:
report.status = "FAIL"
if warning_enabled is None:
report.status_extended = (
f"External invitation warnings for Google Calendar are not "
f"explicitly configured in domain "
f"{calendar_client.provider.identity.domain}. "
f"Users should be warned when inviting guests outside the organization."
)
else:
report.status_extended = (
f"External invitation warnings for Google Calendar are disabled "
f"in domain {calendar_client.provider.identity.domain}. "
f"Users should be warned when inviting guests outside the organization."
)
report.status_extended = (
f"External invitation warnings for Google Calendar are disabled "
f"in domain {calendar_client.provider.identity.domain}. "
f"Users should be warned when inviting guests outside the organization."
)
findings.append(report)
@@ -36,20 +36,20 @@ class calendar_external_sharing_primary_calendar(Check):
f"{calendar_client.provider.identity.domain} is restricted to "
f"free/busy information only."
)
elif sharing is None:
report.status = "PASS"
report.status_extended = (
f"Primary calendar external sharing uses Google's secure default "
f"configuration (free/busy only) "
f"in domain {calendar_client.provider.identity.domain}."
)
else:
report.status = "FAIL"
if sharing is None:
report.status_extended = (
f"Primary calendar external sharing is not explicitly configured "
f"in domain {calendar_client.provider.identity.domain}. "
f"External sharing should be restricted to free/busy information only."
)
else:
report.status_extended = (
f"Primary calendar external sharing in domain "
f"{calendar_client.provider.identity.domain} is set to {sharing}. "
f"External sharing should be restricted to free/busy information only."
)
report.status_extended = (
f"Primary calendar external sharing in domain "
f"{calendar_client.provider.identity.domain} is set to {sharing}. "
f"External sharing should be restricted to free/busy information only."
)
findings.append(report)
@@ -33,21 +33,20 @@ class drive_external_sharing_warn_users(Check):
f"External sharing warnings for Drive and Docs are enabled "
f"in domain {drive_client.provider.identity.domain}."
)
elif warning_enabled is None:
report.status = "PASS"
report.status_extended = (
f"External sharing warnings for Drive and Docs use Google's "
f"secure default configuration (enabled) "
f"in domain {drive_client.provider.identity.domain}."
)
else:
report.status = "FAIL"
if warning_enabled is None:
report.status_extended = (
f"External sharing warnings for Drive and Docs are not "
f"explicitly configured in domain "
f"{drive_client.provider.identity.domain}. "
f"Users should be warned when sharing files outside the organization."
)
else:
report.status_extended = (
f"External sharing warnings for Drive and Docs are disabled "
f"in domain {drive_client.provider.identity.domain}. "
f"Users should be warned when sharing files outside the organization."
)
report.status_extended = (
f"External sharing warnings for Drive and Docs are disabled "
f"in domain {drive_client.provider.identity.domain}. "
f"Users should be warned when sharing files outside the organization."
)
findings.append(report)
@@ -35,22 +35,21 @@ class drive_shared_drive_creation_allowed(Check):
f"Users in domain {drive_client.provider.identity.domain} "
f"are allowed to create new shared drives."
)
elif allow_creation is None:
report.status = "PASS"
report.status_extended = (
f"Shared drive creation uses Google's secure default "
f"configuration (allowed) "
f"in domain {drive_client.provider.identity.domain}."
)
else:
report.status = "FAIL"
if allow_creation is None:
report.status_extended = (
f"Shared drive creation is not explicitly configured in "
f"domain {drive_client.provider.identity.domain}. "
f"Users should be allowed to create new shared drives to avoid "
f"data loss when accounts are deleted."
)
else:
report.status_extended = (
f"Users in domain {drive_client.provider.identity.domain} "
f"are prevented from creating new shared drives. "
f"Users should be allowed to create new shared drives to avoid "
f"data loss when accounts are deleted."
)
report.status_extended = (
f"Users in domain {drive_client.provider.identity.domain} "
f"are prevented from creating new shared drives. "
f"Users should be allowed to create new shared drives to avoid "
f"data loss when accounts are deleted."
)
findings.append(report)
@@ -35,21 +35,21 @@ class drive_shared_drive_disable_download_print_copy(Check):
f"{drive_client.provider.identity.domain} is restricted to "
f"{allowed}."
)
elif allowed is None:
report.status = "PASS"
report.status_extended = (
f"Download, print, and copy restrictions for shared drives use "
f"Google's secure default configuration (disabled for viewers "
f"and commenters) "
f"in domain {drive_client.provider.identity.domain}."
)
else:
report.status = "FAIL"
if allowed is None:
report.status_extended = (
f"Download, print, and copy restrictions for shared drive "
f"viewers and commenters are not explicitly configured in "
f"domain {drive_client.provider.identity.domain}. "
f"These actions should be restricted to editors or managers only."
)
else:
report.status_extended = (
f"Download, print, and copy in shared drives in domain "
f"{drive_client.provider.identity.domain} is set to {allowed}. "
f"These actions should be restricted to editors or managers only."
)
report.status_extended = (
f"Download, print, and copy in shared drives in domain "
f"{drive_client.provider.identity.domain} is set to {allowed}. "
f"These actions should be restricted to editors or managers only."
)
findings.append(report)
@@ -36,21 +36,20 @@ class drive_warn_sharing_with_allowlisted_domains(Check):
f"Users are warned when sharing files with allowlisted "
f"domains in domain {drive_client.provider.identity.domain}."
)
elif warn_enabled is None:
report.status = "PASS"
report.status_extended = (
f"Warning when sharing with allowlisted domains uses Google's "
f"secure default configuration (enabled) "
f"in domain {drive_client.provider.identity.domain}."
)
else:
report.status = "FAIL"
if warn_enabled is None:
report.status_extended = (
f"Warning when sharing with allowlisted domains is not "
f"explicitly configured in domain "
f"{drive_client.provider.identity.domain}. "
f"Users should be warned when sharing files with users in allowlisted domains."
)
else:
report.status_extended = (
f"Warning when sharing with allowlisted domains is disabled "
f"in domain {drive_client.provider.identity.domain}. "
f"Users should be warned when sharing files with users in allowlisted domains."
)
report.status_extended = (
f"Warning when sharing with allowlisted domains is disabled "
f"in domain {drive_client.provider.identity.domain}. "
f"Users should be warned when sharing files with users in allowlisted domains."
)
findings.append(report)
+41 -33
View File
@@ -163,42 +163,50 @@ class ImageProvider(Provider):
# Registry scan mode: enumerate images from registry
if self.registry:
self._enumerate_registry()
if self._listing_only:
return
for image in self.images:
self._validate_image_name(image)
if not self.images:
raise ImageNoImagesProvidedError(
file=__file__,
message="No images provided for scanning.",
)
# Audit Config
if config_content:
self._audit_config = config_content
else:
if not config_path:
config_path = default_config_file_path
self._audit_config = load_and_validate_config_file(self._type, config_path)
# Fixer Config
self._fixer_config = fixer_config if fixer_config is not None else {}
# Mutelist (not needed for Image provider since Trivy has its own logic)
# Safe defaults for listing-only mode (overwritten below in scan mode)
self._audit_config = {}
self._fixer_config = {}
self._mutelist = None
self.audit_metadata = None
self.audit_metadata = Audit_Metadata(
provider=self._type,
account_id=self.audited_account,
account_name="image",
region=self.region,
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
)
# Skip scan setup for listing-only mode
if not self._listing_only:
for image in self.images:
self._validate_image_name(image)
if not self.images:
raise ImageNoImagesProvidedError(
file=__file__,
message="No images provided for scanning.",
)
# Audit Config
if config_content:
self._audit_config = config_content
else:
if not config_path:
config_path = default_config_file_path
self._audit_config = load_and_validate_config_file(
self._type, config_path
)
# Fixer Config
self._fixer_config = fixer_config if fixer_config is not None else {}
# Mutelist (not needed for Image provider since Trivy has its own logic)
self._mutelist = None
self.audit_metadata = Audit_Metadata(
provider=self._type,
account_id=self.audited_account,
account_name="image",
region=self.region,
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
)
Provider.set_global_provider(self)
@@ -55,6 +55,7 @@ class Project(VercelService):
# Parse password protection
pwd_protection = proj.get("passwordProtection")
security = proj.get("security", {}) or {}
self.projects[project_id] = VercelProject(
id=project_id,
@@ -75,6 +76,16 @@ class Project(VercelService):
git_fork_protection=proj.get("gitForkProtection", True),
git_repository=proj.get("link"),
secure_compute=proj.get("secureCompute"),
firewall_enabled=security.get("firewallEnabled"),
firewall_config_version=(
str(security.get("firewallConfigVersion"))
if security.get("firewallConfigVersion") is not None
else None
),
managed_rules=security.get(
"managedRules", security.get("managedRulesets")
),
bot_id_enabled=security.get("botIdEnabled"),
)
logger.info(f"Project - Found {len(self.projects)} project(s)")
@@ -160,4 +171,8 @@ class VercelProject(BaseModel):
git_fork_protection: bool = True
git_repository: Optional[dict] = None
secure_compute: Optional[dict] = None
firewall_enabled: Optional[bool] = None
firewall_config_version: Optional[str] = None
managed_rules: Optional[dict] = None
bot_id_enabled: Optional[bool] = None
environment_variables: list[VercelEnvironmentVariable] = Field(default_factory=list)
@@ -26,10 +26,7 @@ class Security(VercelService):
def _fetch_firewall_config(self, project):
"""Fetch WAF/Firewall config for a single project."""
try:
data = self._get(
"/v1/security/firewall/config",
params={"projectId": project.id},
)
data = self._read_firewall_config(project)
if data is None:
# 403 — plan limitation, store with managed_rulesets=None
@@ -44,39 +41,60 @@ class Security(VercelService):
)
return
# Parse firewall config
fw = data.get("firewallConfig", data) if isinstance(data, dict) else {}
fw = self._normalize_firewall_config(data)
# Determine if firewall is enabled
rules = fw.get("rules", []) or []
managed = fw.get("managedRules", fw.get("managedRulesets"))
if not fw:
fallback_firewall_enabled = self._fallback_firewall_enabled(project)
self.firewall_configs[project.id] = VercelFirewallConfig(
project_id=project.id,
project_name=project.name,
team_id=project.team_id,
firewall_enabled=(
fallback_firewall_enabled
if fallback_firewall_enabled is not None
else False
),
managed_rulesets=self._fallback_managed_rulesets(project),
name=project.name,
id=project.id,
)
return
rules = [
rule for rule in (fw.get("rules", []) or []) if self._is_active(rule)
]
managed = self._active_managed_rulesets(
fw.get("managedRules", fw.get("managedRulesets", fw.get("crs")))
)
custom_rules = []
ip_blocking = []
ip_blocking = list(fw.get("ips", []) or [])
rate_limiting = []
for rule in rules:
rule_action = rule.get("action", {})
action_type = (
rule_action.get("type", "")
if isinstance(rule_action, dict)
else str(rule_action)
)
mitigate_action = self._mitigate_action(rule)
if action_type == "rate_limit" or rule.get("rateLimit"):
if self._is_rate_limiting_rule(rule, mitigate_action):
rate_limiting.append(rule)
elif action_type in ("deny", "block") and self._is_ip_rule(rule):
elif self._is_ip_rule(rule):
ip_blocking.append(rule)
else:
custom_rules.append(rule)
firewall_enabled = bool(rules) or bool(managed)
firewall_enabled = fw.get("firewallEnabled")
if firewall_enabled is None:
firewall_enabled = self._fallback_firewall_enabled(project)
if firewall_enabled is None:
firewall_enabled = bool(rules) or bool(ip_blocking) or bool(managed)
if not managed:
managed = self._fallback_managed_rulesets(project)
self.firewall_configs[project.id] = VercelFirewallConfig(
project_id=project.id,
project_name=project.name,
team_id=project.team_id,
firewall_enabled=firewall_enabled,
managed_rulesets=managed if managed is not None else {},
managed_rulesets=managed,
custom_rules=custom_rules,
ip_blocking_rules=ip_blocking,
rate_limiting_rules=rate_limiting,
@@ -95,6 +113,117 @@ class Security(VercelService):
f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
def _read_firewall_config(self, project):
    """Read the deployed firewall config via the documented endpoint.
    See: https://vercel.com/docs/rest-api/security/read-firewall-configuration
    """
    # Team-scoped params keep the request valid for team-owned projects.
    params = self._firewall_params(project)
    config_version = getattr(project, "firewall_config_version", None)
    # Preferred endpoints, most specific first: the pinned config version
    # (when the project metadata exposes one), then the active config.
    endpoints = []
    if config_version:
        endpoints.append(f"/v1/security/firewall/config/{config_version}")
    endpoints.append("/v1/security/firewall/config/active")
    last_error = None
    for endpoint in endpoints:
        try:
            # First endpoint that answers wins.
            return self._get(endpoint, params=params)
        except Exception as error:
            # Remember the failure but keep trying the remaining endpoints.
            last_error = error
            logger.warning(
                f"Security - Firewall config read failed for project "
                f"{project.id} (team={getattr(project, 'team_id', None)}) "
                f"on {endpoint} with params={params}: "
                f"{error.__class__.__name__}: {error}"
            )
    if last_error is not None:
        logger.debug(
            f"Security - Falling back to firewall config wrapper for "
            f"{project.id} after {last_error.__class__.__name__}: {last_error}"
        )
    # Last resort: the legacy un-versioned wrapper endpoint. Any exception
    # raised here propagates to the caller.
    return self._get("/v1/security/firewall/config", params=params)
@staticmethod
def _firewall_params(project) -> dict:
"""Build firewall request params, preserving team scope for team projects."""
params = {"projectId": project.id}
team_id = getattr(project, "team_id", None)
if isinstance(team_id, str) and team_id:
params["teamId"] = team_id
return params
@staticmethod
def _normalize_firewall_config(data: dict) -> dict:
"""Normalize firewall responses across Vercel endpoint variants."""
if not isinstance(data, dict):
return {}
if "firewallConfig" in data and isinstance(data["firewallConfig"], dict):
return data["firewallConfig"]
if any(key in data for key in ("active", "draft", "versions")):
return data.get("active") or {}
return data
@staticmethod
def _active_managed_rulesets(managed_rules: dict | None) -> dict:
"""Return only active managed rulesets."""
if not isinstance(managed_rules, dict):
return {}
return {
ruleset: config
for ruleset, config in managed_rules.items()
if not isinstance(config, dict) or config.get("active", False)
}
@classmethod
def _fallback_managed_rulesets(cls, project) -> dict:
    """Derive active managed rulesets from the project's own metadata.

    Used when the firewall config endpoint returns nothing usable.
    """
    project_rules = getattr(project, "managed_rules", None)
    return cls._active_managed_rulesets(project_rules)
@staticmethod
def _fallback_firewall_enabled(project) -> bool | None:
"""Return firewall enabled state from project metadata when available."""
return getattr(project, "firewall_enabled", None)
@staticmethod
def _mitigate_action(rule: dict) -> dict:
"""Extract the nested Vercel mitigation action payload for a rule."""
action = rule.get("action", {})
if not isinstance(action, dict):
return {}
mitigate = action.get("mitigate")
return mitigate if isinstance(mitigate, dict) else action
@staticmethod
def _is_active(rule: dict) -> bool:
"""Treat missing active flags as enabled for backwards compatibility."""
return rule.get("active", True) is not False
@classmethod
def _is_rate_limiting_rule(
cls, rule: dict, mitigate_action: dict | None = None
) -> bool:
"""Check if a firewall rule enforces rate limiting."""
if rule.get("rateLimit"):
return True
mitigate = (
mitigate_action
if isinstance(mitigate_action, dict)
else cls._mitigate_action(rule)
)
return bool(mitigate.get("rateLimit")) or mitigate.get("action") == "rate_limit"
@staticmethod
def _is_ip_rule(rule: dict) -> bool:
"""Check if a rule is an IP blocking rule based on conditions."""
+3 -4
View File
@@ -30,7 +30,7 @@ dependencies = [
"azure-mgmt-postgresqlflexibleservers==1.1.0",
"azure-mgmt-recoveryservices==3.1.0",
"azure-mgmt-recoveryservicesbackup==9.2.0",
"azure-mgmt-resource==23.3.0",
"azure-mgmt-resource==24.0.0",
"azure-mgmt-search==9.1.0",
"azure-mgmt-security==7.0.0",
"azure-mgmt-sql==3.0.1",
@@ -57,7 +57,7 @@ dependencies = [
"kubernetes==32.0.1",
"markdown==3.10.2",
"microsoft-kiota-abstractions==1.9.2",
"msgraph-sdk==1.23.0",
"msgraph-sdk==1.55.0",
"numpy==2.0.2",
"openstacksdk==4.2.0",
"pandas==2.2.3",
@@ -95,7 +95,7 @@ maintainers = [{name = "Prowler Engineering", email = "engineering@prowler.com"}
name = "prowler"
readme = "README.md"
requires-python = ">=3.10,<3.13"
version = "5.24.0"
version = "5.25.0"
[project.scripts]
prowler = "prowler.__main__:prowler"
@@ -121,7 +121,6 @@ docker = "7.1.0"
filelock = "3.20.3"
flake8 = "7.1.2"
freezegun = "1.5.1"
marshmallow = "==3.26.2"
mock = "5.2.0"
moto = {extras = ["all"], version = "5.1.11"}
openapi-schema-validator = "0.6.3"
+144
View File
@@ -839,6 +839,132 @@ aws:
assert isinstance(aws_provider, AwsProvider)
@mock_aws
def test_excluded_regions_removed_from_enabled_regions(self):
    # An excluded region must disappear both from the provider's enabled
    # set and from the per-region client map.
    aws_provider = AwsProvider(excluded_regions={AWS_REGION_EU_WEST_1})
    assert AWS_REGION_EU_WEST_1 not in aws_provider._enabled_regions
    assert AWS_REGION_EU_WEST_1 not in aws_provider.generate_regional_clients("ec2")
@mock_aws
def test_excluded_regions_pruned_from_input_regions(self):
    # Explicitly requested regions are filtered against the exclusion set.
    aws_provider = AwsProvider(
        regions={AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1},
        excluded_regions={AWS_REGION_EU_WEST_1},
    )
    assert AWS_REGION_EU_WEST_1 not in aws_provider._identity.audited_regions
    assert AWS_REGION_US_EAST_1 in aws_provider._identity.audited_regions
@mock_aws
def test_excluded_regions_from_config_file(self):
    # disallowed_regions read from the YAML config feed _excluded_regions.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as tmp:
        tmp.write(f"aws:\n disallowed_regions:\n - {AWS_REGION_EU_WEST_1}\n")
        config_path = tmp.name
    try:
        aws_provider = AwsProvider(config_path=config_path)
        assert AWS_REGION_EU_WEST_1 not in aws_provider._enabled_regions
        assert aws_provider._excluded_regions == {AWS_REGION_EU_WEST_1}
    finally:
        # Clean up the temp config even if assertions fail.
        os.remove(config_path)
@mock_aws
def test_excluded_regions_from_env_on_direct_provider_init(self):
    # PROWLER_AWS_DISALLOWED_REGIONS is honored even without CLI arguments.
    with mock.patch.dict(
        os.environ,
        {"PROWLER_AWS_DISALLOWED_REGIONS": AWS_REGION_EU_WEST_1},
        clear=False,
    ):
        aws_provider = AwsProvider()
        assert aws_provider._excluded_regions == {AWS_REGION_EU_WEST_1}
        assert AWS_REGION_EU_WEST_1 not in aws_provider._enabled_regions
@mock_aws
def test_excluded_regions_precedence_explicit_over_env_and_config(self):
    # Explicit excluded_regions must win over both the env var and the
    # config file; the losing sources' regions stay enabled.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as tmp:
        tmp.write(f"aws:\n disallowed_regions:\n - {AWS_REGION_EU_WEST_1}\n")
        config_path = tmp.name
    try:
        with mock.patch.dict(
            os.environ,
            {"PROWLER_AWS_DISALLOWED_REGIONS": AWS_REGION_US_EAST_1},
            clear=False,
        ):
            aws_provider = AwsProvider(
                config_path=config_path,
                excluded_regions={AWS_REGION_US_EAST_2},
            )
            assert aws_provider._excluded_regions == {AWS_REGION_US_EAST_2}
            assert AWS_REGION_US_EAST_2 not in aws_provider._enabled_regions
            assert AWS_REGION_EU_WEST_1 in aws_provider._enabled_regions
            assert AWS_REGION_US_EAST_1 in aws_provider._enabled_regions
    finally:
        os.remove(config_path)
@mock_aws
def test_excluded_regions_from_config_avoid_excluded_profile_region(
    self, monkeypatch
):
    # When the session's default region is itself excluded, the profile
    # region must fall back to another enabled region.
    monkeypatch.setenv("AWS_DEFAULT_REGION", AWS_REGION_EU_WEST_1)
    with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as tmp:
        tmp.write(f"aws:\n disallowed_regions:\n - {AWS_REGION_EU_WEST_1}\n")
        config_path = tmp.name
    try:
        aws_provider = AwsProvider(config_path=config_path)
        assert aws_provider.identity.profile_region == AWS_REGION_US_EAST_1
    finally:
        os.remove(config_path)
@mock_aws
def test_aws_provider_raises_when_all_input_regions_are_excluded(self):
    # Excluding every requested region leaves nothing to scan -> error.
    with raises(AWSArgumentTypeValidationError):
        AwsProvider(
            regions={AWS_REGION_EU_WEST_1},
            excluded_regions={AWS_REGION_EU_WEST_1},
        )
def test_get_excluded_regions_from_env_parses_comma_list(self):
    # Surrounding whitespace and empty items in the comma list are ignored.
    with mock.patch.dict(
        os.environ,
        {"PROWLER_AWS_DISALLOWED_REGIONS": " me-south-1 , ap-east-1 ,, "},
    ):
        assert Provider.get_excluded_regions_from_env() == {
            "me-south-1",
            "ap-east-1",
        }
def test_get_excluded_regions_from_env_ignores_legacy_generic_name(self):
    # Only the AWS-scoped variable counts; the generic
    # PROWLER_DISALLOWED_REGIONS name is deliberately ignored.
    with mock.patch.dict(
        os.environ,
        {"PROWLER_DISALLOWED_REGIONS": "me-south-1"},
        clear=True,
    ):
        assert Provider.get_excluded_regions_from_env() == set()
def test_get_excluded_regions_from_env_unset(self):
    # No env var set -> empty exclusion set.
    with mock.patch.dict(os.environ, {}, clear=True):
        assert Provider.get_excluded_regions_from_env() == set()
@mock_aws
def test_print_credentials_shows_all_except_excluded_regions(self):
    # The credentials box should summarize exclusions ("all except ...")
    # rather than enumerate every remaining region.
    aws_provider = AwsProvider(
        excluded_regions={AWS_REGION_EU_WEST_1, AWS_REGION_US_EAST_1}
    )
    with patch(
        "prowler.providers.aws.aws_provider.print_boxes"
    ) as mock_print_boxes:
        aws_provider.print_credentials()
    report_lines = mock_print_boxes.call_args.args[0]
    assert any(
        "AWS Regions:" in line and "all except eu-west-1, us-east-1" in line
        for line in report_lines
    )
@mock_aws
def test_generate_regional_clients_all_enabled_regions(self):
aws_provider = AwsProvider()
@@ -2033,6 +2159,24 @@ aws:
== AWS_REGION_EU_WEST_1
)
def test_get_aws_region_for_sts_avoids_excluded_session_region(self):
    # The STS bootstrap region must never be one of the excluded regions.
    input_regions = None
    session_region = AWS_REGION_EU_WEST_1
    assert (
        get_aws_region_for_sts(
            session_region, input_regions, {AWS_REGION_EU_WEST_1}
        )
        == AWS_REGION_US_EAST_1
    )
def test_get_profile_region_avoids_excluded_session_region(self):
    # Profile region falls back when the session's region is excluded.
    mocked_session = mock.Mock(region_name=AWS_REGION_EU_WEST_1)
    assert (
        AwsProvider.get_profile_region(mocked_session, {AWS_REGION_EU_WEST_1})
        == AWS_REGION_US_EAST_1
    )
@mock_aws
def test_set_session_config_default(self):
aws_provider = AwsProvider()
@@ -73,8 +73,8 @@ class TestCalendarExternalInvitationsWarning:
assert findings[0].status == "FAIL"
assert "disabled" in findings[0].status_extended
def test_fail_no_policy_set(self):
"""Test FAIL when no explicit policy is set (None) but fetch succeeded"""
def test_pass_using_default(self):
"""Test PASS when no explicit policy is set (None) — Google default is secure (enabled)"""
mock_provider = set_mocked_googleworkspace_provider()
with (
@@ -100,8 +100,8 @@ class TestCalendarExternalInvitationsWarning:
findings = check.execute()
assert len(findings) == 1
assert findings[0].status == "FAIL"
assert "not explicitly configured" in findings[0].status_extended
assert findings[0].status == "PASS"
assert "secure default" in findings[0].status_extended
def test_no_findings_when_fetch_failed(self):
"""Test no findings returned when the API fetch failed"""
@@ -104,8 +104,8 @@ class TestCalendarExternalSharingPrimaryCalendar:
assert findings[0].status == "FAIL"
assert "EXTERNAL_ALL_INFO_READ_WRITE" in findings[0].status_extended
def test_fail_no_policy_set(self):
"""Test FAIL when no explicit policy is set (None) but fetch succeeded"""
def test_pass_using_default(self):
"""Test PASS when no explicit policy is set (None) — Google default is secure (free/busy only)"""
mock_provider = set_mocked_googleworkspace_provider()
with (
@@ -131,8 +131,8 @@ class TestCalendarExternalSharingPrimaryCalendar:
findings = check.execute()
assert len(findings) == 1
assert findings[0].status == "FAIL"
assert "not explicitly configured" in findings[0].status_extended
assert findings[0].status == "PASS"
assert "secure default" in findings[0].status_extended
def test_no_findings_when_fetch_failed(self):
"""Test no findings returned when the API fetch failed"""
@@ -67,8 +67,8 @@ class TestDriveExternalSharingWarnUsers:
assert findings[0].status == "FAIL"
assert "disabled" in findings[0].status_extended
def test_fail_no_policy_set(self):
"""Test FAIL when no explicit policy is set (None) but fetch succeeded"""
def test_pass_using_default(self):
"""Test PASS when no explicit policy is set (None) — Google default is secure"""
mock_provider = set_mocked_googleworkspace_provider()
with (
@@ -92,8 +92,8 @@ class TestDriveExternalSharingWarnUsers:
findings = check.execute()
assert len(findings) == 1
assert findings[0].status == "FAIL"
assert "not explicitly configured" in findings[0].status_extended
assert findings[0].status == "PASS"
assert "secure default" in findings[0].status_extended
def test_no_findings_when_fetch_failed(self):
"""Test no findings returned when the API fetch failed"""
@@ -69,8 +69,8 @@ class TestDriveSharedDriveCreationAllowed:
assert findings[0].status == "FAIL"
assert "prevented" in findings[0].status_extended
def test_fail_no_policy_set(self):
"""Test FAIL when no explicit policy is set (None) but fetch succeeded"""
def test_pass_using_default(self):
"""Test PASS when no explicit policy is set (None) — Google default is secure"""
mock_provider = set_mocked_googleworkspace_provider()
with (
@@ -94,8 +94,8 @@ class TestDriveSharedDriveCreationAllowed:
findings = check.execute()
assert len(findings) == 1
assert findings[0].status == "FAIL"
assert "not explicitly configured" in findings[0].status_extended
assert findings[0].status == "PASS"
assert "secure default" in findings[0].status_extended
def test_no_findings_when_fetch_failed(self):
"""Test no findings returned when the API fetch failed"""
@@ -101,8 +101,8 @@ class TestDriveSharedDriveDisableDownloadPrintCopy:
assert findings[0].status == "FAIL"
assert "ALL" in findings[0].status_extended
def test_fail_no_policy_set(self):
"""Test FAIL when no explicit policy is set (None) but fetch succeeded"""
def test_pass_using_default(self):
"""Test PASS when no explicit policy is set (None) — Google default is secure"""
mock_provider = set_mocked_googleworkspace_provider()
with (
@@ -128,8 +128,8 @@ class TestDriveSharedDriveDisableDownloadPrintCopy:
findings = check.execute()
assert len(findings) == 1
assert findings[0].status == "FAIL"
assert "not explicitly configured" in findings[0].status_extended
assert findings[0].status == "PASS"
assert "secure default" in findings[0].status_extended
def test_no_findings_when_fetch_failed(self):
"""Test no findings returned when the API fetch failed"""
@@ -71,8 +71,8 @@ class TestDriveWarnSharingWithAllowlistedDomains:
assert findings[0].status == "FAIL"
assert "disabled" in findings[0].status_extended
def test_fail_no_policy_set(self):
"""Test FAIL when no explicit policy is set (None) but fetch succeeded"""
def test_pass_using_default(self):
"""Test PASS when no explicit policy is set (None) — Google default is secure"""
mock_provider = set_mocked_googleworkspace_provider()
with (
@@ -98,8 +98,8 @@ class TestDriveWarnSharingWithAllowlistedDomains:
findings = check.execute()
assert len(findings) == 1
assert findings[0].status == "FAIL"
assert "not explicitly configured" in findings[0].status_extended
assert findings[0].status == "PASS"
assert "secure default" in findings[0].status_extended
def test_no_findings_when_fetch_failed(self):
"""Test no findings returned when the API fetch failed"""
@@ -1185,3 +1185,58 @@ class TestInitGlobalProviderRegistryEnumeration:
# The "other/lib" repo should be filtered out by --image-filter
assert not any("other/lib" in img for img in provider.images)
assert len(provider.images) == 3
class TestRegistryListMode:
    """Regression test: `prowler image --registry <url> --registry-list` crashes.

    When --registry-list is passed, ImageProvider._enumerate_registry sets
    _listing_only = True and __init__ returns early before calling
    Provider.set_global_provider(self). The caller in __main__.py then calls
    global_provider.print_credentials() on a None reference, raising
    AttributeError: 'NoneType' object has no attribute 'print_credentials'.
    """

    @patch("prowler.providers.image.image_provider.create_registry_adapter")
    @patch("prowler.providers.common.provider.load_and_validate_config_file")
    def test_registry_list_does_not_crash(self, mock_load_config, mock_adapter_factory):
        """Reproduce the --registry-list crash by running the same sequence
        as __main__.py: init_global_provider, get_global_provider,
        then print_credentials."""
        # Stub out config loading and the registry adapter so no network
        # or filesystem access happens during provider init.
        mock_load_config.return_value = {}
        adapter = MagicMock()
        adapter.list_repositories.return_value = ["myorg/app"]
        adapter.list_tags.return_value = ["v1.0", "latest"]
        mock_adapter_factory.return_value = adapter
        # Minimal argparse namespace mirroring the `image` CLI flags,
        # with registry_list_images=True to trigger listing mode.
        arguments = Namespace(
            provider="image",
            config_file=None,
            fixer_config=None,
            images=None,
            image_list_file=None,
            scanners=["vuln"],
            image_config_scanners=None,
            trivy_severity=None,
            ignore_unfixed=False,
            timeout="5m",
            registry="myregistry.io",
            image_filter=None,
            tag_filter=None,
            max_images=0,
            registry_insecure=False,
            registry_list_images=True,
        )
        # Reproduce the exact crash sequence from __main__.py lines 289-294:
        # Provider.init_global_provider(args)
        # global_provider = Provider.get_global_provider()
        # global_provider.print_credentials()
        with mock.patch.object(Provider, "_global", None):
            Provider.init_global_provider(arguments)
            global_provider = Provider.get_global_provider()
            # This is the line that crashes: global_provider is None so
            # .print_credentials() raises AttributeError.
            global_provider.print_credentials()
@@ -0,0 +1,45 @@
from unittest import mock
from prowler.providers.vercel.services.project.project_service import Project
from tests.providers.vercel.vercel_fixtures import (
PROJECT_ID,
PROJECT_NAME,
TEAM_ID,
set_mocked_vercel_provider,
)
class TestProjectService:
    """Unit tests for the Vercel Project service parsing logic."""

    def test_list_projects_parses_security_metadata(self):
        # Build the service without running __init__ so no real API
        # client or provider session is required.
        service = Project.__new__(Project)
        service.provider = set_mocked_vercel_provider()
        service.projects = {}
        # One project whose "security" block carries firewall metadata.
        service._paginate = mock.MagicMock(
            return_value=[
                {
                    "id": PROJECT_ID,
                    "name": PROJECT_NAME,
                    "accountId": TEAM_ID,
                    "security": {
                        "firewallEnabled": True,
                        "firewallConfigVersion": 42,
                        "managedRules": {
                            "owasp": {"active": True, "action": "log"},
                            "ai_bots": {"active": False, "action": "deny"},
                        },
                        "botIdEnabled": True,
                    },
                }
            ]
        )

        service._list_projects()

        project = service.projects[PROJECT_ID]
        assert project.firewall_enabled is True
        # The numeric version from the API is normalized to a string.
        assert project.firewall_config_version == "42"
        assert project.managed_rules == {
            "owasp": {"active": True, "action": "log"},
            "ai_bots": {"active": False, "action": "deny"},
        }
        assert project.bot_id_enabled is True
@@ -0,0 +1,199 @@
from unittest import mock
from prowler.providers.vercel.services.project.project_service import VercelProject
from prowler.providers.vercel.services.security.security_service import Security
from tests.providers.vercel.vercel_fixtures import PROJECT_ID, PROJECT_NAME, TEAM_ID
class TestSecurityService:
    """Unit tests for the Vercel Security service firewall-config fetching."""

    def test_fetch_firewall_config_reads_active_version_and_normalizes_response(self):
        project = VercelProject(id=PROJECT_ID, name=PROJECT_NAME, team_id=TEAM_ID)
        # Bypass __init__ so no real API client is constructed.
        service = Security.__new__(Security)
        service.firewall_configs = {}
        # Envelope-shaped response: the real config lives under "active".
        service._get = mock.MagicMock(
            return_value={
                "active": {
                    "firewallEnabled": True,
                    "managedRules": {
                        "owasp": {"active": True, "action": "deny"},
                        "ai_bots": {"active": False, "action": "deny"},
                    },
                    "rules": [
                        {
                            "id": "rule-custom",
                            "name": "Block admin access",
                            "active": True,
                            "conditionGroup": [
                                {
                                    "conditions": [
                                        {
                                            "type": "path",
                                            "op": "pre",
                                            "value": "/admin",
                                        }
                                    ]
                                }
                            ],
                            "action": {
                                "mitigate": {
                                    "action": "deny",
                                }
                            },
                        },
                        {
                            "id": "rule-rate-limit",
                            "name": "Rate limit login",
                            "active": True,
                            "conditionGroup": [
                                {
                                    "conditions": [
                                        {
                                            "type": "path",
                                            "op": "eq",
                                            "value": "/login",
                                        }
                                    ]
                                }
                            ],
                            "action": {
                                "mitigate": {
                                    "action": "deny",
                                    "rateLimit": {
                                        "algo": "fixed_window",
                                        "window": 60,
                                        "limit": 10,
                                    },
                                }
                            },
                        },
                    ],
                    "ips": [
                        {
                            "id": "ip-rule",
                            "ip": "203.0.113.7",
                            "action": "deny",
                        }
                    ],
                },
                "draft": None,
                "versions": [1],
            }
        )

        service._fetch_firewall_config(project)

        # The active-config endpoint is tried first, scoped to the team.
        service._get.assert_called_once_with(
            "/v1/security/firewall/config/active",
            params={"projectId": PROJECT_ID, "teamId": TEAM_ID},
        )
        config = service.firewall_configs[PROJECT_ID]
        assert config.firewall_enabled is True
        # Inactive managed rulesets (ai_bots) are filtered out.
        assert config.managed_rulesets == {"owasp": {"active": True, "action": "deny"}}
        assert [rule["id"] for rule in config.custom_rules] == ["rule-custom"]
        assert [rule["id"] for rule in config.rate_limiting_rules] == [
            "rule-rate-limit"
        ]
        assert [rule["id"] for rule in config.ip_blocking_rules] == ["ip-rule"]

    def test_fetch_firewall_config_parses_crs_managed_rulesets(self):
        project = VercelProject(
            id=PROJECT_ID,
            name=PROJECT_NAME,
            team_id=TEAM_ID,
            firewall_config_version="1",
        )
        service = Security.__new__(Security)
        service.firewall_configs = {}
        # Bare config shape that uses the "crs" key for managed rulesets.
        service._get = mock.MagicMock(
            return_value={
                "id": "waf_test",
                "version": 1,
                "firewallEnabled": True,
                "crs": {
                    "gen": {"active": True, "action": "log"},
                    "xss": {"active": True, "action": "deny"},
                    "php": {"active": False, "action": "log"},
                },
                "rules": [],
                "ips": [],
            }
        )

        service._fetch_firewall_config(project)

        config = service.firewall_configs[PROJECT_ID]
        assert config.firewall_enabled is True
        # Only the active crs entries survive; "php" is dropped.
        assert config.managed_rulesets == {
            "gen": {"active": True, "action": "log"},
            "xss": {"active": True, "action": "deny"},
        }

    def test_fetch_firewall_config_falls_back_to_wrapper_when_active_missing(self):
        project = VercelProject(id=PROJECT_ID, name=PROJECT_NAME, team_id=TEAM_ID)
        service = Security.__new__(Security)
        service.firewall_configs = {}
        # First call (active endpoint) raises; second (wrapper) succeeds.
        service._get = mock.MagicMock(
            side_effect=[
                Exception("404 active config not found"),
                {"active": None, "draft": None, "versions": []},
            ]
        )

        service._fetch_firewall_config(project)

        # Both endpoints must be called, in order, with team scope.
        assert service._get.call_args_list == [
            mock.call(
                "/v1/security/firewall/config/active",
                params={"projectId": PROJECT_ID, "teamId": TEAM_ID},
            ),
            mock.call(
                "/v1/security/firewall/config",
                params={"projectId": PROJECT_ID, "teamId": TEAM_ID},
            ),
        ]
        config = service.firewall_configs[PROJECT_ID]
        assert config.firewall_enabled is False
        assert config.managed_rulesets == {}
        assert config.custom_rules == []
        assert config.rate_limiting_rules == []
        assert config.ip_blocking_rules == []

    def test_fetch_firewall_config_uses_project_security_metadata_when_config_empty(
        self,
    ):
        project = VercelProject(
            id=PROJECT_ID,
            name=PROJECT_NAME,
            team_id=TEAM_ID,
            firewall_enabled=True,
            firewall_config_version="42",
            managed_rules={
                "owasp": {"active": True, "action": "log"},
                "ai_bots": {"active": False, "action": "deny"},
            },
        )
        service = Security.__new__(Security)
        service.firewall_configs = {}
        service._get = mock.MagicMock(
            return_value={"active": None, "draft": None, "versions": []}
        )

        service._fetch_firewall_config(project)

        # A known config version routes to the versioned endpoint.
        service._get.assert_called_once_with(
            "/v1/security/firewall/config/42",
            params={"projectId": PROJECT_ID, "teamId": TEAM_ID},
        )
        config = service.firewall_configs[PROJECT_ID]
        assert config.firewall_enabled is True
        # Metadata fallback still drops inactive managed rulesets.
        assert config.managed_rulesets == {"owasp": {"active": True, "action": "log"}}
        assert config.custom_rules == []
        assert config.rate_limiting_rules == []
        assert config.ip_blocking_rules == []
+19 -3
View File
@@ -2,17 +2,34 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.24.0] (Prowler UNRELEASED)
## [1.24.1] (Prowler v5.24.1)
### 🔒 Security
- Upgrade React to 19.2.5 and Next.js to 16.2.3 to mitigate CVE-2026-23869 (React2DoS), a high-severity unauthenticated remote DoS vulnerability in the React Flight Protocol's Server Function deserialization [(#10754)](https://github.com/prowler-cloud/prowler/pull/10754)
- Disable Next.js 16 Server Function argument logging to prevent sign-in credentials (email/password) from being printed to the terminal during development [(#10760)](https://github.com/prowler-cloud/prowler/pull/10760)
---
## [1.24.0] (Prowler v5.24.0)
### 🚀 Added
- Resources side drawer with redesigned detail panel [(#10673)](https://github.com/prowler-cloud/prowler/pull/10673)
- Syntax highlighting for remediation code blocks in finding groups drawer with provider-aware auto-detection (Shell, HCL, YAML, Bicep) [(#10698)](https://github.com/prowler-cloud/prowler/pull/10698)
### 🔄 Changed
- Attack Paths scan selection: contextual button labels based on graph availability, tooltips on disabled actions, green dot indicator for selectable scans, and a warning banner when viewing data from a previous scan cycle [(#10685)](https://github.com/prowler-cloud/prowler/pull/10685)
- Remove legacy finding detail sheet, row-details wrapper, and resource detail panel; unify findings and resources around new side drawers [(#10692)](https://github.com/prowler-cloud/prowler/pull/10692)
- Attack Paths "View Finding" now opens the finding drawer inline over the graph instead of navigating to `/findings` in a new tab, preserving graph zoom, selection, and filter state
- Attack Paths scan table: replace action buttons with radio buttons, add dedicated Graph column, use info-colored In Progress badge, remove redundant Progress column, and fix info banner variant [(#10704)](https://github.com/prowler-cloud/prowler/pull/10704)
### 🐞 Fixed
- Findings group resource filters now strip unsupported scan parameters, display scan name instead of provider alias in filter badges, migrate mute modal from HeroUI to shadcn, and add searchable accounts/provider type selectors [(#10662)](https://github.com/prowler-cloud/prowler/pull/10662)
- Compliance detail page header now reflects the actual provider, alias and UID of the selected scan instead of always defaulting to AWS [(#10674)](https://github.com/prowler-cloud/prowler/pull/10674)
- Attack Path scan selector now labels buttons based on `graph_data_ready` instead of scan state, shows tooltip on disabled buttons, and displays green dot on all scan states when graph data is available [(#10694)](https://github.com/prowler-cloud/prowler/pull/10694)
- Provider wizard modal moved to a stable page-level host so the providers table refreshes after link, authenticate, and connection check without closing the modal [(#10675)](https://github.com/prowler-cloud/prowler/pull/10675)
---
@@ -41,7 +58,6 @@ All notable changes to the **Prowler UI** are documented in this file.
### 🐞 Fixed
- Preserve query parameters in callbackUrl during invitation flow [(#10571)](https://github.com/prowler-cloud/prowler/pull/10571)
- Deleting the active organization now switches to the target org before deleting, preventing JWT rejection from the backend [(#10491)](https://github.com/prowler-cloud/prowler/pull/10491)
- Clear Filters now resets all filters including muted findings and auto-applies, Clear all in pills only removes pill-visible sub-filters, and the discard icon is now an Undo text button [(#10446)](https://github.com/prowler-cloud/prowler/pull/10446)
- Send to Jira modal now dynamically fetches and displays available issue types per project instead of hardcoding `"Task"`, fixing failures on non-English Jira instances [(#10534)](https://github.com/prowler-cloud/prowler/pull/10534)
- Exclude service filter from finding group resources endpoint to prevent empty results when a service filter is active [(#10652)](https://github.com/prowler-cloud/prowler/pull/10652)
@@ -70,7 +70,7 @@ describe("getFindingGroups — default sort for muted and non-muted rows", () =>
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-new_fail_count,-changed_fail_count,-severity,-fail_count,-last_seen_at",
"-status,-severity,-new_fail_count,-changed_fail_count,-fail_count,-last_seen_at",
);
});
@@ -84,7 +84,7 @@ describe("getFindingGroups — default sort for muted and non-muted rows", () =>
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-new_fail_count,-changed_fail_count,-severity,-new_fail_muted_count,-changed_fail_muted_count,-fail_count,-fail_muted_count,-last_seen_at",
"-status,-severity,-new_fail_count,-changed_fail_count,-new_fail_muted_count,-changed_fail_muted_count,-fail_count,-fail_muted_count,-last_seen_at",
);
});
});
@@ -106,7 +106,7 @@ describe("getLatestFindingGroups — default sort for muted and non-muted rows",
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-new_fail_count,-changed_fail_count,-severity,-fail_count,-last_seen_at",
"-status,-severity,-new_fail_count,-changed_fail_count,-fail_count,-last_seen_at",
);
});
@@ -120,7 +120,7 @@ describe("getLatestFindingGroups — default sort for muted and non-muted rows",
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-new_fail_count,-changed_fail_count,-severity,-new_fail_muted_count,-changed_fail_muted_count,-fail_count,-fail_muted_count,-last_seen_at",
"-status,-severity,-new_fail_count,-changed_fail_count,-new_fail_muted_count,-changed_fail_muted_count,-fail_count,-fail_muted_count,-last_seen_at",
);
});
});
@@ -262,7 +262,7 @@ describe("getFindingGroupResources — Blocker 1: FAIL-first sort", () => {
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-delta,-severity,-last_seen_at",
"-status,-severity,-delta,-last_seen_at",
);
});
@@ -300,7 +300,7 @@ describe("getLatestFindingGroupResources — Blocker 1: FAIL-first sort", () =>
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-delta,-severity,-last_seen_at",
"-status,-severity,-delta,-last_seen_at",
);
});
@@ -344,7 +344,7 @@ describe("getFindingGroupResources — triangulation: params coexist", () => {
expect(url.searchParams.get("page[number]")).toBe("2");
expect(url.searchParams.get("page[size]")).toBe("50");
expect(url.searchParams.get("sort")).toBe(
"-status,-delta,-severity,-last_seen_at",
"-status,-severity,-delta,-last_seen_at",
);
expect(url.searchParams.get("filter[status]")).toBeNull();
});
@@ -372,7 +372,7 @@ describe("getLatestFindingGroupResources — triangulation: params coexist", ()
expect(url.searchParams.get("page[number]")).toBe("3");
expect(url.searchParams.get("page[size]")).toBe("20");
expect(url.searchParams.get("sort")).toBe(
"-status,-delta,-severity,-last_seen_at",
"-status,-severity,-delta,-last_seen_at",
);
expect(url.searchParams.get("filter[status]")).toBeNull();
});
@@ -443,7 +443,7 @@ describe("getFindingGroupResources — caller filters are preserved", () => {
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-delta,-severity,-last_seen_at",
"-status,-severity,-delta,-last_seen_at",
);
expect(url.searchParams.get("filter[name__icontains]")).toBe("bucket-prod");
expect(url.searchParams.get("filter[severity__in]")).toBe("high");
@@ -533,7 +533,7 @@ describe("getLatestFindingGroupResources — caller filters are preserved", () =
const calledUrl = fetchMock.mock.calls[0][0] as string;
const url = new URL(calledUrl);
expect(url.searchParams.get("sort")).toBe(
"-status,-delta,-severity,-last_seen_at",
"-status,-severity,-delta,-last_seen_at",
);
expect(url.searchParams.get("filter[name__icontains]")).toBe(
"instance-prod",
+3 -3
View File
@@ -83,13 +83,13 @@ function normalizeFindingGroupResourceFilters(
}
const DEFAULT_FINDING_GROUPS_SORT =
"-status,-new_fail_count,-changed_fail_count,-severity,-fail_count,-last_seen_at";
"-status,-severity,-new_fail_count,-changed_fail_count,-fail_count,-last_seen_at";
const DEFAULT_FINDING_GROUPS_SORT_WITH_MUTED =
"-status,-new_fail_count,-changed_fail_count,-severity,-new_fail_muted_count,-changed_fail_muted_count,-fail_count,-fail_muted_count,-last_seen_at";
"-status,-severity,-new_fail_count,-changed_fail_count,-new_fail_muted_count,-changed_fail_muted_count,-fail_count,-fail_muted_count,-last_seen_at";
const DEFAULT_FINDING_GROUP_RESOURCES_SORT =
"-status,-delta,-severity,-last_seen_at";
"-status,-severity,-delta,-last_seen_at";
interface FetchFindingGroupsParams {
page?: number;
@@ -262,7 +262,6 @@ export const getLatestFindingsByResourceUid = async ({
);
url.searchParams.append("filter[resource_uid]", resourceUid);
url.searchParams.append("filter[status]", "FAIL");
url.searchParams.append("filter[muted]", "include");
url.searchParams.append("sort", "-severity,-updated_at");
if (page) url.searchParams.append("page[number]", page.toString());
+135
View File
@@ -0,0 +1,135 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
// Shared mock functions created via vi.hoisted so the vi.mock factories
// below (which vitest hoists above imports) can reference them safely.
const {
  fetchMock,
  getAuthHeadersMock,
  getFormValueMock,
  handleApiErrorMock,
  handleApiResponseMock,
} = vi.hoisted(() => ({
  fetchMock: vi.fn(),
  getAuthHeadersMock: vi.fn(),
  getFormValueMock: vi.fn(),
  handleApiErrorMock: vi.fn(),
  handleApiResponseMock: vi.fn(),
}));

vi.mock("next/cache", () => ({
  revalidatePath: vi.fn(),
}));

vi.mock("next/navigation", () => ({
  redirect: vi.fn(),
}));

vi.mock("@/lib", () => ({
  apiBaseUrl: "https://api.example.com/api/v1",
  getAuthHeaders: getAuthHeadersMock,
  getFormValue: getFormValueMock,
  wait: vi.fn(),
}));

vi.mock("@/lib/provider-credentials/build-crendentials", () => ({
  buildSecretConfig: vi.fn(() => ({
    secretType: "access-secret-key",
    secret: { key: "value" },
  })),
}));

vi.mock("@/lib/provider-filters", () => ({
  appendSanitizedProviderInFilters: vi.fn(),
}));

vi.mock("@/lib/server-actions-helper", () => ({
  handleApiError: handleApiErrorMock,
  handleApiResponse: handleApiResponseMock,
}));

import {
  addCredentialsProvider,
  addProvider,
  checkConnectionProvider,
  updateCredentialsProvider,
} from "./providers";

describe("providers actions", () => {
  beforeEach(() => {
    vi.clearAllMocks();
    vi.stubGlobal("fetch", fetchMock);
    getAuthHeadersMock.mockResolvedValue({ Authorization: "Bearer token" });
    getFormValueMock.mockImplementation((formData: FormData, field: string) =>
      formData.get(field),
    );
    handleApiErrorMock.mockReturnValue({ error: "Unexpected error" });
    handleApiResponseMock.mockResolvedValue({ data: { id: "secret-1" } });
    // Every fetch resolves with a minimal successful JSON:API payload.
    fetchMock.mockResolvedValue(
      new Response(JSON.stringify({ data: { id: "secret-1" } }), {
        status: 200,
        headers: { "Content-Type": "application/json" },
      }),
    );
  });

  // Each case below asserts the action passes "/providers" to
  // handleApiResponse so the providers table is revalidated in place.
  it("should revalidate providers after linking a cloud provider", async () => {
    // Given
    const formData = new FormData();
    formData.set("providerType", "aws");
    formData.set("providerUid", "111111111111");

    // When
    await addProvider(formData);

    // Then
    expect(handleApiResponseMock).toHaveBeenCalledWith(
      expect.any(Response),
      "/providers",
    );
  });

  it("should revalidate providers after adding credentials in the wizard", async () => {
    // Given
    const formData = new FormData();
    formData.set("providerId", "provider-1");
    formData.set("providerType", "aws");

    // When
    await addCredentialsProvider(formData);

    // Then
    expect(handleApiResponseMock).toHaveBeenCalledWith(
      expect.any(Response),
      "/providers",
    );
  });

  it("should revalidate providers after updating credentials in the wizard", async () => {
    // Given
    const formData = new FormData();
    formData.set("providerId", "provider-1");
    formData.set("providerType", "oraclecloud");

    // When
    await updateCredentialsProvider("secret-1", formData);

    // Then
    expect(handleApiResponseMock).toHaveBeenCalledWith(
      expect.any(Response),
      "/providers",
    );
  });

  it("should revalidate providers when checking connection from the wizard", async () => {
    // Given
    const formData = new FormData();
    formData.set("providerId", "provider-1");

    // When
    await checkConnectionProvider(formData);

    // Then
    expect(handleApiResponseMock).toHaveBeenCalledWith(
      expect.any(Response),
      "/providers",
    );
  });
});
+1
View File
@@ -3,6 +3,7 @@ export {
getLatestResources,
getMetadataInfo,
getResourceById,
getResourceDrawerData,
getResourceEvents,
getResources,
} from "./resources";
+57
View File
@@ -2,9 +2,12 @@
import { redirect } from "next/navigation";
import { getLatestFindings } from "@/actions/findings";
import { listOrganizationsSafe } from "@/actions/organizations/organizations";
import { apiBaseUrl, getAuthHeaders } from "@/lib";
import { appendSanitizedProviderTypeFilters } from "@/lib/provider-filters";
import { handleApiResponse } from "@/lib/server-actions-helper";
import { OrganizationResource } from "@/types/organizations";
export const getResources = async ({
page = 1,
@@ -255,3 +258,57 @@ export const getResourceById = async (
return undefined;
}
};
/**
 * Aggregates everything the resource detail drawer needs in one round trip:
 * the resource itself (tags only), its latest FAIL findings, and — for AWS
 * providers in cloud environments — the organization the provider belongs to.
 *
 * Every field of the returned object has an empty fallback so the drawer can
 * render even when one of the upstream calls returns nothing.
 */
export const getResourceDrawerData = async ({
  resourceId,
  resourceUid,
  providerId,
  providerType,
  page = 1,
  pageSize = 10,
  query = "",
}: {
  resourceId: string;
  resourceUid: string;
  providerId: string;
  providerType: string;
  page?: number;
  pageSize?: number;
  query?: string;
}) => {
  const isCloudEnv = process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true";

  // The three requests are independent of each other, so run them in parallel.
  const [resourceData, findingsResponse, organizationsResponse] =
    await Promise.all([
      getResourceById(resourceId, { fields: ["tags"] }),
      getLatestFindings({
        page,
        pageSize,
        query,
        sort: "severity,-inserted_at",
        filters: {
          "filter[resource_uid]": resourceUid,
          "filter[status]": "FAIL",
        },
      }),
      // Organizations are only relevant for AWS providers in cloud mode;
      // resolve to an empty list otherwise so the tuple shape stays stable.
      isCloudEnv && providerType === "aws"
        ? listOrganizationsSafe()
        : Promise.resolve({ data: [] }),
    ]);

  // Find the organization that owns this provider (AWS only). Guard `data`
  // with a fallback array in case the safe call still yields an unexpected
  // shape — .find on undefined would otherwise throw.
  const providerOrg =
    providerType === "aws"
      ? ((organizationsResponse.data ?? []).find(
          (organization: OrganizationResource) =>
            organization.relationships?.providers?.data?.some(
              (provider: { id: string }) => provider.id === providerId,
            ),
        ) ?? null)
      : null;

  return {
    findings: findingsResponse?.data ?? [],
    findingsMeta: findingsResponse?.meta ?? null,
    providerOrg,
    // Optional-chain `attributes` too: a resource payload with `data` but no
    // `attributes` (e.g. a deleted resource) must yield empty tags, not crash.
    resourceTags: resourceData?.data?.attributes?.tags ?? {},
  };
};
@@ -0,0 +1,16 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("findings view overview SSR", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "findings-view.ssr.tsx");
const source = readFileSync(filePath, "utf8");
it("uses the non-legacy latest findings columns", () => {
expect(source).toContain("ColumnLatestFindings");
expect(source).not.toContain("ColumnNewFindingsToDate");
});
});
@@ -3,7 +3,7 @@
import { getLatestFindings } from "@/actions/findings/findings";
import { LighthouseBanner } from "@/components/lighthouse/banner";
import { LinkToFindings } from "@/components/overview";
import { ColumnNewFindingsToDate } from "@/components/overview/new-findings-table/table/column-new-findings-to-date";
import { ColumnLatestFindings } from "@/components/overview/new-findings-table/table";
import { DataTable } from "@/components/ui/table";
import { createDict } from "@/lib/helper";
import { FindingProps, SearchParamsProps } from "@/types";
@@ -73,7 +73,7 @@ export async function FindingsViewSSR({ searchParams }: FindingsViewSSRProps) {
<DataTable
key={`dashboard-findings-${Date.now()}`}
columns={ColumnNewFindingsToDate}
columns={ColumnLatestFindings}
data={(expandedResponse?.data || []) as FindingProps[]}
/>
</div>
@@ -0,0 +1,93 @@
import { render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import type { ReactNode } from "react";
import { describe, expect, it, vi } from "vitest";
import type { GraphNode } from "@/types/attack-paths";
import { NodeDetailPanel } from "./node-detail-panel";
vi.mock("@/components/ui/sheet/sheet", () => ({
Sheet: ({ children }: { children: ReactNode }) => <div>{children}</div>,
SheetContent: ({ children }: { children: ReactNode }) => (
<div>{children}</div>
),
SheetDescription: ({ children }: { children: ReactNode }) => (
<div>{children}</div>
),
SheetHeader: ({ children }: { children: ReactNode }) => <div>{children}</div>,
SheetTitle: ({ children }: { children: ReactNode }) => <div>{children}</div>,
}));
vi.mock("./node-overview", () => ({
NodeOverview: () => <div>Node overview</div>,
}));
vi.mock("./node-findings", () => ({
NodeFindings: () => <div>Node findings</div>,
}));
vi.mock("./node-resources", () => ({
NodeResources: () => <div>Node resources</div>,
}));
// A node whose label marks it as a Prowler finding; `properties.id` is the
// finding id the panel's "View Finding" button is expected to surface.
const findingNode: GraphNode = {
  id: "graph-node-id",
  labels: ["ProwlerFinding"],
  properties: {
    id: "finding-123",
    check_title: "Open S3 bucket",
    name: "Open S3 bucket",
  },
};

// A plain resource node — no finding label, so the panel should render no
// "View Finding" button for it.
const resourceNode: GraphNode = {
  id: "resource-node-id",
  labels: ["S3Bucket"],
  properties: {
    id: "bucket-123",
    name: "bucket-123",
  },
};
describe("NodeDetailPanel", () => {
it("renders the view finding button only for finding nodes", () => {
const { rerender } = render(<NodeDetailPanel node={findingNode} />);
expect(
screen.getByRole("button", { name: /view finding finding-123/i }),
).toBeInTheDocument();
rerender(<NodeDetailPanel node={resourceNode} />);
expect(
screen.queryByRole("button", { name: /view finding/i }),
).not.toBeInTheDocument();
});
it("calls onViewFinding with the node finding id", async () => {
const user = userEvent.setup();
const onViewFinding = vi.fn();
render(
<NodeDetailPanel node={findingNode} onViewFinding={onViewFinding} />,
);
await user.click(
screen.getByRole("button", { name: /view finding finding-123/i }),
);
expect(onViewFinding).toHaveBeenCalledWith("finding-123");
});
it("disables the button and shows the spinner while loading", () => {
render(<NodeDetailPanel node={findingNode} viewFindingLoading />);
const button = screen.getByRole("button", {
name: /view finding finding-123/i,
});
expect(button).toBeDisabled();
expect(screen.getByLabelText("Loading")).toHaveClass("size-4");
});
});
@@ -1,6 +1,7 @@
"use client";
import { Button, Card, CardContent } from "@/components/shadcn";
import { Spinner } from "@/components/shadcn/spinner/spinner";
import {
Sheet,
SheetContent,
@@ -18,6 +19,8 @@ interface NodeDetailPanelProps {
node: GraphNode | null;
allNodes?: GraphNode[];
onClose?: () => void;
onViewFinding?: (findingId: string) => void;
viewFindingLoading?: boolean;
}
/**
@@ -26,9 +29,13 @@ interface NodeDetailPanelProps {
export const NodeDetailContent = ({
node,
allNodes = [],
onViewFinding,
viewFindingLoading = false,
}: {
node: GraphNode;
allNodes?: GraphNode[];
onViewFinding?: (findingId: string) => void;
viewFindingLoading?: boolean;
}) => {
const isProwlerFinding = node?.labels.some((label) =>
label.toLowerCase().includes("finding"),
@@ -56,7 +63,12 @@ export const NodeDetailContent = ({
<div className="text-text-neutral-secondary dark:text-text-neutral-secondary text-xs">
Findings connected to this node
</div>
<NodeFindings node={node} allNodes={allNodes} />
<NodeFindings
node={node}
allNodes={allNodes}
onViewFinding={onViewFinding}
viewFindingLoading={viewFindingLoading}
/>
</CardContent>
</Card>
)}
@@ -88,12 +100,15 @@ export const NodeDetailPanel = ({
node,
allNodes = [],
onClose,
onViewFinding,
viewFindingLoading = false,
}: NodeDetailPanelProps) => {
const isOpen = node !== null;
const isProwlerFinding = node?.labels.some((label) =>
label.toLowerCase().includes("finding"),
);
const findingId = node ? String(node.properties?.id || node.id) : "";
return (
<Sheet open={isOpen} onOpenChange={(open) => !open && onClose?.()}>
@@ -107,15 +122,19 @@ export const NodeDetailPanel = ({
</SheetDescription>
</div>
{node && isProwlerFinding && (
<Button asChild variant="default" size="sm" className="mt-1">
<a
href={`/findings?id=${String(node.properties?.id || node.id)}`}
target="_blank"
rel="noopener noreferrer"
aria-label={`View finding ${String(node.properties?.id || node.id)}`}
>
View Finding
</a>
<Button
variant="default"
size="sm"
className="mt-1"
onClick={() => onViewFinding?.(findingId)}
disabled={viewFindingLoading}
aria-label={`View finding ${findingId}`}
>
{viewFindingLoading ? (
<Spinner className="size-4" />
) : (
"View Finding →"
)}
</Button>
)}
</div>
@@ -123,7 +142,12 @@ export const NodeDetailPanel = ({
{node && (
<div className="pt-6">
<NodeDetailContent node={node} allNodes={allNodes} />
<NodeDetailContent
node={node}
allNodes={allNodes}
onViewFinding={onViewFinding}
viewFindingLoading={viewFindingLoading}
/>
</div>
)}
</SheetContent>
@@ -1,5 +1,7 @@
"use client";
import { Button } from "@/components/shadcn";
import { Spinner } from "@/components/shadcn/spinner/spinner";
import { SeverityBadge } from "@/components/ui/table/severity-badge";
import type { GraphNode } from "@/types/attack-paths";
@@ -16,13 +18,20 @@ type Severity = (typeof SEVERITY_LEVELS)[keyof typeof SEVERITY_LEVELS];
interface NodeFindingsProps {
node: GraphNode;
allNodes?: GraphNode[];
onViewFinding?: (findingId: string) => void;
viewFindingLoading?: boolean;
}
/**
* Node findings section showing related findings for the selected node
* Displays findings that are connected to the node via HAS_FINDING edges
*/
export const NodeFindings = ({ node, allNodes = [] }: NodeFindingsProps) => {
export const NodeFindings = ({
node,
allNodes = [],
onViewFinding,
viewFindingLoading = false,
}: NodeFindingsProps) => {
// Get finding IDs from the node's findings array (populated by adapter)
const findingIds = node.findings || [];
@@ -79,15 +88,20 @@ export const NodeFindings = ({ node, allNodes = [] }: NodeFindingsProps) => {
ID: {findingId}
</p>
</div>
<a
href={`/findings?id=${findingId}`}
target="_blank"
rel="noopener noreferrer"
<Button
variant="link"
size="sm"
onClick={() => onViewFinding?.(findingId)}
disabled={viewFindingLoading}
aria-label={`View full finding for ${findingName}`}
className="text-text-info dark:text-text-info h-auto shrink-0 p-0 text-xs font-medium hover:underline"
>
View Full Finding
</a>
{viewFindingLoading ? (
<Spinner className="size-4" />
) : (
"View Full Finding →"
)}
</Button>
</div>
{finding.properties?.description && (
<div className="text-text-neutral-secondary dark:text-text-neutral-secondary mt-2 text-xs">
@@ -1,8 +1,8 @@
"use client";
import Link from "next/link";
import { Badge } from "@/components/shadcn/badge/badge";
import { Button } from "@/components/shadcn/button/button";
import { Spinner } from "@/components/shadcn/spinner/spinner";
interface Finding {
id: string;
@@ -13,12 +13,18 @@ interface Finding {
interface NodeRemediationProps {
findings: Finding[];
onViewFinding?: (findingId: string) => void;
viewFindingLoading?: boolean;
}
/**
* Node remediation section showing related Prowler findings
*/
export const NodeRemediation = ({ findings }: NodeRemediationProps) => {
export const NodeRemediation = ({
findings,
onViewFinding,
viewFindingLoading = false,
}: NodeRemediationProps) => {
const getSeverityVariant = (severity: string) => {
switch (severity) {
case "critical":
@@ -66,15 +72,20 @@ export const NodeRemediation = ({ findings }: NodeRemediationProps) => {
</div>
</div>
<div className="mt-2">
<Link
href={`/findings?id=${finding.id}`}
target="_blank"
rel="noopener noreferrer"
<Button
variant="link"
size="sm"
onClick={() => onViewFinding?.(finding.id)}
disabled={viewFindingLoading}
aria-label={`View full finding for ${finding.title}`}
className="text-text-info dark:text-text-info text-sm transition-all hover:opacity-80 dark:hover:opacity-80"
className="text-text-info dark:text-text-info h-auto p-0 text-sm transition-all hover:opacity-80 dark:hover:opacity-80"
>
View Full Finding
</Link>
{viewFindingLoading ? (
<Spinner className="size-4" />
) : (
"View Full Finding →"
)}
</Button>
</div>
</div>
))}
@@ -28,29 +28,12 @@ vi.mock("@/components/shadcn/tooltip", () => ({
Tooltip: ({ children }: { children: ReactNode }) => <>{children}</>,
TooltipTrigger: ({
children,
asChild: _asChild,
...props
}: {
children: ReactNode;
asChild?: boolean;
}) => <div {...props}>{children}</div>,
}) => <>{children}</>,
TooltipContent: ({ children }: { children: ReactNode }) => (
<div role="tooltip">{children}</div>
),
}));
vi.mock("./scan-status-badge", () => ({
ScanStatusBadge: ({
status,
graphDataReady,
}: {
status: string;
graphDataReady?: boolean;
}) => (
<span>
{status}
{graphDataReady && " (graph ready)"}
</span>
<span data-testid="tooltip-content">{children}</span>
),
}));
@@ -186,14 +169,14 @@ describe("ScanListTable", () => {
expect(screen.getByText("12 Total Entries")).toBeInTheDocument();
expect(screen.getByText("Page 1 of 3")).toBeInTheDocument();
await user.click(screen.getAllByRole("button", { name: "Select scan" })[0]);
await user.click(screen.getAllByRole("radio", { name: "Select scan" })[0]);
expect(pushMock).toHaveBeenCalledWith(
"/attack-paths?scanPage=1&scanPageSize=5&scanId=scan-1",
);
});
it("enables the select button for a failed scan when graph data is ready", async () => {
it("enables the radio button for a failed scan when graph data is ready", async () => {
const user = userEvent.setup();
const failedScan: AttackPathScan = {
...createScan(1),
@@ -206,18 +189,18 @@ describe("ScanListTable", () => {
render(<ScanListTable scans={[failedScan]} />);
const button = screen.getByRole("button", { name: "Select scan" });
expect(button).toBeEnabled();
expect(button).toHaveTextContent("Select");
const radio = screen.getByRole("radio", { name: "Select scan" });
expect(radio).toBeEnabled();
expect(radio).toHaveAttribute("aria-checked", "false");
await user.click(button);
await user.click(radio);
expect(pushMock).toHaveBeenCalledWith(
"/attack-paths?scanPage=1&scanPageSize=5&scanId=scan-1",
);
});
it("disables the select button for a failed scan when graph data is not ready", () => {
it("disables the radio button for a failed scan when graph data is not ready", () => {
const failedScan: AttackPathScan = {
...createScan(1),
attributes: {
@@ -229,30 +212,30 @@ describe("ScanListTable", () => {
render(<ScanListTable scans={[failedScan]} />);
const button = screen.getByRole("button", { name: "Select scan" });
expect(button).toBeDisabled();
expect(button).toHaveTextContent("Unavailable");
const radio = screen.getByRole("radio", { name: "Scan not available" });
expect(radio).toBeDisabled();
});
// PROWLER-1383: Button label based on graph_data_ready instead of scan state
it("shows 'Unavailable' for scheduled scan when graph data is not ready", () => {
it("shows a disabled radio button for a scheduled scan without graph data", () => {
const scheduledScan: AttackPathScan = {
...createScan(1),
attributes: {
...createScan(1).attributes,
state: "scheduled",
progress: 0,
graph_data_ready: false,
completed_at: null,
duration: null,
},
};
render(<ScanListTable scans={[scheduledScan]} />);
const button = screen.getByRole("button", { name: "Select scan" });
expect(button).toBeDisabled();
expect(button).toHaveTextContent("Unavailable");
const radio = screen.getByRole("radio", { name: "Scan not available" });
expect(radio).toBeDisabled();
});
it("shows 'Unavailable' for executing scan when graph data is not ready", () => {
it("shows a disabled radio button for an executing scan without graph data", () => {
const executingScan: AttackPathScan = {
...createScan(1),
attributes: {
@@ -260,85 +243,83 @@ describe("ScanListTable", () => {
state: "executing",
progress: 45,
graph_data_ready: false,
completed_at: null,
duration: null,
},
};
render(<ScanListTable scans={[executingScan]} />);
const button = screen.getByRole("button", { name: "Select scan" });
expect(button).toBeDisabled();
expect(button).toHaveTextContent("Unavailable");
const radio = screen.getByRole("radio", { name: "Scan not available" });
expect(radio).toBeDisabled();
});
// PROWLER-1383: Enable Select on scheduled/executing scans with graph data from previous cycle
it("enables 'Select' for executing scan when graph data is ready from previous cycle", async () => {
it("enables the radio button for a scheduled scan when graph data is ready from a previous cycle", async () => {
const user = userEvent.setup();
const executingScan: AttackPathScan = {
...createScan(1),
attributes: {
...createScan(1).attributes,
state: "executing",
progress: 30,
graph_data_ready: true,
},
};
render(<ScanListTable scans={[executingScan]} />);
const button = screen.getByRole("button", { name: "Select scan" });
expect(button).toBeEnabled();
expect(button).toHaveTextContent("Select");
await user.click(button);
expect(pushMock).toHaveBeenCalledWith(
"/attack-paths?scanPage=1&scanPageSize=5&scanId=scan-1",
);
});
it("enables 'Select' for scheduled scan when graph data is ready from previous cycle", async () => {
const user = userEvent.setup();
const scheduledScan: AttackPathScan = {
const scheduledWithGraph: AttackPathScan = {
...createScan(1),
attributes: {
...createScan(1).attributes,
state: "scheduled",
progress: 0,
graph_data_ready: true,
},
};
render(<ScanListTable scans={[scheduledScan]} />);
render(<ScanListTable scans={[scheduledWithGraph]} />);
const button = screen.getByRole("button", { name: "Select scan" });
expect(button).toBeEnabled();
expect(button).toHaveTextContent("Select");
const radio = screen.getByRole("radio", { name: "Select scan" });
expect(radio).toBeEnabled();
expect(radio).toHaveAttribute("aria-checked", "false");
await user.click(radio);
await user.click(button);
expect(pushMock).toHaveBeenCalledWith(
"/attack-paths?scanPage=1&scanPageSize=5&scanId=scan-1",
);
});
// PROWLER-1383: Tooltip on disabled button explaining why it can't be selected
it("shows tooltip on disabled button explaining graph data is not available", () => {
const unavailableScan: AttackPathScan = {
it("exposes an accessible label in the Graph column when graph data is ready", () => {
render(<ScanListTable scans={[createScan(1)]} />);
expect(screen.getByLabelText("Graph available")).toHaveClass(
"text-text-success-primary",
);
});
it("exposes an accessible label in the Graph column when graph data is not ready", () => {
const noGraphScan: AttackPathScan = {
...createScan(1),
attributes: {
...createScan(1).attributes,
state: "executing",
graph_data_ready: false,
},
};
render(<ScanListTable scans={[unavailableScan]} />);
render(<ScanListTable scans={[noGraphScan]} />);
expect(screen.getByRole("tooltip")).toHaveTextContent(
"Graph data not yet available",
expect(screen.getByLabelText("Graph not available")).toHaveClass(
"text-text-neutral-secondary",
);
});
it("does not show tooltip on enabled button", () => {
render(<ScanListTable scans={[createScan(1)]} />);
it("renders a tooltip explaining a completed scan without graph data", () => {
const completedNoGraph: AttackPathScan = {
...createScan(1),
attributes: {
...createScan(1).attributes,
state: "completed",
graph_data_ready: false,
},
};
expect(screen.queryByRole("tooltip")).not.toBeInTheDocument();
render(<ScanListTable scans={[completedNoGraph]} />);
expect(
screen.getByRole("radio", { name: "Scan not available" }),
).toBeDisabled();
expect(
screen.getByText("This scan completed without producing graph data."),
).toBeInTheDocument();
});
});
@@ -1,9 +1,13 @@
"use client";
import { ColumnDef } from "@tanstack/react-table";
import { Check, Minus } from "lucide-react";
import { usePathname, useRouter, useSearchParams } from "next/navigation";
import { Button } from "@/components/shadcn/button/button";
import {
RadioGroup,
RadioGroupItem,
} from "@/components/shadcn/radio-group/radio-group";
import {
Tooltip,
TooltipContent,
@@ -13,8 +17,10 @@ import { DateWithTime } from "@/components/ui/entities/date-with-time";
import { EntityInfo } from "@/components/ui/entities/entity-info";
import { DataTable, DataTableColumnHeader } from "@/components/ui/table";
import { formatDuration } from "@/lib/date-utils";
import { cn } from "@/lib/utils";
import type { MetaDataProps, ProviderType } from "@/types";
import type { AttackPathScan } from "@/types/attack-paths";
import { SCAN_STATES } from "@/types/attack-paths";
import { ScanStatusBadge } from "./scan-status-badge";
@@ -24,7 +30,6 @@ interface ScanListTableProps {
const DEFAULT_PAGE_SIZE = 5;
const PAGE_SIZE_OPTIONS = [2, 5, 10, 15];
const parsePageParam = (value: string | null, fallback: number) => {
if (!value) return fallback;
@@ -37,26 +42,32 @@ const formatNullableDuration = (duration: number | null) => {
return formatDuration(duration);
};
const isSelectDisabled = (
scan: AttackPathScan,
selectedScanId: string | null,
) => {
return !scan.attributes.graph_data_ready || selectedScanId === scan.id;
};
const getSelectButtonLabel = (
scan: AttackPathScan,
selectedScanId: string | null,
) => {
if (selectedScanId === scan.id) {
return "Selected";
}
const getDisabledTooltip = (scan: AttackPathScan): string | null => {
if (scan.attributes.graph_data_ready) {
return "Select";
return null;
}
return "Unavailable";
if (scan.attributes.state === SCAN_STATES.SCHEDULED) {
return "Graph will be available once this scan runs and completes.";
}
if (scan.attributes.state === SCAN_STATES.AVAILABLE) {
return "This scan is queued. Graph will be available once it completes.";
}
if (scan.attributes.state === SCAN_STATES.EXECUTING) {
return "Scan is running. Graph will be available once it completes.";
}
if (scan.attributes.state === SCAN_STATES.FAILED) {
return "This scan failed. No graph data is available.";
}
if (scan.attributes.state === SCAN_STATES.COMPLETED) {
return "This scan completed without producing graph data.";
}
return "Graph data is not available for this scan.";
};
const getSelectedRowSelection = (
@@ -88,11 +99,54 @@ const buildMetadata = (
const getColumns = ({
selectedScanId,
onSelectScan,
}: {
selectedScanId: string | null;
onSelectScan: (scanId: string) => void;
}): ColumnDef<AttackPathScan>[] => [
{
id: "select",
header: () => <span className="text-sm font-medium">Select</span>,
cell: ({ row }) => {
const isSelected = selectedScanId === row.original.id;
const canSelect = row.original.attributes.graph_data_ready;
const tooltip = getDisabledTooltip(row.original);
const radio = (
<RadioGroupItem
value={row.original.id}
checked={isSelected}
disabled={!canSelect}
className={cn(
"size-5",
canSelect &&
!isSelected &&
"border-text-neutral-secondary cursor-pointer",
!canSelect && "disabled:opacity-70",
)}
aria-label={
isSelected
? "Selected scan"
: canSelect
? "Select scan"
: "Scan not available"
}
/>
);
if (!canSelect && !isSelected && tooltip) {
return (
<Tooltip>
<TooltipTrigger asChild>
<span tabIndex={0}>{radio}</span>
</TooltipTrigger>
<TooltipContent>{tooltip}</TooltipContent>
</Tooltip>
);
}
return radio;
},
enableSorting: false,
},
{
accessorKey: "provider",
header: ({ column }) => (
@@ -126,22 +180,32 @@ const getColumns = ({
<DataTableColumnHeader column={column} title="Status" />
),
cell: ({ row }) => (
<ScanStatusBadge
status={row.original.attributes.state}
progress={row.original.attributes.progress}
graphDataReady={row.original.attributes.graph_data_ready}
/>
<div className="flex">
<ScanStatusBadge
status={row.original.attributes.state}
progress={row.original.attributes.progress}
/>
</div>
),
enableSorting: false,
},
{
accessorKey: "progress",
header: ({ column }) => (
<DataTableColumnHeader column={column} title="Progress" />
),
cell: ({ row }) => (
<span className="text-sm">{row.original.attributes.progress}%</span>
),
accessorKey: "graph_data_ready",
header: () => <span className="text-sm font-medium">Graph</span>,
cell: ({ row }) =>
row.original.attributes.graph_data_ready ? (
<Check
size={16}
aria-label="Graph available"
className="text-text-success-primary"
/>
) : (
<Minus
size={16}
aria-label="Graph not available"
className="text-text-neutral-secondary"
/>
),
enableSorting: false,
},
{
@@ -156,44 +220,6 @@ const getColumns = ({
),
enableSorting: false,
},
{
id: "actions",
header: () => <span className="sr-only">Actions</span>,
cell: ({ row }) => {
const isDisabled = isSelectDisabled(row.original, selectedScanId);
const button = (
<Button
type="button"
aria-label="Select scan"
disabled={isDisabled}
variant={isDisabled ? "secondary" : "default"}
onClick={() => onSelectScan(row.original.id)}
className="w-full max-w-24"
>
{getSelectButtonLabel(row.original, selectedScanId)}
</Button>
);
if (isDisabled && selectedScanId !== row.original.id) {
return (
<div className="flex justify-end">
<Tooltip>
<TooltipTrigger asChild>
<span className="w-full max-w-24" tabIndex={0}>
{button}
</span>
</TooltipTrigger>
<TooltipContent>Graph data not yet available</TooltipContent>
</Tooltip>
</div>
);
}
return <div className="flex justify-end">{button}</div>;
},
enableSorting: false,
},
];
/**
@@ -243,19 +269,27 @@ export const ScanListTable = ({ scans }: ScanListTableProps) => {
};
return (
<DataTable
columns={getColumns({
selectedScanId,
onSelectScan: handleSelectScan,
})}
data={paginatedScans}
metadata={buildMetadata(scans.length, currentPage, totalPages)}
controlledPage={currentPage}
controlledPageSize={pageSize}
onPageChange={handlePageChange}
onPageSizeChange={handlePageSizeChange}
enableRowSelection
rowSelection={getSelectedRowSelection(paginatedScans, selectedScanId)}
/>
<RadioGroup
value={selectedScanId ?? ""}
onValueChange={handleSelectScan}
className="gap-0"
>
<DataTable
columns={getColumns({ selectedScanId })}
data={paginatedScans}
metadata={buildMetadata(scans.length, currentPage, totalPages)}
controlledPage={currentPage}
controlledPageSize={pageSize}
onPageChange={handlePageChange}
onPageSizeChange={handlePageSizeChange}
onRowClick={(row) => {
if (row.original.attributes.graph_data_ready) {
handleSelectScan(row.original.id);
}
}}
enableRowSelection
rowSelection={getSelectedRowSelection(paginatedScans, selectedScanId)}
/>
</RadioGroup>
);
};
@@ -3,97 +3,55 @@
import { Loader2 } from "lucide-react";
import { Badge } from "@/components/shadcn/badge/badge";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import { cn } from "@/lib/utils";
import type { ScanState } from "@/types/attack-paths";
import { SCAN_STATES } from "@/types/attack-paths";
const BADGE_CONFIG: Record<
ScanState,
{ className: string; label: string; showGraphDot: boolean }
> = {
const BADGE_CONFIG: Record<ScanState, { className: string; label: string }> = {
[SCAN_STATES.SCHEDULED]: {
className: "bg-bg-neutral-tertiary text-text-neutral-primary",
label: "Scheduled",
showGraphDot: true,
},
[SCAN_STATES.AVAILABLE]: {
className: "bg-bg-neutral-tertiary text-text-neutral-primary",
label: "Queued",
showGraphDot: true,
},
[SCAN_STATES.EXECUTING]: {
className: "bg-bg-warning-secondary text-text-neutral-primary",
className: "bg-bg-info-secondary text-text-info",
label: "In Progress",
showGraphDot: true,
},
[SCAN_STATES.COMPLETED]: {
className: "bg-bg-pass-secondary text-text-success-primary",
label: "Completed",
showGraphDot: true,
},
[SCAN_STATES.FAILED]: {
className: "bg-bg-fail-secondary text-text-error-primary",
label: "Failed",
showGraphDot: true,
},
};
interface ScanStatusBadgeProps {
status: ScanState;
progress?: number;
graphDataReady?: boolean;
}
export const ScanStatusBadge = ({
status,
progress = 0,
graphDataReady = false,
}: ScanStatusBadgeProps) => {
const config = BADGE_CONFIG[status];
const graphDot = graphDataReady && config.showGraphDot && (
<span className="inline-block size-2 rounded-full bg-green-500" />
);
const tooltipText = graphDataReady
? "Graph available"
: status === SCAN_STATES.FAILED || status === SCAN_STATES.COMPLETED
? "Graph not available"
: "Graph not available yet";
const spinner = status === SCAN_STATES.EXECUTING && (
<Loader2 size={14} className="animate-spin" />
);
const icon =
status === SCAN_STATES.EXECUTING ? (
<>
{graphDot}
{spinner}
</>
) : (
graphDot
);
const label =
status === SCAN_STATES.EXECUTING
? `${config.label} (${progress}%)`
: config.label;
return (
<Tooltip>
<TooltipTrigger asChild>
<Badge className={cn(config.className, "gap-2")}>
{icon}
<span>{label}</span>
</Badge>
</TooltipTrigger>
<TooltipContent>{tooltipText}</TooltipContent>
</Tooltip>
<Badge className={cn(config.className, "gap-2")}>
{status === SCAN_STATES.EXECUTING && (
<Loader2 size={14} className="animate-spin" />
)}
<span>{label}</span>
</Badge>
);
};
@@ -14,6 +14,8 @@ import {
getAvailableQueries,
} from "@/actions/attack-paths";
import { adaptQueryResultToGraphData } from "@/actions/attack-paths/query-result.adapter";
import { FindingDetailDrawer } from "@/components/findings/table";
import { useFindingDetails } from "@/components/resources/table/use-finding-details";
import { AutoRefresh } from "@/components/scans";
import {
Alert,
@@ -30,6 +32,7 @@ import {
DialogTitle,
DialogTrigger,
} from "@/components/shadcn/dialog";
import { Spinner } from "@/components/shadcn/spinner/spinner";
import { useToast } from "@/components/ui";
import type {
AttackPathQuery,
@@ -37,7 +40,7 @@ import type {
AttackPathScan,
GraphNode,
} from "@/types/attack-paths";
import { ATTACK_PATH_QUERY_IDS } from "@/types/attack-paths";
import { ATTACK_PATH_QUERY_IDS, SCAN_STATES } from "@/types/attack-paths";
import {
AttackPathGraph,
@@ -65,6 +68,7 @@ export default function AttackPathsPage() {
const searchParams = useSearchParams();
const scanId = searchParams.get("scanId");
const graphState = useGraphState();
const finding = useFindingDetails();
const { toast } = useToast();
const [scansLoading, setScansLoading] = useState(true);
@@ -116,10 +120,17 @@ export default function AttackPathsPage() {
// Check if there's an executing scan for auto-refresh
const hasExecutingScan = scans.some(
(scan) =>
scan.attributes.state === "executing" ||
scan.attributes.state === "scheduled",
scan.attributes.state === SCAN_STATES.EXECUTING ||
scan.attributes.state === SCAN_STATES.SCHEDULED,
);
// Detect if the selected scan is showing data from a previous cycle
const selectedScan = scans.find((scan) => scan.id === scanId);
const isViewingPreviousCycleData =
selectedScan &&
selectedScan.attributes.graph_data_ready &&
selectedScan.attributes.state !== SCAN_STATES.COMPLETED;
// Callback to refresh scans (used by AutoRefresh component)
const refreshScans = async () => {
try {
@@ -304,6 +315,14 @@ export default function AttackPathsPage() {
graphState.selectNode(null);
};
const getFindingId = (node: GraphNode | null) =>
node ? String(node.properties?.id || node.id) : "";
const handleViewFinding = (findingId: string) => {
if (!findingId) return;
void finding.navigateToFinding(findingId);
};
const handleGraphExport = (svgElement: SVGSVGElement | null) => {
try {
if (svgElement) {
@@ -339,11 +358,11 @@ export default function AttackPathsPage() {
<h2 className="dark:text-prowler-theme-pale/90 text-xl font-semibold">
Attack Paths
</h2>
<p className="text-text-neutral-secondary dark:text-text-neutral-secondary mt-2 text-sm">
<p className="text-text-neutral-secondary mt-2 text-sm">
Select a scan, build a query, and visualize Attack Paths in your
infrastructure.
</p>
<p className="text-text-neutral-secondary dark:text-text-neutral-secondary mt-1 text-xs">
<p className="text-text-neutral-secondary mt-1 text-xs">
Scans can be selected when data is available. A new scan does not
interrupt access to existing data.
</p>
@@ -373,6 +392,21 @@ export default function AttackPathsPage() {
<ScanListTable scans={scans} />
</Suspense>
{/* Banner: viewing data from a previous scan cycle */}
{isViewingPreviousCycleData && (
<Alert variant="info">
<Info className="size-4" />
<AlertTitle>Viewing data from a previous scan</AlertTitle>
<AlertDescription>
This scan is currently{" "}
{selectedScan.attributes.state === SCAN_STATES.EXECUTING
? `running (${selectedScan.attributes.progress}%)`
: selectedScan.attributes.state}
. The graph data shown is from the last completed cycle.
</AlertDescription>
</Alert>
)}
{/* Query Builder Section - shown only after selecting a scan */}
{scanId && (
<div className="minimal-scrollbar rounded-large shadow-small border-border-neutral-secondary bg-bg-neutral-secondary relative z-0 flex w-full flex-col gap-4 overflow-auto border p-4">
@@ -568,7 +602,7 @@ export default function AttackPathsPage() {
<X size={16} />
</Button>
</div>
<p className="text-text-neutral-secondary dark:text-text-neutral-secondary mb-4 text-xs">
<p className="text-text-neutral-secondary mb-4 text-xs">
{graphState.selectedNode?.labels.some(
(label) =>
label
@@ -591,7 +625,7 @@ export default function AttackPathsPage() {
<h4 className="mb-2 text-xs font-semibold">
Type
</h4>
<p className="text-text-neutral-secondary dark:text-text-neutral-secondary text-xs">
<p className="text-text-neutral-secondary text-xs">
{graphState.selectedNode?.labels
.map(formatNodeLabel)
.join(", ")}
@@ -641,7 +675,7 @@ export default function AttackPathsPage() {
<div className="flex items-center justify-between">
<div className="flex-1">
<h3 className="text-lg font-semibold">Node Details</h3>
<p className="text-text-neutral-secondary dark:text-text-neutral-secondary mt-1 text-sm">
<p className="text-text-neutral-secondary mt-1 text-sm">
{String(
graphState.selectedNode.labels.some((label) =>
label.toLowerCase().includes("finding"),
@@ -659,15 +693,20 @@ export default function AttackPathsPage() {
{graphState.selectedNode.labels.some((label) =>
label.toLowerCase().includes("finding"),
) && (
<Button asChild variant="default" size="sm">
<a
href={`/findings?id=${String(graphState.selectedNode.properties?.id || graphState.selectedNode.id)}`}
target="_blank"
rel="noopener noreferrer"
aria-label={`View finding ${String(graphState.selectedNode.properties?.id || graphState.selectedNode.id)}`}
>
View Finding
</a>
<Button
variant="default"
size="sm"
onClick={() =>
handleViewFinding(getFindingId(graphState.selectedNode))
}
disabled={finding.findingDetailLoading}
aria-label={`View finding ${getFindingId(graphState.selectedNode)}`}
>
{finding.findingDetailLoading ? (
<Spinner className="size-4" />
) : (
"View Finding"
)}
</Button>
)}
<Button
@@ -685,9 +724,22 @@ export default function AttackPathsPage() {
<NodeDetailContent
node={graphState.selectedNode}
allNodes={graphState.data.nodes}
onViewFinding={handleViewFinding}
viewFindingLoading={finding.findingDetailLoading}
/>
</div>
)}
{finding.findingDetails && (
<FindingDetailDrawer
key={finding.findingDetails.id}
finding={finding.findingDetails}
defaultOpen
onOpenChange={(open) => {
if (!open) finding.resetFindingDetails();
}}
/>
)}
</>
)}
</div>
-3
View File
@@ -37,9 +37,6 @@ export default async function Findings({
// Check if the searchParams contain any date or scan filter
const hasDateOrScan = hasDateOrScanFilter(resolvedSearchParams);
// TODO: Re-implement deep link support (/findings?id=<uuid>) using the grouped view's resource detail drawer
// once the legacy FindingDetailsSheet is fully deprecated (still used by /resources and overview dashboard).
const [providersData, scansData] = await Promise.all([
getProviders({ pageSize: 50 }),
getScans({ pageSize: 50 }),
+8 -27
View File
@@ -1,11 +1,6 @@
import { Suspense } from "react";
import {
AddProviderButton,
MutedFindingsConfigButton,
ProvidersAccountsTable,
ProvidersFilters,
} from "@/components/providers";
import { ProvidersAccountsView } from "@/components/providers";
import { SkeletonTableProviders } from "@/components/providers/table";
import { Skeleton } from "@/components/shadcn/skeleton/skeleton";
import { ContentLayout } from "@/components/ui";
@@ -56,15 +51,6 @@ export default async function Providers({
);
}
const ProvidersActions = () => {
return (
<div className="flex flex-wrap gap-4 md:justify-end">
<MutedFindingsConfigButton />
<AddProviderButton />
</div>
);
};
const ProvidersTableFallback = () => {
return (
<div className="flex flex-col gap-6">
@@ -120,17 +106,12 @@ const ProvidersAccountsContent = async ({
});
return (
<div className="flex flex-col gap-6">
<ProvidersFilters
filters={providersView.filters}
providers={providersView.providers}
actions={<ProvidersActions />}
/>
<ProvidersAccountsTable
isCloud={process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true"}
metadata={providersView.metadata}
rows={providersView.rows}
/>
</div>
<ProvidersAccountsView
isCloud={process.env.NEXT_PUBLIC_IS_CLOUD_ENV === "true"}
filters={providersView.filters}
providers={providersView.providers}
metadata={providersView.metadata}
rows={providersView.rows}
/>
);
};
@@ -0,0 +1,16 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("client accordion content", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "client-accordion-content.tsx");
const source = readFileSync(filePath, "utf8");
it("uses the shared standalone finding columns instead of the legacy findings columns", () => {
expect(source).toContain("getStandaloneFindingColumns");
expect(source).not.toContain("getColumnFindings");
});
});
@@ -5,7 +5,7 @@ import { useEffect, useRef, useState } from "react";
import { getFindings } from "@/actions/findings/findings";
import {
getColumnFindings,
getStandaloneFindingColumns,
SkeletonTableFindings,
} from "@/components/findings/table";
import { Accordion } from "@/components/ui/accordion/Accordion";
@@ -33,6 +33,7 @@ export const ClientAccordionContent = ({
const searchParams = useSearchParams();
const pageNumber = searchParams.get("page") || "1";
const complianceId = searchParams.get("complianceId");
const openFindingId = searchParams.get("id");
const defaultSort = "severity,status,-inserted_at";
const sort = searchParams.get("sort") || defaultSort;
const loadedPageRef = useRef<string | null>(null);
@@ -159,12 +160,7 @@ export const ClientAccordionContent = ({
<h4 className="mb-2 text-sm font-medium">Findings</h4>
<DataTable
// Remove select and updated_at columns for compliance view
columns={getColumnFindings({}, 0).filter(
(col) =>
col.id !== "select" &&
!("accessorKey" in col && col.accessorKey === "updated_at"),
)}
columns={getStandaloneFindingColumns({ openFindingId })}
data={expandedFindings || []}
metadata={findings?.meta}
disableScroll={true}
@@ -1,14 +1,8 @@
// TODO: Legacy columns — used by overview dashboard (column-new-findings-to-date.tsx).
// Migrate that consumer to grouped view columns, then delete this file.
"use client";
import { ColumnDef, RowSelectionState } from "@tanstack/react-table";
import { ColumnDef } from "@tanstack/react-table";
import { Database } from "lucide-react";
import { useSearchParams } from "next/navigation";
import { FindingDetail } from "@/components/findings/table";
import { DataTableRowActions } from "@/components/findings/table";
import { Checkbox } from "@/components/shadcn";
import { CodeSnippet } from "@/components/ui/code-snippet/code-snippet";
import { DateWithTime } from "@/components/ui/entities";
import {
@@ -18,11 +12,15 @@ import {
} from "@/components/ui/table";
import { FindingProps, ProviderType } from "@/types";
// TODO: PROWLER-379 - Enable ImpactedResourcesCell when backend supports grouped findings
// import { ImpactedResourcesCell } from "./impacted-resources-cell";
import { FindingDetailDrawer } from "./finding-detail-drawer";
import { DeltaValues, NotificationIndicator } from "./notification-indicator";
import { ProviderIconCell } from "./provider-icon-cell";
interface GetStandaloneFindingColumnsOptions {
includeUpdatedAt?: boolean;
openFindingId?: string | null;
}
const getFindingsData = (row: { original: FindingProps }) => {
return row.original;
};
@@ -45,49 +43,38 @@ const getProviderData = (
return row.original.relationships?.provider?.attributes?.[field] || "-";
};
// Component for finding title that opens the detail drawer
const FindingTitleCell = ({ row }: { row: { original: FindingProps } }) => {
const searchParams = useSearchParams();
const findingId = searchParams.get("id");
const isOpen = findingId === row.original.id;
const { checktitle } = row.original.attributes.check_metadata;
function FindingTitleCell({
finding,
defaultOpen = false,
}: {
finding: FindingProps;
defaultOpen?: boolean;
}) {
return (
<FindingDetail
findingDetails={row.original}
defaultOpen={isOpen}
<FindingDetailDrawer
finding={finding}
defaultOpen={defaultOpen}
trigger={
<div className="max-w-[500px]">
<p className="text-text-neutral-primary hover:text-button-tertiary cursor-pointer text-left text-sm break-words whitespace-normal hover:underline">
{checktitle}
{finding.attributes.check_metadata.checktitle}
</p>
</div>
}
/>
);
};
}
// Function to generate columns with access to selection state
export function getColumnFindings(
rowSelection: RowSelectionState,
selectableRowCount: number,
): ColumnDef<FindingProps>[] {
// Calculate selection state from rowSelection for header checkbox
const selectedCount = Object.values(rowSelection).filter(Boolean).length;
const isAllSelected =
selectedCount > 0 && selectedCount === selectableRowCount;
const isSomeSelected =
selectedCount > 0 && selectedCount < selectableRowCount;
return [
// Notification column - shows new/changed/muted indicators
export function getStandaloneFindingColumns({
includeUpdatedAt = false,
openFindingId = null,
}: GetStandaloneFindingColumnsOptions = {}): ColumnDef<FindingProps>[] {
const columns: ColumnDef<FindingProps>[] = [
{
id: "notification",
header: () => null,
cell: ({ row }) => {
const finding = row.original;
const isMuted = finding.attributes.muted;
const mutedReason = finding.attributes.muted_reason;
const delta = finding.attributes.delta as
| (typeof DeltaValues)[keyof typeof DeltaValues]
| undefined;
@@ -95,8 +82,8 @@ export function getColumnFindings(
return (
<NotificationIndicator
delta={delta}
isMuted={isMuted}
mutedReason={mutedReason}
isMuted={finding.attributes.muted}
mutedReason={finding.attributes.muted_reason}
showDeltaWhenMuted
/>
);
@@ -104,51 +91,6 @@ export function getColumnFindings(
enableSorting: false,
enableHiding: false,
},
// Select column
{
id: "select",
header: ({ table }) => {
const headerChecked = isAllSelected
? true
: isSomeSelected
? "indeterminate"
: false;
return (
<div className="ml-1 flex w-6 items-center justify-center pr-4">
<Checkbox
checked={headerChecked}
onCheckedChange={(checked) =>
table.toggleAllPageRowsSelected(checked === true)
}
aria-label="Select all"
disabled={selectableRowCount === 0}
/>
</div>
);
},
cell: ({ row }) => {
const finding = row.original;
const isMuted = finding.attributes.muted;
const isSelected = !!rowSelection[row.id];
return (
<div className="ml-1 flex w-6 items-center justify-center pr-4">
<Checkbox
checked={isSelected}
disabled={isMuted}
onCheckedChange={(checked) =>
row.toggleSelected(checked === true)
}
aria-label="Select row"
/>
</div>
);
},
enableSorting: false,
enableHiding: false,
},
// Status column
{
accessorKey: "status",
header: ({ column }) => (
@@ -162,7 +104,6 @@ export function getColumnFindings(
return <StatusFindingBadge status={status} />;
},
},
// Finding column - clickable to open detail sheet
{
accessorKey: "check",
header: ({ column }) => (
@@ -172,9 +113,13 @@ export function getColumnFindings(
param="check_id"
/>
),
cell: ({ row }) => <FindingTitleCell row={row} />,
cell: ({ row }) => (
<FindingTitleCell
finding={row.original}
defaultOpen={openFindingId === row.original.id}
/>
),
},
// Resource name column
{
accessorKey: "resourceName",
header: ({ column }) => (
@@ -197,7 +142,6 @@ export function getColumnFindings(
},
enableSorting: false,
},
// Severity column
{
accessorKey: "severity",
header: ({ column }) => (
@@ -214,7 +158,6 @@ export function getColumnFindings(
return <SeverityBadge severity={severity} />;
},
},
// Provider column
{
accessorKey: "provider",
header: ({ column }) => (
@@ -227,7 +170,6 @@ export function getColumnFindings(
},
enableSorting: false,
},
// Service column
{
accessorKey: "service",
header: ({ column }) => (
@@ -243,7 +185,6 @@ export function getColumnFindings(
},
enableSorting: false,
},
// Region column
{
accessorKey: "region",
header: ({ column }) => (
@@ -260,19 +201,10 @@ export function getColumnFindings(
},
enableSorting: false,
},
// TODO: PROWLER-379 - Enable Impacted Resources column when backend supports grouped findings
// {
// accessorKey: "impactedResources",
// header: ({ column }) => (
// <DataTableColumnHeader column={column} title="Impacted Resources" />
// ),
// cell: () => {
// return <ImpactedResourcesCell impacted={1} total={1} />;
// },
// enableSorting: false,
// },
// Time column
{
];
if (includeUpdatedAt) {
columns.push({
accessorKey: "updated_at",
header: ({ column }) => (
<DataTableColumnHeader
@@ -287,13 +219,8 @@ export function getColumnFindings(
} = getFindingsData(row);
return <DateWithTime dateTime={updated_at} />;
},
},
// Actions column - dropdown with Mute/Jira options
{
id: "actions",
header: () => <div className="w-10" />,
cell: ({ row }) => <DataTableRowActions row={row} />,
enableSorting: false,
},
];
});
}
return columns;
}
@@ -1,14 +0,0 @@
"use client";
import { FindingProps } from "@/types/components";
import { FindingDetail } from "./finding-detail";
export const DataTableRowDetails = ({
findingDetails,
}: {
entityId: string;
findingDetails: FindingProps;
}) => {
return <FindingDetail findingDetails={findingDetails} />;
};
@@ -0,0 +1,22 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("finding detail drawer", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "finding-detail-drawer.tsx");
const source = readFileSync(filePath, "utf8");
it("uses the shared resource detail drawer hook with single-resource mode", () => {
expect(source).toContain("useResourceDetailDrawer");
expect(source).toContain("totalResourceCount: 1");
expect(source).toContain("initialIndex: defaultOpen || inline ? 0 : null");
});
it("renders the new resource detail drawer content instead of the legacy finding detail component", () => {
expect(source).toContain("ResourceDetailDrawerContent");
expect(source).not.toContain('from "./finding-detail"');
});
});
@@ -0,0 +1,98 @@
"use client";
import type { ReactNode } from "react";
import { findingToFindingResourceRow } from "@/lib/finding-detail";
import type { FindingProps } from "@/types/components";
import {
ResourceDetailDrawer,
useResourceDetailDrawer,
} from "./resource-detail-drawer";
import { ResourceDetailDrawerContent } from "./resource-detail-drawer/resource-detail-drawer-content";
interface FindingDetailDrawerProps {
finding: FindingProps;
trigger?: ReactNode;
defaultOpen?: boolean;
inline?: boolean;
onOpenChange?: (open: boolean) => void;
onMuteComplete?: () => void;
}
export function FindingDetailDrawer({
finding,
trigger,
defaultOpen = false,
inline = false,
onOpenChange,
onMuteComplete,
}: FindingDetailDrawerProps) {
const drawer = useResourceDetailDrawer({
resources: [findingToFindingResourceRow(finding)],
checkId: finding.attributes.check_id,
totalResourceCount: 1,
initialIndex: defaultOpen || inline ? 0 : null,
});
const handleOpen = () => {
drawer.openDrawer(0);
onOpenChange?.(true);
};
const handleOpenChange = (open: boolean) => {
if (open) {
drawer.openDrawer(0);
} else {
drawer.closeDrawer();
}
onOpenChange?.(open);
};
const handleMuteComplete = () => {
drawer.refetchCurrent();
onMuteComplete?.();
};
if (inline) {
return (
<ResourceDetailDrawerContent
isLoading={drawer.isLoading}
isNavigating={drawer.isNavigating}
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
onNavigatePrev={drawer.navigatePrev}
onNavigateNext={drawer.navigateNext}
onMuteComplete={handleMuteComplete}
/>
);
}
return (
<>
{trigger ? (
<button type="button" className="contents" onClick={handleOpen}>
{trigger}
</button>
) : null}
<ResourceDetailDrawer
open={drawer.isOpen}
onOpenChange={handleOpenChange}
isLoading={drawer.isLoading}
isNavigating={drawer.isNavigating}
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
onNavigatePrev={drawer.navigatePrev}
onNavigateNext={drawer.navigateNext}
onMuteComplete={handleMuteComplete}
/>
</>
);
}
@@ -1,299 +0,0 @@
import { render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { describe, expect, it, vi } from "vitest";
import { FindingProps } from "@/types";
import { FindingDetail } from "./finding-detail";
// Mock next/navigation
const mockRefresh = vi.fn();
vi.mock("next/navigation", () => ({
useRouter: () => ({ refresh: mockRefresh }),
usePathname: () => "/findings",
useSearchParams: () => new URLSearchParams(),
}));
// Mock @/components/shadcn to avoid next-auth import chain
vi.mock("@/components/shadcn", () => {
const Slot = ({ children }: { children: React.ReactNode }) => <>{children}</>;
return {
Button: ({
children,
...props
}: React.ButtonHTMLAttributes<HTMLButtonElement> & {
variant?: string;
size?: string;
}) => <button {...props}>{children}</button>,
Drawer: ({ children }: { children: React.ReactNode }) => <>{children}</>,
DrawerClose: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
DrawerContent: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
DrawerDescription: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
DrawerHeader: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
DrawerTitle: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
DrawerTrigger: Slot,
InfoField: ({
children,
label,
}: {
children: React.ReactNode;
label: string;
variant?: string;
}) => (
<div>
<span>{label}</span>
{children}
</div>
),
Tabs: ({ children }: { children: React.ReactNode }) => <>{children}</>,
TabsContent: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
TabsList: ({ children }: { children: React.ReactNode }) => <>{children}</>,
TabsTrigger: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
Tooltip: ({ children }: { children: React.ReactNode }) => <>{children}</>,
TooltipContent: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
),
TooltipTrigger: Slot,
};
});
vi.mock("@/components/ui/code-snippet/code-snippet", () => ({
CodeSnippet: ({ value }: { value: string }) => <span>{value}</span>,
}));
vi.mock("@/components/ui/custom/custom-link", () => ({
CustomLink: ({ children }: { children: React.ReactNode }) => (
<span>{children}</span>
),
}));
vi.mock("@/components/ui/entities", () => ({
EntityInfo: () => <div data-testid="entity-info" />,
}));
vi.mock("@/components/ui/entities/date-with-time", () => ({
DateWithTime: ({ dateTime }: { dateTime: string }) => <span>{dateTime}</span>,
}));
vi.mock("@/components/ui/table/severity-badge", () => ({
SeverityBadge: ({ severity }: { severity: string }) => (
<span>{severity}</span>
),
}));
vi.mock("@/components/ui/table/status-finding-badge", () => ({
FindingStatus: {},
StatusFindingBadge: ({ status }: { status: string }) => <span>{status}</span>,
}));
vi.mock("@/lib/iac-utils", () => ({
buildGitFileUrl: () => null,
extractLineRangeFromUid: () => null,
}));
vi.mock("@/lib/utils", () => ({
cn: (...args: string[]) => args.filter(Boolean).join(" "),
}));
// Mock child components that are not under test
vi.mock("../mute-findings-modal", () => ({
MuteFindingsModal: ({
isOpen,
findingIds,
}: {
isOpen: boolean;
findingIds: string[];
}) =>
isOpen ? (
<div data-testid="mute-modal">Muting {findingIds.length} finding(s)</div>
) : null,
}));
vi.mock("../muted", () => ({
Muted: ({ isMuted }: { isMuted: boolean }) =>
isMuted ? <span data-testid="muted-badge">Muted</span> : null,
}));
vi.mock("./delta-indicator", () => ({
DeltaIndicator: () => null,
}));
vi.mock("@/components/shared/events-timeline/events-timeline", () => ({
EventsTimeline: () => <div data-testid="events-timeline" />,
}));
vi.mock("react-markdown", () => ({
default: ({ children }: { children: string }) => <span>{children}</span>,
}));
const baseFinding: FindingProps = {
type: "findings",
id: "finding-123",
attributes: {
uid: "uid-123",
delta: null,
status: "FAIL",
status_extended: "S3 bucket is publicly accessible",
severity: "high",
check_id: "s3_bucket_public_access",
muted: false,
check_metadata: {
risk: "Public access risk",
notes: "",
checkid: "s3_bucket_public_access",
provider: "aws",
severity: "high",
checktype: [],
dependson: [],
relatedto: [],
categories: ["security"],
checktitle: "S3 Bucket Public Access Check",
compliance: null,
relatedurl: "",
description: "Checks if S3 buckets are publicly accessible",
remediation: {
code: { cli: "", other: "", nativeiac: "", terraform: "" },
recommendation: { url: "", text: "" },
},
servicename: "s3",
checkaliases: [],
resourcetype: "AwsS3Bucket",
subservicename: "",
resourceidtemplate: "",
},
raw_result: null,
inserted_at: "2024-01-01T00:00:00Z",
updated_at: "2024-01-02T00:00:00Z",
first_seen_at: "2024-01-01T00:00:00Z",
},
relationships: {
resources: { data: [{ type: "resources", id: "res-1" }] },
scan: {
data: { type: "scans", id: "scan-1" },
attributes: {
name: "Daily Scan",
trigger: "scheduled",
state: "completed",
unique_resource_count: 50,
progress: 100,
scanner_args: { checks_to_execute: [] },
duration: 120,
started_at: "2024-01-01T00:00:00Z",
inserted_at: "2024-01-01T00:00:00Z",
completed_at: "2024-01-01T00:02:00Z",
scheduled_at: null,
next_scan_at: "2024-01-02T00:00:00Z",
},
},
resource: {
data: [{ type: "resources", id: "res-1" }],
id: "res-1",
attributes: {
uid: "arn:aws:s3:::my-bucket",
name: "my-bucket",
region: "us-east-1",
service: "s3",
tags: {},
type: "AwsS3Bucket",
inserted_at: "2024-01-01T00:00:00Z",
updated_at: "2024-01-01T00:00:00Z",
details: null,
partition: "aws",
},
relationships: {
provider: { data: { type: "providers", id: "prov-1" } },
findings: {
meta: { count: 1 },
data: [{ type: "findings", id: "finding-123" }],
},
},
links: { self: "/resources/res-1" },
},
provider: {
data: { type: "providers", id: "prov-1" },
attributes: {
provider: "aws",
uid: "123456789012",
alias: "my-account",
connection: {
connected: true,
last_checked_at: "2024-01-01T00:00:00Z",
},
inserted_at: "2024-01-01T00:00:00Z",
updated_at: "2024-01-01T00:00:00Z",
},
relationships: {
secret: { data: { type: "provider-secrets", id: "secret-1" } },
},
links: { self: "/providers/prov-1" },
},
},
links: { self: "/findings/finding-123" },
};
describe("FindingDetail", () => {
it("shows the Mute button for non-muted findings", () => {
render(<FindingDetail findingDetails={baseFinding} />);
expect(screen.getByRole("button", { name: /mute/i })).toBeInTheDocument();
});
it("hides the Mute button for muted findings", () => {
const mutedFinding: FindingProps = {
...baseFinding,
attributes: { ...baseFinding.attributes, muted: true },
};
render(<FindingDetail findingDetails={mutedFinding} />);
expect(screen.queryByRole("button", { name: /mute/i })).toBeNull();
});
it("opens the mute modal when clicking the Mute button", async () => {
const user = userEvent.setup();
render(<FindingDetail findingDetails={baseFinding} />);
expect(screen.queryByTestId("mute-modal")).toBeNull();
await user.click(screen.getByRole("button", { name: /mute/i }));
expect(screen.getByTestId("mute-modal")).toBeInTheDocument();
});
it("does not render the mute modal for muted findings", () => {
const mutedFinding: FindingProps = {
...baseFinding,
attributes: { ...baseFinding.attributes, muted: true },
};
render(<FindingDetail findingDetails={mutedFinding} />);
expect(screen.queryByTestId("mute-modal")).toBeNull();
});
it("shows the muted badge for muted findings", () => {
const mutedFinding: FindingProps = {
...baseFinding,
attributes: { ...baseFinding.attributes, muted: true },
};
render(<FindingDetail findingDetails={mutedFinding} />);
expect(screen.getByTestId("muted-badge")).toBeInTheDocument();
});
});
@@ -1,502 +0,0 @@
// TODO: Legacy component — used by /resources page and overview dashboard.
// Migrate those consumers to the new resource-detail-drawer, then delete this file.
"use client";
import { ExternalLink, Link, VolumeX, X } from "lucide-react";
import { usePathname, useRouter, useSearchParams } from "next/navigation";
import { type ReactNode, useState } from "react";
import {
Button,
Drawer,
DrawerClose,
DrawerContent,
DrawerDescription,
DrawerHeader,
DrawerTitle,
DrawerTrigger,
InfoField,
Tabs,
TabsContent,
TabsList,
TabsTrigger,
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn";
import { EventsTimeline } from "@/components/shared/events-timeline/events-timeline";
import { CodeSnippet } from "@/components/ui/code-snippet/code-snippet";
import { CustomLink } from "@/components/ui/custom/custom-link";
import { EntityInfo } from "@/components/ui/entities";
import { DateWithTime } from "@/components/ui/entities/date-with-time";
import { SeverityBadge } from "@/components/ui/table/severity-badge";
import {
FindingStatus,
StatusFindingBadge,
} from "@/components/ui/table/status-finding-badge";
import { formatDuration } from "@/lib/date-utils";
import { buildGitFileUrl, extractLineRangeFromUid } from "@/lib/iac-utils";
import { cn } from "@/lib/utils";
import { FindingProps, ProviderType } from "@/types";
import { MarkdownContainer } from "../markdown-container";
import { MuteFindingsModal } from "../mute-findings-modal";
import { Muted } from "../muted";
import { DeltaIndicator } from "./delta-indicator";
const renderValue = (value: string | null | undefined) => {
return value && value.trim() !== "" ? value : "-";
};
interface FindingDetailProps {
findingDetails: FindingProps;
trigger?: ReactNode;
open?: boolean;
defaultOpen?: boolean;
onOpenChange?: (open: boolean) => void;
}
export const FindingDetail = ({
findingDetails,
trigger,
open,
defaultOpen = false,
onOpenChange,
}: FindingDetailProps) => {
const finding = findingDetails;
const attributes = finding.attributes;
const resource = finding.relationships?.resource?.attributes;
const scan = finding.relationships?.scan?.attributes;
const providerDetails = finding.relationships?.provider?.attributes;
const router = useRouter();
const pathname = usePathname();
const searchParams = useSearchParams();
const [isMuteModalOpen, setIsMuteModalOpen] = useState(false);
const copyFindingUrl = () => {
const params = new URLSearchParams(searchParams.toString());
params.set("id", findingDetails.id);
const url = `${window.location.origin}${pathname}?${params.toString()}`;
navigator.clipboard.writeText(url);
};
// Build Git URL for IaC findings
const gitUrl =
providerDetails?.provider === "iac" && resource
? buildGitFileUrl(
providerDetails.uid,
resource.name,
extractLineRangeFromUid(attributes.uid) || "",
resource.region,
)
: null;
const handleMuteComplete = () => {
setIsMuteModalOpen(false);
onOpenChange?.(false);
router.refresh();
};
const muteModal = !attributes.muted && (
<MuteFindingsModal
isOpen={isMuteModalOpen}
onOpenChange={setIsMuteModalOpen}
findingIds={[findingDetails.id]}
onComplete={handleMuteComplete}
/>
);
const content = (
<div className="flex min-w-0 flex-col gap-4 rounded-lg">
{/* Header */}
<div className="flex flex-col gap-2">
{/* Row 1: Status badges */}
<div className="flex flex-wrap items-center gap-4">
<StatusFindingBadge status={attributes.status as FindingStatus} />
<SeverityBadge severity={attributes.severity || "-"} />
{attributes.delta && (
<div className="flex items-center gap-1 capitalize">
<DeltaIndicator delta={attributes.delta} />
<span className="text-text-neutral-secondary text-xs">
{attributes.delta}
</span>
</div>
)}
<Muted
isMuted={attributes.muted}
mutedReason={attributes.muted_reason || ""}
/>
</div>
{/* Row 2: Title with copy link */}
<h2 className="text-text-neutral-primary line-clamp-2 flex items-center gap-2 text-lg leading-tight font-medium">
{renderValue(attributes.check_metadata.checktitle)}
<Tooltip>
<TooltipTrigger asChild>
<button
onClick={copyFindingUrl}
className="text-bg-data-info inline-flex cursor-pointer transition-opacity hover:opacity-80"
aria-label="Copy finding link to clipboard"
>
<Link size={16} />
</button>
</TooltipTrigger>
<TooltipContent>Copy finding link to clipboard</TooltipContent>
</Tooltip>
</h2>
{/* Row 3: First Seen */}
<div className="text-text-neutral-tertiary text-sm">
<span className="text-text-neutral-secondary mr-1">Time:</span>
<DateWithTime inline dateTime={attributes.updated_at || "-"} />
</div>
</div>
{/* Tabs */}
<Tabs key={findingDetails.id} defaultValue="general" className="w-full">
<div className="mb-4 flex items-center justify-between">
<TabsList>
<TabsTrigger value="general">General</TabsTrigger>
<TabsTrigger value="resources">Resources</TabsTrigger>
<TabsTrigger value="scans">Scans</TabsTrigger>
<TabsTrigger value="events">Events</TabsTrigger>
</TabsList>
{!attributes.muted && (
<Button
variant="outline"
size="sm"
onClick={() => setIsMuteModalOpen(true)}
>
<VolumeX className="size-4" />
Mute
</Button>
)}
</div>
{/* General Tab */}
<TabsContent value="general" className="flex flex-col gap-4">
<p className="text-text-neutral-primary text-sm">
Here is an overview of this finding:
</p>
<div className="flex flex-wrap gap-4">
{providerDetails && (
<EntityInfo
cloudProvider={providerDetails.provider as ProviderType}
entityAlias={providerDetails.alias}
entityId={providerDetails.uid}
showConnectionStatus={providerDetails.connection.connected}
/>
)}
<InfoField label="Service">
{attributes.check_metadata.servicename}
</InfoField>
<InfoField label="Region">{resource?.region ?? "-"}</InfoField>
</div>
<div className="grid grid-cols-1 gap-4 md:grid-cols-2 lg:grid-cols-3">
<InfoField label="Check ID" variant="simple">
<CodeSnippet value={attributes.check_id} className="max-w-full" />
</InfoField>
<InfoField label="Finding ID" variant="simple">
<CodeSnippet value={findingDetails.id} className="max-w-full" />
</InfoField>
<InfoField label="Finding UID" variant="simple">
<CodeSnippet value={attributes.uid} className="max-w-full" />
</InfoField>
<InfoField label="First seen" variant="simple">
<DateWithTime inline dateTime={attributes.first_seen_at || "-"} />
</InfoField>
</div>
{attributes.status === "FAIL" && (
<InfoField label="Risk" variant="simple">
<div
className={cn(
"max-w-full rounded-md border p-2",
"border-border-error-primary bg-bg-fail-secondary",
)}
>
<MarkdownContainer>
{attributes.check_metadata.risk}
</MarkdownContainer>
</div>
</InfoField>
)}
<InfoField label="Description">
<MarkdownContainer>
{attributes.check_metadata.description}
</MarkdownContainer>
</InfoField>
<InfoField label="Status Extended">
{renderValue(attributes.status_extended)}
</InfoField>
{attributes.check_metadata.remediation && (
<div className="flex flex-col gap-4">
<h4 className="text-text-neutral-primary text-sm font-bold">
Remediation Details
</h4>
{/* Recommendation section */}
{attributes.check_metadata.remediation.recommendation.text && (
<InfoField label="Recommendation">
<div className="flex flex-col gap-2">
<MarkdownContainer>
{
attributes.check_metadata.remediation.recommendation
.text
}
</MarkdownContainer>
{attributes.check_metadata.remediation.recommendation
.url && (
<CustomLink
href={
attributes.check_metadata.remediation.recommendation
.url
}
size="sm"
>
Learn more
</CustomLink>
)}
</div>
</InfoField>
)}
{/* CLI Command section */}
{attributes.check_metadata.remediation.code.cli && (
<InfoField label="CLI Command" variant="simple">
<div
className={cn("rounded-md p-2", "bg-bg-neutral-tertiary")}
>
<span className="text-xs whitespace-pre-line">
{attributes.check_metadata.remediation.code.cli}
</span>
</div>
</InfoField>
)}
{/* Remediation Steps section */}
{attributes.check_metadata.remediation.code.other && (
<InfoField label="Remediation Steps">
<MarkdownContainer>
{attributes.check_metadata.remediation.code.other}
</MarkdownContainer>
</InfoField>
)}
{/* Additional URLs section */}
{attributes.check_metadata.additionalurls &&
attributes.check_metadata.additionalurls.length > 0 && (
<InfoField label="References">
<ul className="list-inside list-disc space-y-1">
{attributes.check_metadata.additionalurls.map(
(link, idx) => (
<li key={idx}>
<CustomLink
href={link}
size="sm"
className="break-all whitespace-normal!"
>
{link}
</CustomLink>
</li>
),
)}
</ul>
</InfoField>
)}
</div>
)}
<InfoField label="Categories">
{attributes.check_metadata.categories?.join(", ") || "none"}
</InfoField>
</TabsContent>
{/* Resources Tab */}
<TabsContent value="resources" className="flex flex-col gap-4">
{resource ? (
<>
{providerDetails?.provider === "iac" && gitUrl && (
<div className="flex justify-end">
<Tooltip>
<TooltipTrigger asChild>
<a
href={gitUrl}
target="_blank"
rel="noopener noreferrer"
className="text-bg-data-info inline-flex items-center gap-1 text-sm"
aria-label="Open resource in repository"
>
<ExternalLink size={16} />
View in Repository
</a>
</TooltipTrigger>
<TooltipContent>
Go to Resource in the Repository
</TooltipContent>
</Tooltip>
</div>
)}
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
<InfoField label="Resource Name">
{renderValue(resource.name)}
</InfoField>
<InfoField label="Resource Type">
{renderValue(resource.type)}
</InfoField>
</div>
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
<InfoField label="Service">
{renderValue(resource.service)}
</InfoField>
<InfoField label="Region">
{renderValue(resource.region)}
</InfoField>
</div>
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
<InfoField label="Partition">
{renderValue(resource.partition)}
</InfoField>
<InfoField label="Details">
{renderValue(resource.details)}
</InfoField>
</div>
<InfoField label="Resource ID" variant="simple">
<CodeSnippet value={resource.uid} />
</InfoField>
{resource.tags && Object.entries(resource.tags).length > 0 && (
<div className="flex flex-col gap-4">
<h4 className="text-text-neutral-secondary text-sm font-bold">
Tags
</h4>
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
{Object.entries(resource.tags).map(([key, value]) => (
<InfoField key={key} label={key}>
{renderValue(value)}
</InfoField>
))}
</div>
</div>
)}
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
<InfoField label="Created At">
<DateWithTime inline dateTime={resource.inserted_at || "-"} />
</InfoField>
<InfoField label="Last Updated">
<DateWithTime inline dateTime={resource.updated_at || "-"} />
</InfoField>
</div>
</>
) : (
<p className="text-text-neutral-tertiary text-sm">
Resource information is not available.
</p>
)}
</TabsContent>
{/* Scans Tab */}
<TabsContent value="scans" className="flex flex-col gap-4">
{scan ? (
<>
<div className="grid grid-cols-1 gap-4 md:grid-cols-3">
<InfoField label="Scan Name">{scan.name || "N/A"}</InfoField>
<InfoField label="Resources Scanned">
{scan.unique_resource_count}
</InfoField>
<InfoField label="Progress">{scan.progress}%</InfoField>
</div>
<div className="grid grid-cols-1 gap-4 md:grid-cols-3">
<InfoField label="Trigger">{scan.trigger}</InfoField>
<InfoField label="State">{scan.state}</InfoField>
<InfoField label="Duration">
{formatDuration(scan.duration)}
</InfoField>
</div>
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
<InfoField label="Started At">
<DateWithTime inline dateTime={scan.started_at || "-"} />
</InfoField>
<InfoField label="Completed At">
<DateWithTime inline dateTime={scan.completed_at || "-"} />
</InfoField>
</div>
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
<InfoField label="Launched At">
<DateWithTime inline dateTime={scan.inserted_at || "-"} />
</InfoField>
{scan.scheduled_at && (
<InfoField label="Scheduled At">
<DateWithTime inline dateTime={scan.scheduled_at} />
</InfoField>
)}
</div>
</>
) : (
<p className="text-text-neutral-tertiary text-sm">
Scan information is not available.
</p>
)}
</TabsContent>
{/* Events Tab */}
<TabsContent value="events" className="flex flex-col gap-4">
<EventsTimeline
resourceId={finding.relationships?.resource?.id}
isAwsProvider={providerDetails?.provider === "aws"}
/>
</TabsContent>
</Tabs>
</div>
);
// If no trigger, render content directly (inline mode)
if (!trigger) {
return (
<>
{muteModal}
{content}
</>
);
}
// With trigger, wrap in Drawer — modal rendered outside to avoid nested overlay issues
return (
<>
{muteModal}
<Drawer
direction="right"
open={open}
defaultOpen={defaultOpen}
onOpenChange={onOpenChange}
>
<DrawerTrigger asChild>{trigger}</DrawerTrigger>
<DrawerContent className="minimal-scrollbar 3xl:w-1/3 h-full w-full overflow-x-hidden overflow-y-auto p-6 md:w-1/2 md:max-w-none">
<DrawerHeader className="sr-only">
<DrawerTitle>Finding Details</DrawerTitle>
<DrawerDescription>View the finding details</DrawerDescription>
</DrawerHeader>
<DrawerClose className="ring-offset-background focus:ring-ring absolute top-4 right-4 rounded-sm opacity-70 transition-opacity hover:opacity-100 focus:ring-2 focus:ring-offset-2 focus:outline-none">
<X className="size-4" />
<span className="sr-only">Close</span>
</DrawerClose>
{content}
</DrawerContent>
</Drawer>
</>
);
};
@@ -0,0 +1,16 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("findings group drill down", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "findings-group-drill-down.tsx");
const source = readFileSync(filePath, "utf8");
it("uses the shared finding-group resource state hook", () => {
expect(source).toContain("useFindingGroupResourceState");
expect(source).not.toContain("useInfiniteResources");
});
});
@@ -3,15 +3,12 @@
import {
flexRender,
getCoreRowModel,
Row,
RowSelectionState,
useReactTable,
} from "@tanstack/react-table";
import { ChevronLeft } from "lucide-react";
import { useSearchParams } from "next/navigation";
import { useState } from "react";
import { Spinner } from "@/components/shadcn/spinner/spinner";
import { LoadingState } from "@/components/shadcn/spinner/loading-state";
import {
Table,
TableBody,
@@ -21,24 +18,20 @@ import {
TableRow,
} from "@/components/ui/table";
import { SeverityBadge, StatusFindingBadge } from "@/components/ui/table";
import { useInfiniteResources } from "@/hooks/use-infinite-resources";
import { useFindingGroupResourceState } from "@/hooks/use-finding-group-resource-state";
import { cn, hasHistoricalFindingFilter } from "@/lib";
import {
getFilteredFindingGroupDelta,
isFindingGroupMuted,
} from "@/lib/findings-groups";
import { FindingGroupRow, FindingResourceRow } from "@/types";
import { FindingGroupRow } from "@/types";
import { FloatingMuteButton } from "../floating-mute-button";
import { getColumnFindingResources } from "./column-finding-resources";
import { canMuteFindingResource } from "./finding-resource-selection";
import { FindingsSelectionContext } from "./findings-selection-context";
import { ImpactedResourcesCell } from "./impacted-resources-cell";
import { DeltaValues, NotificationIndicator } from "./notification-indicator";
import {
ResourceDetailDrawer,
useResourceDetailDrawer,
} from "./resource-detail-drawer";
import { ResourceDetailDrawer } from "./resource-detail-drawer";
interface FindingsGroupDrillDownProps {
group: FindingGroupRow;
@@ -50,9 +43,6 @@ export function FindingsGroupDrillDown({
onCollapse,
}: FindingsGroupDrillDownProps) {
const searchParams = useSearchParams();
const [rowSelection, setRowSelection] = useState<RowSelectionState>({});
const [resources, setResources] = useState<FindingResourceRow[]>([]);
const [isLoading, setIsLoading] = useState(true);
// Keep drill-down endpoint selection aligned with the grouped findings page.
const currentParams = Object.fromEntries(searchParams.entries());
@@ -66,78 +56,27 @@ export function FindingsGroupDrillDown({
}
});
const handleSetResources = (
newResources: FindingResourceRow[],
_hasMore: boolean,
) => {
setResources(newResources);
setIsLoading(false);
};
const handleAppendResources = (
newResources: FindingResourceRow[],
_hasMore: boolean,
) => {
setResources((prev) => [...prev, ...newResources]);
setIsLoading(false);
};
const handleSetLoading = (loading: boolean) => {
setIsLoading(loading);
};
const { sentinelRef, refresh, loadMore, totalCount } = useInfiniteResources({
checkId: group.checkId,
hasDateOrScanFilter: hasHistoricalFilterActive,
filters,
onSetResources: handleSetResources,
onAppendResources: handleAppendResources,
onSetLoading: handleSetLoading,
});
// Resource detail drawer
const drawer = useResourceDetailDrawer({
const {
rowSelection,
resources,
checkId: group.checkId,
totalResourceCount: totalCount ?? group.resourcesTotal,
onRequestMoreResources: loadMore,
isLoading,
sentinelRef,
drawer,
handleDrawerMuteComplete,
selectedFindingIds,
selectableRowCount,
getRowCanSelect,
clearSelection,
isSelected,
handleMuteComplete,
handleRowSelectionChange,
resolveSelectedFindingIds,
} = useFindingGroupResourceState({
group,
filters,
hasHistoricalData: hasHistoricalFilterActive,
});
const handleDrawerMuteComplete = () => {
drawer.refetchCurrent();
refresh();
};
// Selection logic — tracks by findingId (resource_id) for checkbox consistency
const selectedFindingIds = Object.keys(rowSelection)
.filter((key) => rowSelection[key])
.map((idx) => resources[parseInt(idx)]?.findingId)
.filter((id): id is string => id !== null && id !== undefined && id !== "");
/** findingId values are already real finding UUIDs — no resolution needed. */
const resolveResourceIds = async (ids: string[]) => {
return ids.filter(Boolean);
};
const selectableRowCount = resources.filter(canMuteFindingResource).length;
const getRowCanSelect = (row: Row<FindingResourceRow>): boolean => {
return canMuteFindingResource(row.original);
};
const clearSelection = () => {
setRowSelection({});
};
const isSelected = (id: string) => {
return selectedFindingIds.includes(id);
};
const handleMuteComplete = () => {
clearSelection();
refresh();
};
const columns = getColumnFindingResources({
rowSelection,
selectableRowCount,
@@ -148,7 +87,7 @@ export function FindingsGroupDrillDown({
columns,
enableRowSelection: getRowCanSelect,
getCoreRowModel: getCoreRowModel(),
onRowSelectionChange: setRowSelection,
onRowSelectionChange: handleRowSelectionChange,
manualPagination: true,
state: {
rowSelection,
@@ -175,7 +114,7 @@ export function FindingsGroupDrillDown({
selectedFindings: [],
clearSelection,
isSelected,
resolveMuteIds: resolveResourceIds,
resolveMuteIds: resolveSelectedFindingIds,
onMuteComplete: handleMuteComplete,
}}
>
@@ -280,14 +219,7 @@ export function FindingsGroupDrillDown({
</Table>
{/* Loading indicator */}
{isLoading && (
<div className="flex items-center justify-center gap-2 py-8">
<Spinner className="size-6" />
<span className="text-text-neutral-tertiary text-sm">
Loading resources...
</span>
</div>
)}
{isLoading && <LoadingState label="Loading resources..." />}
{/* Sentinel for infinite scroll */}
<div ref={sentinelRef} className="h-1" />
@@ -299,7 +231,7 @@ export function FindingsGroupDrillDown({
selectedCount={selectedFindingIds.length}
selectedFindingIds={selectedFindingIds}
onBeforeOpen={async () => {
return resolveResourceIds(selectedFindingIds);
return resolveSelectedFindingIds(selectedFindingIds);
}}
onComplete={handleMuteComplete}
isBulkOperation
+2 -7
View File
@@ -1,16 +1,11 @@
export * from "./column-finding-groups";
export * from "./column-finding-resources";
export * from "./column-findings";
export * from "./column-standalone-findings";
export * from "./data-table-row-actions";
export * from "./data-table-row-details";
export * from "./finding-detail";
export * from "./finding-detail-drawer";
export * from "./findings-group-drill-down";
export * from "./findings-group-table";
export * from "./findings-selection-context";
// TODO: Remove legacy exports once /resources and overview dashboard migrate to grouped view components
// export * from "./column-findings";
// export * from "./data-table-row-details";
// export * from "./finding-detail";
export * from "./impacted-resources-cell";
export * from "./notification-indicator";
export * from "./provider-icon-cell";
@@ -0,0 +1,16 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("inline resource container", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "inline-resource-container.tsx");
const source = readFileSync(filePath, "utf8");
it("uses the shared finding-group resource state hook", () => {
expect(source).toContain("useFindingGroupResourceState");
expect(source).not.toContain("useInfiniteResources");
});
});
@@ -3,32 +3,26 @@
import {
flexRender,
getCoreRowModel,
Row,
RowSelectionState,
useReactTable,
} from "@tanstack/react-table";
import { AnimatePresence, motion } from "framer-motion";
import { ChevronsDown } from "lucide-react";
import { useImperativeHandle, useRef, useState } from "react";
import { useImperativeHandle, useRef } from "react";
import { Skeleton } from "@/components/shadcn/skeleton/skeleton";
import { Spinner } from "@/components/shadcn/spinner/spinner";
import { LoadingState } from "@/components/shadcn/spinner/loading-state";
import { TableCell, TableRow } from "@/components/ui/table";
import { useInfiniteResources } from "@/hooks/use-infinite-resources";
import { useFindingGroupResourceState } from "@/hooks/use-finding-group-resource-state";
import { useScrollHint } from "@/hooks/use-scroll-hint";
import { FindingGroupRow, FindingResourceRow } from "@/types";
import { FindingGroupRow } from "@/types";
import { getColumnFindingResources } from "./column-finding-resources";
import { canMuteFindingResource } from "./finding-resource-selection";
import { FindingsSelectionContext } from "./findings-selection-context";
import {
getFilteredFindingGroupResourceCount,
getFindingGroupSkeletonCount,
} from "./inline-resource-container.utils";
import {
ResourceDetailDrawer,
useResourceDetailDrawer,
} from "./resource-detail-drawer";
import { ResourceDetailDrawer } from "./resource-detail-drawer";
export interface InlineResourceContainerHandle {
/** Soft-refresh resources (re-fetch page 1 without skeletons). */
@@ -140,22 +134,6 @@ export function InlineResourceContainer({
ref,
}: InlineResourceContainerProps) {
const scrollContainerRef = useRef<HTMLDivElement>(null);
const [rowSelection, setRowSelection] = useState<RowSelectionState>({});
const [resources, setResources] = useState<FindingResourceRow[]>([]);
const [isLoading, setIsLoading] = useState(true);
// Scroll hint: shows "scroll for more" when content overflows
const {
containerRef: scrollHintContainerRef,
sentinelRef: scrollHintSentinelRef,
showScrollHint,
} = useScrollHint({ refreshToken: resources.length });
// Combine scrollContainerRef (for IntersectionObserver root) with scrollHintContainerRef
const combinedScrollRef = (node: HTMLDivElement | null) => {
scrollContainerRef.current = node;
scrollHintContainerRef(node);
};
const filters: Record<string, string> = { ...resolvedFilters };
if (resourceSearch) {
filters["filter[name__icontains]"] = resourceSearch;
@@ -171,99 +149,45 @@ export function InlineResourceContainer({
filters,
);
const handleSetResources = (
newResources: FindingResourceRow[],
_hasMore: boolean,
) => {
setResources(newResources);
setIsLoading(false);
};
const handleAppendResources = (
newResources: FindingResourceRow[],
_hasMore: boolean,
) => {
setResources((prev) => [...prev, ...newResources]);
setIsLoading(false);
};
const handleSetLoading = (loading: boolean) => {
setIsLoading(loading);
};
const { sentinelRef, refresh, loadMore, totalCount } = useInfiniteResources({
checkId: group.checkId,
hasDateOrScanFilter: hasHistoricalData,
const {
rowSelection,
resources,
isLoading,
sentinelRef,
refresh,
drawer,
handleDrawerMuteComplete,
selectedFindingIds,
selectableRowCount,
getRowCanSelect,
clearSelection,
isSelected,
handleMuteComplete,
handleRowSelectionChange,
resolveSelectedFindingIds,
} = useFindingGroupResourceState({
group,
filters,
onSetResources: handleSetResources,
onAppendResources: handleAppendResources,
onSetLoading: handleSetLoading,
hasHistoricalData,
onResourceSelectionChange,
scrollContainerRef,
});
// Resource detail drawer
const drawer = useResourceDetailDrawer({
resources,
checkId: group.checkId,
totalResourceCount: totalCount ?? group.resourcesTotal,
onRequestMoreResources: loadMore,
});
// Scroll hint: shows "scroll for more" when content overflows
const {
containerRef: scrollHintContainerRef,
sentinelRef: scrollHintSentinelRef,
showScrollHint,
} = useScrollHint({ refreshToken: resources.length });
const handleDrawerMuteComplete = () => {
drawer.refetchCurrent();
refresh();
};
// Selection logic
const selectedFindingIds = Object.keys(rowSelection)
.filter((key) => rowSelection[key])
.map((idx) => resources[parseInt(idx)]?.findingId)
.filter(Boolean);
const resolveResourceIds = async (ids: string[]) => {
// findingId values are already real finding UUIDs (from the group
// resources endpoint), so no second resolution round-trip is needed.
return ids.filter(Boolean);
};
const selectableRowCount = resources.filter(canMuteFindingResource).length;
const getRowCanSelect = (row: Row<FindingResourceRow>): boolean => {
return canMuteFindingResource(row.original);
};
const clearSelection = () => {
setRowSelection({});
onResourceSelectionChange([]);
// Combine scrollContainerRef (for IntersectionObserver root) with scrollHintContainerRef
const combinedScrollRef = (node: HTMLDivElement | null) => {
scrollContainerRef.current = node;
scrollHintContainerRef(node);
};
useImperativeHandle(ref, () => ({ refresh, clearSelection }));
const isSelected = (id: string) => {
return selectedFindingIds.includes(id);
};
const handleMuteComplete = () => {
clearSelection();
refresh();
};
const handleRowSelectionChange = (
updater:
| RowSelectionState
| ((prev: RowSelectionState) => RowSelectionState),
) => {
const newSelection =
typeof updater === "function" ? updater(rowSelection) : updater;
setRowSelection(newSelection);
const newFindingIds = Object.keys(newSelection)
.filter((key) => newSelection[key])
.map((idx) => resources[parseInt(idx)]?.findingId)
.filter(Boolean);
onResourceSelectionChange(newFindingIds);
};
const columns = getColumnFindingResources({
rowSelection,
selectableRowCount,
@@ -290,7 +214,7 @@ export function InlineResourceContainer({
selectedFindings: [],
clearSelection,
isSelected,
resolveMuteIds: resolveResourceIds,
resolveMuteIds: resolveSelectedFindingIds,
onMuteComplete: handleMuteComplete,
}}
>
@@ -363,14 +287,9 @@ export function InlineResourceContainer({
</tbody>
</table>
{/* Spinner for infinite scroll (subsequent pages only) */}
{/* Loading state for infinite scroll (subsequent pages only) */}
{isLoading && rows.length > 0 && (
<div className="flex items-center justify-center gap-2 py-8">
<Spinner className="size-6" />
<span className="text-text-neutral-tertiary text-sm">
Loading resources...
</span>
</div>
<LoadingState label="Loading resources..." />
)}
{/* Sentinel for scroll hint detection */}
@@ -178,16 +178,30 @@ vi.mock("@/components/findings/markdown-container", () => ({
}));
vi.mock("@/components/shared/query-code-editor", () => ({
QUERY_EDITOR_LANGUAGE: {
OPEN_CYPHER: "openCypher",
PLAIN_TEXT: "plainText",
SHELL: "shell",
HCL: "hcl",
BICEP: "bicep",
YAML: "yaml",
},
QueryCodeEditor: ({
ariaLabel,
language,
value,
copyValue,
}: {
ariaLabel: string;
language?: string;
value: string;
copyValue?: string;
}) => (
<div data-testid="query-code-editor" data-aria-label={ariaLabel}>
<div
data-testid="query-code-editor"
data-aria-label={ariaLabel}
data-language={language}
>
<span>{ariaLabel}</span>
<span>{value}</span>
<button
@@ -512,6 +526,34 @@ describe("ResourceDetailDrawerContent — Fix 2: Remediation heading labels", ()
expect(mockClipboardWriteText).toHaveBeenCalledWith("aws s3 ...");
expect(screen.getByText("$ aws s3 ...")).toBeInTheDocument();
});
it("should pass syntax highlighting languages to each remediation editor", () => {
// Given
render(
<ResourceDetailDrawerContent
isLoading={false}
isNavigating={false}
checkMeta={checkMetaWithCommands}
currentIndex={0}
totalResources={1}
currentFinding={mockFinding}
otherFindings={[]}
onNavigatePrev={vi.fn()}
onNavigateNext={vi.fn()}
onMuteComplete={vi.fn()}
/>,
);
// When
const editors = screen.getAllByTestId("query-code-editor");
// Then
expect(editors[0]).toHaveAttribute("data-language", "shell");
expect(editors[1]).toHaveAttribute("data-language", "hcl");
expect(editors[2]).toHaveAttribute("data-language", "yaml");
expect(editors[0]).toHaveAttribute("data-aria-label", "CLI Command");
expect(editors[2]).toHaveAttribute("data-aria-label", "CloudFormation");
});
});
// ---------------------------------------------------------------------------
@@ -37,14 +37,18 @@ import {
ActionDropdownItem,
} from "@/components/shadcn/dropdown";
import { Skeleton } from "@/components/shadcn/skeleton/skeleton";
import { Spinner } from "@/components/shadcn/spinner/spinner";
import { LoadingState } from "@/components/shadcn/spinner/loading-state";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import { EventsTimeline } from "@/components/shared/events-timeline/events-timeline";
import { QueryCodeEditor } from "@/components/shared/query-code-editor";
import {
QUERY_EDITOR_LANGUAGE,
QueryCodeEditor,
type QueryEditorLanguage,
} from "@/components/shared/query-code-editor";
import { CodeSnippet } from "@/components/ui/code-snippet/code-snippet";
import { CustomLink } from "@/components/ui/custom/custom-link";
import { DateWithTime } from "@/components/ui/entities/date-with-time";
@@ -81,19 +85,49 @@ function stripCodeFences(code: string): string {
.trim();
}
/**
 * Maps a cloud provider type to the display label and syntax-highlighting
 * language used for its native IaC remediation snippet.
 *
 * @param providerType - Provider identifier (e.g. "aws", "azure"); may be
 *   undefined when the finding carries no provider information.
 * @returns Label and editor language; unknown or missing providers fall back
 *   to a generic plain-text "Native IaC" block.
 */
function resolveNativeIacConfig(providerType: string | undefined): {
  label: string;
  language: QueryEditorLanguage;
} {
  // Lookup table keeps the provider → IaC-format mapping in one place.
  const byProvider: Record<
    string,
    { label: string; language: QueryEditorLanguage }
  > = {
    aws: { label: "CloudFormation", language: QUERY_EDITOR_LANGUAGE.YAML },
    azure: { label: "Bicep", language: QUERY_EDITOR_LANGUAGE.BICEP },
    kubernetes: {
      label: "Kubernetes Manifest",
      language: QUERY_EDITOR_LANGUAGE.YAML,
    },
  };
  const match =
    providerType !== undefined ? byProvider[providerType] : undefined;
  return (
    match ?? { label: "Native IaC", language: QUERY_EDITOR_LANGUAGE.PLAIN_TEXT }
  );
}
function renderRemediationCodeBlock({
label,
value,
copyValue,
language = QUERY_EDITOR_LANGUAGE.PLAIN_TEXT,
}: {
label: string;
value: string;
copyValue?: string;
language?: QueryEditorLanguage;
}) {
return (
<QueryCodeEditor
ariaLabel={label}
language="plainText"
language={language}
value={value}
copyValue={copyValue}
editable={false}
@@ -351,6 +385,7 @@ export function ResourceDetailDrawerContent({
? (f?.scan?.id ?? null)
: null;
const regionFilter = searchParams.get("filter[region__in]");
const nativeIacConfig = resolveNativeIacConfig(f?.providerType);
const handleOpenCompliance = async (framework: string) => {
if (!complianceScanId || resolvingFramework) {
@@ -741,6 +776,7 @@ export function ResourceDetailDrawerContent({
<div className="flex flex-col gap-1">
{renderRemediationCodeBlock({
label: "CLI Command",
language: QUERY_EDITOR_LANGUAGE.SHELL,
value: `$ ${stripCodeFences(checkMeta.remediation.code.cli)}`,
copyValue: stripCodeFences(
checkMeta.remediation.code.cli,
@@ -753,6 +789,7 @@ export function ResourceDetailDrawerContent({
<div className="flex flex-col gap-1">
{renderRemediationCodeBlock({
label: "Terraform",
language: QUERY_EDITOR_LANGUAGE.HCL,
value: stripCodeFences(
checkMeta.remediation.code.terraform,
),
@@ -760,10 +797,11 @@ export function ResourceDetailDrawerContent({
</div>
)}
{checkMeta.remediation.code.nativeiac && (
{checkMeta.remediation.code.nativeiac && f && (
<div className="flex flex-col gap-1">
{renderRemediationCodeBlock({
label: "CloudFormation",
label: nativeIacConfig.label,
language: nativeIacConfig.language,
value: stripCodeFences(
checkMeta.remediation.code.nativeiac,
),
@@ -835,9 +873,7 @@ export function ResourceDetailDrawerContent({
className="minimal-scrollbar flex flex-col gap-2 overflow-y-auto"
>
{!f || isNavigating ? (
<div className="flex items-center justify-center py-8">
<Spinner className="size-5" />
</div>
<LoadingState spinnerClassName="size-5" />
) : (
<>
<div className="flex items-center justify-end">
@@ -191,7 +191,7 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
id: "other-1",
checkId: "check-other-1",
checkTitle: "Other 1",
status: "PASS",
status: "FAIL",
severity: "critical",
}),
makeDrawerFinding({
@@ -221,6 +221,55 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
]);
});
it("should exclude non-FAIL findings from otherFindings", async () => {
const resources = [makeResource()];
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
adaptFindingsByResourceResponseMock.mockReturnValue([
makeDrawerFinding({
id: "current",
checkId: "s3_check",
status: "MANUAL",
severity: "informational",
}),
makeDrawerFinding({
id: "other-pass",
checkId: "check-pass",
status: "PASS",
severity: "low",
}),
makeDrawerFinding({
id: "other-manual",
checkId: "check-manual",
status: "MANUAL",
severity: "low",
}),
makeDrawerFinding({
id: "other-fail",
checkId: "check-fail",
status: "FAIL",
severity: "high",
}),
]);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
await act(async () => {
result.current.openDrawer(0);
await Promise.resolve();
});
expect(result.current.currentFinding?.id).toBe("current");
expect(result.current.otherFindings.map((f) => f.id)).toEqual([
"other-fail",
]);
});
it("should keep isNavigating true for a cached resource long enough to render skeletons", async () => {
vi.useFakeTimers();
@@ -46,6 +46,7 @@ interface UseResourceDetailDrawerOptions {
checkId: string;
totalResourceCount?: number;
onRequestMoreResources?: () => void;
initialIndex?: number | null;
}
interface UseResourceDetailDrawerReturn {
@@ -77,10 +78,11 @@ export function useResourceDetailDrawer({
checkId,
totalResourceCount,
onRequestMoreResources,
initialIndex = null,
}: UseResourceDetailDrawerOptions): UseResourceDetailDrawerReturn {
const [isOpen, setIsOpen] = useState(false);
const [isOpen, setIsOpen] = useState(initialIndex !== null);
const [isLoading, setIsLoading] = useState(false);
const [currentIndex, setCurrentIndex] = useState(0);
const [currentIndex, setCurrentIndex] = useState(initialIndex ?? 0);
const [findings, setFindings] = useState<ResourceDrawerFinding[]>([]);
const [isNavigating, setIsNavigating] = useState(false);
@@ -190,6 +192,22 @@ export function useResourceDetailDrawer({
}
};
useEffect(() => {
if (initialIndex === null) {
return;
}
const resource = resources[initialIndex];
if (!resource) {
return;
}
fetchFindings(resource.resourceUid);
// Only initialize once on mount for deep-link/inline entry points.
// User-driven navigations use openDrawer/navigateTo afterwards.
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const openDrawer = (index: number) => {
const resource = resources[index];
if (!resource) return;
@@ -251,10 +269,13 @@ export function useResourceDetailDrawer({
const currentFinding =
findings.find((f) => f.checkId === checkId) ?? findings[0] ?? null;
// All other findings for this resource
const otherFindings = currentFinding
? findings.filter((f) => f.id !== currentFinding.id)
: findings;
// "Other Findings For This Resource" intentionally shows only FAIL entries,
// while currentFinding (the drilled-down one) can be any status (FAIL, MANUAL, PASS…).
const otherFindings = (
currentFinding
? findings.filter((f) => f.id !== currentFinding.id)
: findings
).filter((f) => f.status === "FAIL");
return {
isOpen,

Some files were not shown because too many files have changed in this diff Show More