Compare commits

...

15 Commits

Author SHA1 Message Date
Hugo P.Brito 5b293fd509 refactor(m365): scope p1 license check to P1 features only
Reshape PremiumLicenseInsight to a canonical 5-field schema reused by
the upcoming P2 check (#10784): entitled P1/P2/total, plus pre-aggregated
p1_licenses_utilized (CA + CA-guest) and p2_licenses_utilized
(risk-based-CA + risk-based-CA-guest).

The P1 check now compares entitled_total_license_count (P1+P2) against
p1_licenses_utilized only — risk-based CA usage is the P2 check's domain.
Updates Description, Notes and tests accordingly.
2026-04-28 14:22:40 +01:00
Hugo P.Brito da70c9887d docs(m365): fix broken license utilization URL and adjust title
- Replace 404 concept-license-utilization link with the working
  Microsoft Entra licensing fundamentals page
- Add official Graph API resource doc URL for azureADPremiumLicenseInsight
- Update CheckTitle to reflect that the check counts both P1 and P2
2026-04-28 14:12:30 +01:00
Hugo P.Brito 9531983324 fix(m365): correct premium license insight parsing and semantics
- Parse correct Microsoft Graph keys (entitledP1/P2LicenseCount,
  p1FeatureUtilizations.conditionalAccess[GuestUsers].userCount)
- Sum P1 + P2 entitlements and count regular plus guest CA users
- Return MANUAL when license insight is unavailable instead of FAIL
- Downgrade missingLicense log to warning with explanatory message
- Add parser unit tests that mock the raw Graph response
2026-04-28 11:58:11 +01:00
Hugo P.Brito ec52c7b3fd feat(m365): add entra_conditional_access_policy_p1_license_utilization security check
Add new security check entra_conditional_access_policy_p1_license_utilization for m365 provider.
Includes check implementation, metadata, and unit tests.
2026-04-20 09:28:53 +01:00
Daniel Barranquero f7194b32de docs: remove prowler ctf page (#10782) 2026-04-20 09:37:30 +02:00
Pedro Martín 6ffe4e95bf fix(api): detect silent failures in ResourceFindingMapping (#10724)
Co-authored-by: Pepe Fagoaga <pepe@prowler.com>
2026-04-20 09:00:43 +02:00
Alan Buscaglia 577aa14acc fix(ui): correct IaC findings counters (#10736)
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2026-04-17 12:48:57 +02:00
Andoni Alonso 19c752c127 fix(cloudflare): guard validate_credentials against paginator infinite loops (#10771) 2026-04-17 11:23:31 +02:00
Alejandro Bailo f2d35f5885 fix(ui): exclude muted findings and polish filter selectors (#10734) 2026-04-17 11:07:22 +02:00
Josema Camacho 536e90f2a5 perf(attack-paths): cleanup task prioritization, restore default batch sizes to 1000, upgrade Cartography to 0.135.0 (#10729) 2026-04-17 10:22:30 +02:00
Daniel Barranquero 276a5d66bd feat(docs): add ctf documentation (#10761) 2026-04-16 19:35:52 +02:00
Alejandro Bailo 489c6c1073 fix: CHANGELOG minor issue (#10758) 2026-04-16 17:07:22 +02:00
Adrián Peña b08b072288 fix(api): exclude muted findings from pass_count, fail_count and manual_count (#10753) 2026-04-16 15:56:08 +02:00
Josema Camacho ca29e354b6 chore(deps): bump msgraph-sdk to 1.55.0 and azure-mgmt-resource to 24.0.0, remove marshmallow (#10733) 2026-04-16 15:34:28 +02:00
Alejandro Bailo 85a3927950 fix(ui): upgrade React 19.2.5 and Next.js 16.2.3 to mitigate CVE-2026-23869 (#10752) 2026-04-16 15:24:10 +02:00
61 changed files with 5659 additions and 3142 deletions
+6
View File
@@ -4,9 +4,15 @@ All notable changes to the **Prowler API** are documented in this file.
## [1.25.1] (Prowler v5.24.1)
### 🔄 Changed
- Attack Paths: Restore `SYNC_BATCH_SIZE` and `FINDINGS_BATCH_SIZE` defaults to 1000, upgrade Cartography to 0.135.0, enable Celery queue priority for cleanup task, rewrite Finding insertion, remove AWS graph cleanup and add timing logs [(#10729)](https://github.com/prowler-cloud/prowler/pull/10729)
### 🐞 Fixed
- Attack Paths: Missing `tenant_id` filter while getting related findings after scan completes [(#10722)](https://github.com/prowler-cloud/prowler/pull/10722)
- Finding group counters `pass_count`, `fail_count` and `manual_count` now exclude muted findings [(#10753)](https://github.com/prowler-cloud/prowler/pull/10753)
- Silent data loss in `ResourceFindingMapping` bulk insert that left findings orphaned when `INSERT ... ON CONFLICT DO NOTHING` dropped rows without raising; added explicit `unique_fields` [(#10724)](https://github.com/prowler-cloud/prowler/pull/10724)
---
+140 -116
View File
@@ -682,21 +682,21 @@ requests = ">=2.21.0,<3.0.0"
[[package]]
name = "alibabacloud-tea-openapi"
version = "0.4.1"
version = "0.4.4"
description = "Alibaba Cloud openapi SDK Library for Python"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "alibabacloud_tea_openapi-0.4.1-py3-none-any.whl", hash = "sha256:e46bfa3ca34086d2c357d217a0b7284ecbd4b3bab5c88e075e73aec637b0e4a0"},
{file = "alibabacloud_tea_openapi-0.4.1.tar.gz", hash = "sha256:2384b090870fdb089c3c40f3fb8cf0145b8c7d6c14abbac521f86a01abb5edaf"},
{file = "alibabacloud_tea_openapi-0.4.4-py3-none-any.whl", hash = "sha256:cea6bc1fe35b0319a8752cb99eb0ecb0dab7ca1a71b99c12970ba0867410995f"},
{file = "alibabacloud_tea_openapi-0.4.4.tar.gz", hash = "sha256:1b0917bc03cd49417da64945e92731716d53e2eb8707b235f54e45b7473221ce"},
]
[package.dependencies]
alibabacloud-credentials = ">=1.0.2,<2.0.0"
alibabacloud-gateway-spi = ">=0.0.2,<1.0.0"
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
cryptography = ">=3.0.0,<45.0.0"
cryptography = {version = ">=3.0.0,<47.0.0", markers = "python_version >= \"3.8\""}
darabonba-core = ">=1.0.3,<2.0.0"
[[package]]
@@ -1526,19 +1526,19 @@ typing-extensions = ">=4.6.0"
[[package]]
name = "azure-mgmt-resource"
version = "23.3.0"
version = "24.0.0"
description = "Microsoft Azure Resource Management Client Library for Python"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "azure_mgmt_resource-23.3.0-py3-none-any.whl", hash = "sha256:ab216ee28e29db6654b989746e0c85a1181f66653929d2cb6e48fba66d9af323"},
{file = "azure_mgmt_resource-23.3.0.tar.gz", hash = "sha256:fc4f1fd8b6aad23f8af4ed1f913df5f5c92df117449dc354fea6802a2829fea4"},
{file = "azure_mgmt_resource-24.0.0-py3-none-any.whl", hash = "sha256:27b32cd223e2784269f5a0db3c282042886ee4072d79cedc638438ece7cd0df4"},
{file = "azure_mgmt_resource-24.0.0.tar.gz", hash = "sha256:cf6b8995fcdd407ac9ff1dd474087129429a1d90dbb1ac77f97c19b96237b265"},
]
[package.dependencies]
azure-common = ">=1.1"
azure-mgmt-core = ">=1.3.2"
azure-mgmt-core = ">=1.5.0"
isodate = ">=0.6.1"
typing-extensions = ">=4.6.0"
@@ -1822,19 +1822,19 @@ crt = ["awscrt (==0.27.6)"]
[[package]]
name = "cartography"
version = "0.132.0"
version = "0.135.0"
description = "Explore assets and their relationships across your technical infrastructure."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "cartography-0.132.0-py3-none-any.whl", hash = "sha256:c070aa51d0ab4479cb043cae70b35e7df49f2fb5f1fa95ccf10000bbeb952262"},
{file = "cartography-0.132.0.tar.gz", hash = "sha256:7c6332bc57fd2629d7b83aee7bd95a7b2edb0d51ef746efa0461399e0b66625c"},
{file = "cartography-0.135.0-py3-none-any.whl", hash = "sha256:c62c32a6917b8f23a8b98fe2b6c7c4a918b50f55918482966c4dae1cf5f538e1"},
{file = "cartography-0.135.0.tar.gz", hash = "sha256:3f500cd22c3b392d00e8b49f62acc95fd4dcd559ce514aafe2eb8101133c7a49"},
]
[package.dependencies]
adal = ">=1.2.4"
aioboto3 = ">=13.0.0"
aioboto3 = ">=15.0.0"
azure-cli-core = ">=2.26.0"
azure-identity = ">=1.5.0"
azure-keyvault-certificates = ">=4.0.0"
@@ -1852,9 +1852,9 @@ azure-mgmt-keyvault = ">=10.0.0"
azure-mgmt-logic = ">=10.0.0"
azure-mgmt-monitor = ">=3.0.0"
azure-mgmt-network = ">=25.0.0"
azure-mgmt-resource = ">=10.2.0,<25.0.0"
azure-mgmt-resource = ">=24.0.0,<25"
azure-mgmt-security = ">=5.0.0"
azure-mgmt-sql = ">=3.0.1,<4"
azure-mgmt-sql = ">=3.0.1"
azure-mgmt-storage = ">=16.0.0"
azure-mgmt-synapse = ">=2.0.0"
azure-mgmt-web = ">=7.0.0"
@@ -1862,38 +1862,39 @@ azure-synapse-artifacts = ">=0.17.0"
backoff = ">=2.1.2"
boto3 = ">=1.15.1"
botocore = ">=1.18.1"
cloudflare = ">=4.1.0,<5.0.0"
cloudflare = ">=4.1.0"
crowdstrike-falconpy = ">=0.5.1"
cryptography = "*"
dnspython = ">=1.15.0"
duo-client = "*"
google-api-python-client = ">=1.7.8"
cryptography = ">=45.0.0"
dnspython = ">=2.0.0"
duo-client = ">=5.5.0"
google-api-python-client = ">=2.0.0"
google-auth = ">=2.37.0"
google-cloud-asset = ">=1.0.0"
google-cloud-resource-manager = ">=1.14.2"
httpx = ">=0.24.0"
kubernetes = ">=22.6.0"
marshmallow = ">=3.0.0rc7"
msgraph-sdk = "*"
marshmallow = ">=4.0.0"
msgraph-sdk = ">=1.53.0"
msrestazure = ">=0.6.4"
neo4j = ">=6.0.0"
oci = ">=2.71.0"
okta = "<1.0.0"
packageurl-python = "*"
packaging = "*"
packageurl-python = ">=0.17.0"
packaging = ">=26.0.0"
pagerduty = ">=4.0.1"
policyuniverse = ">=1.1.0.0"
PyJWT = {version = ">=2.0.0", extras = ["crypto"]}
python-dateutil = "*"
python-dateutil = ">=2.9.0"
python-digitalocean = ">=1.16.0"
pyyaml = ">=5.3.1"
requests = ">=2.22.0"
scaleway = ">=2.10.0"
slack-sdk = ">=3.37.0"
statsd = "*"
statsd = ">=4.0.0"
typer = ">=0.9.0"
types-aiobotocore-ecr = "*"
xmltodict = "*"
types-aiobotocore-ecr = ">=3.1.0"
workos = ">=5.44.0"
xmltodict = ">=1.0.0"
[[package]]
name = "celery"
@@ -2503,62 +2504,74 @@ dev = ["bandit", "coverage", "flake8", "pydocstyle", "pylint", "pytest", "pytest
[[package]]
name = "cryptography"
version = "44.0.3"
version = "46.0.6"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.7"
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main", "dev"]
files = [
{file = "cryptography-44.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:962bc30480a08d133e631e8dfd4783ab71cc9e33d5d7c1e192f0b7c06397bb88"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffc61e8f3bf5b60346d89cd3d37231019c17a081208dfbbd6e1605ba03fa137"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58968d331425a6f9eedcee087f77fd3c927c88f55368f43ff7e0a19891f2642c"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:e28d62e59a4dbd1d22e747f57d4f00c459af22181f0b2f787ea83f5a876d7c76"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af653022a0c25ef2e3ffb2c673a50e5a0d02fecc41608f4954176f1933b12359"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:157f1f3b8d941c2bd8f3ffee0af9b049c9665c39d3da9db2dc338feca5e98a43"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:c6cd67722619e4d55fdb42ead64ed8843d64638e9c07f4011163e46bc512cf01"},
{file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b424563394c369a804ecbee9b06dfb34997f19d00b3518e39f83a5642618397d"},
{file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c91fc8e8fd78af553f98bc7f2a1d8db977334e4eea302a4bfd75b9461c2d8904"},
{file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:25cd194c39fa5a0aa4169125ee27d1172097857b27109a45fadc59653ec06f44"},
{file = "cryptography-44.0.3-cp37-abi3-win32.whl", hash = "sha256:3be3f649d91cb182c3a6bd336de8b61a0a71965bd13d1a04a0e15b39c3d5809d"},
{file = "cryptography-44.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:3883076d5c4cc56dbef0b898a74eb6992fdac29a7b9013870b34efe4ddb39a0d"},
{file = "cryptography-44.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:5639c2b16764c6f76eedf722dbad9a0914960d3489c0cc38694ddf9464f1bb2f"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ffef566ac88f75967d7abd852ed5f182da252d23fac11b4766da3957766759"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:192ed30fac1728f7587c6f4613c29c584abdc565d7417c13904708db10206645"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7d5fe7195c27c32a64955740b949070f21cba664604291c298518d2e255931d2"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3f07943aa4d7dad689e3bb1638ddc4944cc5e0921e3c227486daae0e31a05e54"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb90f60e03d563ca2445099edf605c16ed1d5b15182d21831f58460c48bffb93"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:ab0b005721cc0039e885ac3503825661bd9810b15d4f374e473f8c89b7d5460c"},
{file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3bb0847e6363c037df8f6ede57d88eaf3410ca2267fb12275370a76f85786a6f"},
{file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0cc66c74c797e1db750aaa842ad5b8b78e14805a9b5d1348dc603612d3e3ff5"},
{file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6866df152b581f9429020320e5eb9794c8780e90f7ccb021940d7f50ee00ae0b"},
{file = "cryptography-44.0.3-cp39-abi3-win32.whl", hash = "sha256:c138abae3a12a94c75c10499f1cbae81294a6f983b3af066390adee73f433028"},
{file = "cryptography-44.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:cad399780053fb383dc067475135e41c9fe7d901a97dd5d9c5dfb5611afc0d7d"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:21a83f6f35b9cc656d71b5de8d519f566df01e660ac2578805ab245ffd8523f8"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fc3c9babc1e1faefd62704bb46a69f359a9819eb0292e40df3fb6e3574715cd4"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:e909df4053064a97f1e6565153ff8bb389af12c5c8d29c343308760890560aff"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dad80b45c22e05b259e33ddd458e9e2ba099c86ccf4e88db7bbab4b747b18d06"},
{file = "cryptography-44.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:479d92908277bed6e1a1c69b277734a7771c2b78633c224445b5c60a9f4bc1d9"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:896530bc9107b226f265effa7ef3f21270f18a2026bc09fed1ebd7b66ddf6375"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9b4d4a5dbee05a2c390bf212e78b99434efec37b17a4bff42f50285c5c8c9647"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02f55fb4f8b79c1221b0961488eaae21015b69b210e18c386b69de182ebb1259"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dd3db61b8fe5be220eee484a17233287d0be6932d056cf5738225b9c05ef4fff"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:978631ec51a6bbc0b7e58f23b68a8ce9e5f09721940933e9c217068388789fe5"},
{file = "cryptography-44.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:5d20cc348cca3a8aa7312f42ab953a56e15323800ca3ab0706b8cd452a3a056c"},
{file = "cryptography-44.0.3.tar.gz", hash = "sha256:fe19d8bc5536a91a24a8133328880a41831b6c5df54599a8417b62fe015d3053"},
{file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"},
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"},
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"},
{file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"},
{file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"},
{file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"},
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"},
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"},
{file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"},
{file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"},
{file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"},
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"},
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"},
{file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"},
{file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"},
{file = "cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"},
]
[package.dependencies]
cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""}
cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""}
[package.extras]
docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=3.0.0) ; python_version >= \"3.8\""]
docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_version >= \"3.8\""]
pep8test = ["check-sdist ; python_version >= \"3.8\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"]
nox = ["nox[uv] (>=2024.4.15)"]
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==44.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]
[[package]]
@@ -3740,19 +3753,19 @@ urllib3 = ["packaging", "urllib3"]
[[package]]
name = "google-auth-httplib2"
version = "0.2.1"
version = "0.2.0"
description = "Google Authentication Library: httplib2 transport"
optional = false
python-versions = ">=3.7"
python-versions = "*"
groups = ["main"]
files = [
{file = "google_auth_httplib2-0.2.1-py3-none-any.whl", hash = "sha256:1be94c611db91c01f9703e7f62b0a59bbd5587a95571c7b6fade510d648bc08b"},
{file = "google_auth_httplib2-0.2.1.tar.gz", hash = "sha256:5ef03be3927423c87fb69607b42df23a444e434ddb2555b73b3679793187b7de"},
{file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"},
{file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"},
]
[package.dependencies]
google-auth = ">=1.32.0,<3.0.0"
httplib2 = ">=0.19.0,<1.0.0"
google-auth = "*"
httplib2 = ">=0.19.0"
[[package]]
name = "google-cloud-access-context-manager"
@@ -5181,24 +5194,16 @@ files = [
[[package]]
name = "marshmallow"
version = "3.26.2"
version = "4.3.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.10"
groups = ["main", "dev"]
files = [
{file = "marshmallow-3.26.2-py3-none-any.whl", hash = "sha256:013fa8a3c4c276c24d26d84ce934dc964e2aa794345a0f8c7e5a7191482c8a73"},
{file = "marshmallow-3.26.2.tar.gz", hash = "sha256:bbe2adb5a03e6e3571b573f42527c6fe926e17467833660bebd11593ab8dfd57"},
{file = "marshmallow-4.3.0-py3-none-any.whl", hash = "sha256:46c4fe6984707e3cbd485dfebbf0a59874f58d695aad05c1668d15e8c6e13b46"},
{file = "marshmallow-4.3.0.tar.gz", hash = "sha256:fb43c53b3fe240b8f6af37223d6ef1636f927ad9bea8ab323afad95dff090880"},
]
[package.dependencies]
packaging = ">=17.0"
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"]
tests = ["pytest", "simplejson"]
[[package]]
name = "matplotlib"
version = "3.10.8"
@@ -5492,14 +5497,14 @@ dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"]
[[package]]
name = "msgraph-sdk"
version = "1.23.0"
version = "1.55.0"
description = "The Microsoft Graph Python SDK"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "msgraph_sdk-1.23.0-py3-none-any.whl", hash = "sha256:58e0047b4ca59fd82022c02cd73fec0170a3d84f3b76721e3db2a0314df9a58a"},
{file = "msgraph_sdk-1.23.0.tar.gz", hash = "sha256:6dd1ba9a46f5f0ce8599fd9610133adbd9d1493941438b5d3632fce9e55ed607"},
{file = "msgraph_sdk-1.55.0-py3-none-any.whl", hash = "sha256:c8e68ebc4b88af5111de312e7fa910a4e76ddf48a4534feadb1fb8a411c48cfc"},
{file = "msgraph_sdk-1.55.0.tar.gz", hash = "sha256:6df691a31954a050d26b8a678968017e157d940fb377f2a8a4e17a9741b98756"},
]
[package.dependencies]
@@ -5925,23 +5930,24 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "oci"
version = "2.160.3"
version = "2.169.0"
description = "Oracle Cloud Infrastructure Python SDK"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "oci-2.160.3-py3-none-any.whl", hash = "sha256:858bff3e697098bdda44833d2476bfb4632126f0182178e7dbde4dbd156d71f0"},
{file = "oci-2.160.3.tar.gz", hash = "sha256:57514889be3b713a8385d86e3ba8a33cf46e3563c2a7e29a93027fb30b8a2537"},
{file = "oci-2.169.0-py3-none-any.whl", hash = "sha256:c71bb5143f307791082b3e33cc1545c2490a518cfed85ab1948ef5107c36d30b"},
{file = "oci-2.169.0.tar.gz", hash = "sha256:f3c5fff00b01783b5325ea7b13bf140053ec1e9f41da20bfb9c8a349ee7662fa"},
]
[package.dependencies]
certifi = "*"
circuitbreaker = {version = ">=1.3.1,<3.0.0", markers = "python_version >= \"3.7\""}
cryptography = ">=3.2.1,<46.0.0"
pyOpenSSL = ">=17.5.0,<25.0.0"
cryptography = ">=3.2.1,<47.0.0"
pyOpenSSL = ">=17.5.0,<27.0.0"
python-dateutil = ">=2.5.3,<3.0.0"
pytz = ">=2016.10"
urllib3 = {version = ">=2.6.3", markers = "python_version >= \"3.10.0\""}
[package.extras]
adk = ["docstring-parser (>=0.16) ; python_version >= \"3.10\" and python_version < \"4\"", "mcp (>=1.6.0) ; python_version >= \"3.10\" and python_version < \"4\"", "pydantic (>=2.10.6) ; python_version >= \"3.10\" and python_version < \"4\"", "rich (>=13.9.4) ; python_version >= \"3.10\" and python_version < \"4\""]
@@ -6659,7 +6665,7 @@ files = [
[[package]]
name = "prowler"
version = "5.23.0"
version = "5.25.0"
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
optional = false
python-versions = ">=3.10,<3.13"
@@ -6679,7 +6685,7 @@ alibabacloud-rds20140815 = "12.0.0"
alibabacloud_sas20181203 = "6.1.0"
alibabacloud-sls20201230 = "5.9.0"
alibabacloud_sts20150401 = "1.1.6"
alibabacloud_tea_openapi = "0.4.1"
alibabacloud_tea_openapi = "0.4.4"
alibabacloud_vpc20160428 = "6.13.0"
alive-progress = "3.3.0"
awsipranges = "0.3.3"
@@ -6701,7 +6707,7 @@ azure-mgmt-postgresqlflexibleservers = "1.1.0"
azure-mgmt-rdbms = "10.1.0"
azure-mgmt-recoveryservices = "3.1.0"
azure-mgmt-recoveryservicesbackup = "9.2.0"
azure-mgmt-resource = "23.3.0"
azure-mgmt-resource = "24.0.0"
azure-mgmt-search = "9.1.0"
azure-mgmt-security = "7.0.0"
azure-mgmt-sql = "3.0.1"
@@ -6714,29 +6720,29 @@ boto3 = "1.40.61"
botocore = "1.40.61"
cloudflare = "4.3.1"
colorama = "0.4.6"
cryptography = "44.0.3"
cryptography = "46.0.6"
dash = "3.1.1"
dash-bootstrap-components = "2.0.3"
defusedxml = ">=0.7.1"
defusedxml = "0.7.1"
detect-secrets = "1.5.0"
dulwich = "0.23.0"
google-api-python-client = "2.163.0"
google-auth-httplib2 = ">=0.1,<0.3"
google-auth-httplib2 = "0.2.0"
h2 = "4.3.0"
jsonschema = "4.23.0"
kubernetes = "32.0.1"
markdown = "3.10.2"
microsoft-kiota-abstractions = "1.9.2"
msgraph-sdk = "1.23.0"
msgraph-sdk = "1.55.0"
numpy = "2.0.2"
oci = "2.160.3"
oci = "2.169.0"
openstacksdk = "4.2.0"
pandas = "2.2.3"
py-iam-expand = "0.1.0"
py-ocsf-models = "0.8.1"
pydantic = ">=2.0,<3.0"
pydantic = "2.12.5"
pygithub = "2.8.0"
python-dateutil = ">=2.9.0.post0,<3.0.0"
python-dateutil = "2.9.0.post0"
pytz = "2025.1"
schema = "0.7.5"
shodan = "1.31.0"
@@ -6749,7 +6755,7 @@ uuid6 = "2024.7.10"
type = "git"
url = "https://github.com/prowler-cloud/prowler.git"
reference = "master"
resolved_reference = "6ac90eb1b58590b6f2f51645dbef17b9231053f4"
resolved_reference = "ca29e354b622198ff6a70e2ea5eb04e4a44a0903"
[[package]]
name = "psutil"
@@ -6958,11 +6964,11 @@ description = "C parser in Python"
optional = false
python-versions = ">=3.10"
groups = ["main", "dev"]
markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""
files = [
{file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"},
{file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"},
]
markers = {main = "implementation_name != \"PyPy\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""}
[[package]]
name = "pydantic"
@@ -7288,18 +7294,19 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=7.4.0)", "pytest-cov (>=2.10.1)", "
[[package]]
name = "pyopenssl"
version = "24.3.0"
version = "26.0.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pyOpenSSL-24.3.0-py3-none-any.whl", hash = "sha256:e474f5a473cd7f92221cc04976e48f4d11502804657a08a989fb3be5514c904a"},
{file = "pyopenssl-24.3.0.tar.gz", hash = "sha256:49f7a019577d834746bc55c5fce6ecbcec0f2b4ec5ce1cf43a9a173b8138bb36"},
{file = "pyopenssl-26.0.0-py3-none-any.whl", hash = "sha256:df94d28498848b98cc1c0ffb8ef1e71e40210d3b0a8064c9d29571ed2904bf81"},
{file = "pyopenssl-26.0.0.tar.gz", hash = "sha256:f293934e52936f2e3413b89c6ce36df66a0b34ae1ea3a053b8c5020ff2f513fc"},
]
[package.dependencies]
cryptography = ">=41.0.5,<45"
cryptography = ">=46.0.0,<47"
typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"]
@@ -8807,6 +8814,23 @@ markupsafe = ">=2.1.1"
[package.extras]
watchdog = ["watchdog (>=2.3)"]
[[package]]
name = "workos"
version = "6.0.4"
description = "WorkOS Python Client"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "workos-6.0.4-py3-none-any.whl", hash = "sha256:548668b3702673536f853ba72a7b5bbbc269e467aaf9ac4f477b6e0177df5e21"},
{file = "workos-6.0.4.tar.gz", hash = "sha256:b0bfe8fd212b8567422c4ea3732eb33608794033eb3a69900c6b04db183c32d6"},
]
[package.dependencies]
cryptography = ">=46.0,<47.0"
httpx = ">=0.28,<1.0"
pyjwt = ">=2.12,<3.0"
[[package]]
name = "wrapt"
version = "1.17.3"
@@ -9400,4 +9424,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "077e89853cfe3a6d934841488cfa5a98ff6c92b71f74b817b71387d11559f143"
content-hash = "a3ab982d11a87d951ff15694d2ca7fd51f1f51a451abb0baa067ccf6966367a8"
+1 -2
View File
@@ -38,7 +38,7 @@ dependencies = [
"matplotlib (==3.10.8)",
"reportlab (==4.4.10)",
"neo4j (==6.1.0)",
"cartography (==0.132.0)",
"cartography (==0.135.0)",
"gevent (==25.9.1)",
"werkzeug (==3.1.7)",
"sqlparse (==0.5.5)",
@@ -62,7 +62,6 @@ django-silk = "5.3.2"
docker = "7.1.0"
filelock = "3.20.3"
freezegun = "1.5.1"
marshmallow = "==3.26.2"
mypy = "1.10.1"
pylint = "3.2.5"
pytest = "9.0.3"
@@ -0,0 +1,23 @@
from django.db import migrations
TASK_NAME = "attack-paths-cleanup-stale-scans"
def set_cleanup_priority(apps, schema_editor):
PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
PeriodicTask.objects.filter(name=TASK_NAME).update(priority=0)
def unset_cleanup_priority(apps, schema_editor):
PeriodicTask = apps.get_model("django_celery_beat", "PeriodicTask")
PeriodicTask.objects.filter(name=TASK_NAME).update(priority=None)
class Migration(migrations.Migration):
dependencies = [
("api", "0089_backfill_finding_group_status_muted"),
]
operations = [
migrations.RunPython(set_cleanup_priority, unset_cleanup_priority),
]
+1 -1
View File
@@ -15466,7 +15466,7 @@ class TestFindingGroupViewSet:
attrs = data[0]["attributes"]
assert attrs["status"] == "FAIL"
assert attrs["muted"] is True
assert attrs["fail_count"] == 2
assert attrs["fail_count"] == 0
assert attrs["fail_muted_count"] == 2
assert attrs["pass_muted_count"] == 0
assert attrs["manual_muted_count"] == 0
+23 -21
View File
@@ -7127,17 +7127,16 @@ class FindingGroupViewSet(BaseRLSViewSet):
output_field=IntegerField(),
)
# `pass_count`, `fail_count` and `manual_count` count *every* finding
# for the check (muted or not) so the aggregated `status` reflects the
# underlying check outcome regardless of mute state. Whether the group
# is actionable is signalled by the orthogonal `muted` flag below.
# `pass_count`, `fail_count` and `manual_count` only count non-muted
# findings. Muted findings are tracked separately via the
# `*_muted_count` fields.
return (
queryset.values("check_id")
.annotate(
severity_order=Max(severity_case),
pass_count=Count("id", filter=Q(status="PASS")),
fail_count=Count("id", filter=Q(status="FAIL")),
manual_count=Count("id", filter=Q(status="MANUAL")),
pass_count=Count("id", filter=Q(status="PASS", muted=False)),
fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
manual_count=Count("id", filter=Q(status="MANUAL", muted=False)),
pass_muted_count=Count("id", filter=Q(status="PASS", muted=True)),
fail_muted_count=Count("id", filter=Q(status="FAIL", muted=True)),
manual_muted_count=Count("id", filter=Q(status="MANUAL", muted=True)),
@@ -7282,12 +7281,14 @@ class FindingGroupViewSet(BaseRLSViewSet):
# finding-level aggregation path.
row.pop("nonmuted_count", None)
# Compute aggregated status. Counts are inclusive of muted findings,
# so the underlying check outcome surfaces even when the group is
# fully muted.
if row.get("fail_count", 0) > 0:
# Compute aggregated status from non-muted counts first, then
# fall back to muted counts so fully-muted groups still reflect
# the underlying check outcome.
total_fail = row.get("fail_count", 0) + row.get("fail_muted_count", 0)
total_pass = row.get("pass_count", 0) + row.get("pass_muted_count", 0)
if total_fail > 0:
row["status"] = "FAIL"
elif row.get("pass_count", 0) > 0:
elif total_pass > 0:
row["status"] = "PASS"
else:
row["status"] = "MANUAL"
@@ -7387,9 +7388,12 @@ class FindingGroupViewSet(BaseRLSViewSet):
if computed_params.get("status") or computed_params.getlist("status__in"):
queryset = queryset.annotate(
total_fail=F("fail_count") + F("fail_muted_count"),
total_pass=F("pass_count") + F("pass_muted_count"),
).annotate(
aggregated_status=Case(
When(fail_count__gt=0, then=Value("FAIL")),
When(pass_count__gt=0, then=Value("PASS")),
When(total_fail__gt=0, then=Value("FAIL")),
When(total_pass__gt=0, then=Value("PASS")),
default=Value("MANUAL"),
output_field=CharField(),
)
@@ -7773,16 +7777,14 @@ class FindingGroupViewSet(BaseRLSViewSet):
sort_param, self._FINDING_GROUP_SORT_MAP
)
if ordering:
# status_order is annotated on demand so groups can be sorted by
# their aggregated status (FAIL > PASS > MANUAL), mirroring the
# priority used in _post_process_aggregation. Counts are
# inclusive of muted findings, so the underlying check outcome
# surfaces even for fully muted groups.
if any(field.lstrip("-") == "status_order" for field in ordering):
aggregated_queryset = aggregated_queryset.annotate(
total_fail_for_sort=F("fail_count") + F("fail_muted_count"),
total_pass_for_sort=F("pass_count") + F("pass_muted_count"),
).annotate(
status_order=Case(
When(fail_count__gt=0, then=Value(3)),
When(pass_count__gt=0, then=Value(2)),
When(total_fail_for_sort__gt=0, then=Value(3)),
When(total_pass_for_sort__gt=0, then=Value(2)),
default=Value(1),
output_field=IntegerField(),
)
+3 -1
View File
@@ -17,8 +17,10 @@ celery_app.config_from_object("django.conf:settings", namespace="CELERY")
celery_app.conf.update(result_extended=True, result_expires=None)
celery_app.conf.broker_transport_options = {
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT,
"queue_order_strategy": "priority",
}
celery_app.conf.task_default_priority = 6
celery_app.conf.result_backend_transport_options = {
"visibility_timeout": BROKER_VISIBILITY_TIMEOUT
}
+46 -10
View File
@@ -1,6 +1,8 @@
# Portions of this file are based on code from the Cartography project
# (https://github.com/cartography-cncf/cartography), which is licensed under the Apache 2.0 License.
import time
from typing import Any
import aioboto3
@@ -33,7 +35,7 @@ def start_aws_ingestion(
For the scan progress updates:
- The caller of this function (`tasks.jobs.attack_paths.scan.run`) has set it to 2.
- When the control returns to the caller, it will be set to 95.
- When the control returns to the caller, it will be set to 93.
"""
# Initialize variables common to all jobs
@@ -89,34 +91,50 @@ def start_aws_ingestion(
logger.info(
f"Syncing function permission_relationships for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.RESOURCE_FUNCTIONS["permission_relationships"](**sync_args)
logger.info(
f"Synced function permission_relationships for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 88)
if "resourcegroupstaggingapi" in requested_syncs:
logger.info(
f"Syncing function resourcegroupstaggingapi for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.RESOURCE_FUNCTIONS["resourcegroupstaggingapi"](**sync_args)
logger.info(
f"Synced function resourcegroupstaggingapi for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 89)
logger.info(
f"Syncing ec2_iaminstanceprofile scoped analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_scoped_analysis_job(
"aws_ec2_iaminstanceprofile.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced ec2_iaminstanceprofile scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 90)
logger.info(
f"Syncing lambda_ecr analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_analysis_job(
"aws_lambda_ecr.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced lambda_ecr analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
if all(
s in requested_syncs
@@ -125,25 +143,34 @@ def start_aws_ingestion(
logger.info(
f"Syncing lb_container_exposure scoped analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_scoped_analysis_job(
"aws_lb_container_exposure.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced lb_container_exposure scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
if all(s in requested_syncs for s in ["ec2:network_acls", "ec2:load_balancer_v2"]):
logger.info(
f"Syncing lb_nacl_direct scoped analysis for AWS account {prowler_api_provider.uid}"
)
t0 = time.perf_counter()
cartography_aws.run_scoped_analysis_job(
"aws_lb_nacl_direct.json",
neo4j_session,
common_job_parameters,
)
logger.info(
f"Synced lb_nacl_direct scoped analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 91)
logger.info(f"Syncing metadata for AWS account {prowler_api_provider.uid}")
t0 = time.perf_counter()
cartography_aws.merge_module_sync_metadata(
neo4j_session,
group_type="AWSAccount",
@@ -152,24 +179,23 @@ def start_aws_ingestion(
update_tag=cartography_config.update_tag,
stat_handler=cartography_aws.stat_handler,
)
logger.info(
f"Synced metadata for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 92)
# Removing the added extra field
del common_job_parameters["AWS_ID"]
logger.info(f"Syncing cleanup_job for AWS account {prowler_api_provider.uid}")
cartography_aws.run_cleanup_job(
"aws_post_ingestion_principals_cleanup.json",
neo4j_session,
common_job_parameters,
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)
logger.info(f"Syncing analysis for AWS account {prowler_api_provider.uid}")
t0 = time.perf_counter()
cartography_aws._perform_aws_analysis(
requested_syncs, neo4j_session, common_job_parameters
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)
logger.info(
f"Synced analysis for AWS account {prowler_api_provider.uid} in {time.perf_counter() - t0:.3f}s"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 93)
return failed_syncs
@@ -234,6 +260,8 @@ def sync_aws_account(
)
try:
func_t0 = time.perf_counter()
# `ecr:image_layers` uses `aioboto3_session` instead of `boto3_session`
if func_name == "ecr:image_layers":
cartography_aws.RESOURCE_FUNCTIONS[func_name](
@@ -257,7 +285,15 @@ def sync_aws_account(
else:
cartography_aws.RESOURCE_FUNCTIONS[func_name](**sync_args)
logger.info(
f"Synced function {func_name} for AWS account {prowler_api_provider.uid} in {time.perf_counter() - func_t0:.3f}s"
)
except Exception as e:
logger.info(
f"Synced function {func_name} for AWS account {prowler_api_provider.uid} in {time.perf_counter() - func_t0:.3f}s (FAILED)"
)
exception_message = utils.stringify_exception(
e, f"Exception for AWS sync function: {func_name}"
)
@@ -8,9 +8,9 @@ from tasks.jobs.attack_paths import aws
# Batch size for Neo4j write operations (resource labeling, cleanup)
BATCH_SIZE = env.int("ATTACK_PATHS_BATCH_SIZE", 1000)
# Batch size for Postgres findings fetch (keyset pagination page size)
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 500)
FINDINGS_BATCH_SIZE = env.int("ATTACK_PATHS_FINDINGS_BATCH_SIZE", 1000)
# Batch size for temp-to-tenant graph sync (nodes and relationships per cursor page)
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 250)
SYNC_BATCH_SIZE = env.int("ATTACK_PATHS_SYNC_BATCH_SIZE", 1000)
# Neo4j internal labels (Prowler-specific, not provider-specific)
# - `Internet`: Singleton node representing external internet access for exposed-resource queries
@@ -12,6 +12,7 @@ from typing import Any, Generator
from uuid import UUID
import neo4j
from cartography.config import Config as CartographyConfig
from celery.utils.log import get_task_logger
from tasks.jobs.attack_paths.config import (
@@ -86,17 +87,21 @@ def analysis(
prowler_api_provider: Provider,
scan_id: str,
config: CartographyConfig,
) -> None:
) -> tuple[int, int]:
"""
Main entry point for Prowler findings analysis.
Adds resource labels and loads findings.
Returns (labeled_nodes, findings_loaded).
"""
add_resource_label(
total_labeled = add_resource_label(
neo4j_session, prowler_api_provider.provider, str(prowler_api_provider.uid)
)
findings_data = stream_findings_with_resources(prowler_api_provider, scan_id)
load_findings(neo4j_session, findings_data, prowler_api_provider, config)
total_loaded = load_findings(
neo4j_session, findings_data, prowler_api_provider, config
)
return total_labeled, total_loaded
def add_resource_label(
@@ -146,12 +151,11 @@ def load_findings(
findings_batches: Generator[list[dict[str, Any]], None, None],
prowler_api_provider: Provider,
config: CartographyConfig,
) -> None:
) -> int:
"""Load Prowler findings into the graph, linking them to resources."""
query = render_cypher_template(
INSERT_FINDING_TEMPLATE,
{
"__ROOT_NODE_LABEL__": get_root_node_label(prowler_api_provider.provider),
"__NODE_UID_FIELD__": get_node_uid_field(prowler_api_provider.provider),
"__RESOURCE_LABEL__": get_provider_resource_label(
prowler_api_provider.provider
@@ -160,7 +164,6 @@ def load_findings(
)
parameters = {
"provider_uid": str(prowler_api_provider.uid),
"last_updated": config.update_tag,
"prowler_version": ProwlerConfig.prowler_version,
}
@@ -178,6 +181,7 @@ def load_findings(
neo4j_session.run(query, parameters)
logger.info(f"Finished loading {total_records} records in {batch_num} batches")
return total_records
# Findings Streaming (Generator-based)
@@ -32,17 +32,14 @@ ADD_RESOURCE_LABEL_TEMPLATE = """
"""
INSERT_FINDING_TEMPLATE = f"""
MATCH (account:__ROOT_NODE_LABEL__ {{id: $provider_uid}})
UNWIND $findings_data AS finding_data
OPTIONAL MATCH (account)-->(resource_by_uid:__RESOURCE_LABEL__)
WHERE resource_by_uid.__NODE_UID_FIELD__ = finding_data.resource_uid
WITH account, finding_data, resource_by_uid
OPTIONAL MATCH (resource_by_uid:__RESOURCE_LABEL__ {{__NODE_UID_FIELD__: finding_data.resource_uid}})
WITH finding_data, resource_by_uid
OPTIONAL MATCH (account)-->(resource_by_id:__RESOURCE_LABEL__)
OPTIONAL MATCH (resource_by_id:__RESOURCE_LABEL__ {{id: finding_data.resource_uid}})
WHERE resource_by_uid IS NULL
AND resource_by_id.id = finding_data.resource_uid
WITH account, finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WITH finding_data, COALESCE(resource_by_uid, resource_by_id) AS resource
WHERE resource IS NOT NULL
MERGE (finding:{PROWLER_FINDING_LABEL} {{id: finding_data.id}})
+38 -10
View File
@@ -55,6 +55,7 @@ exception propagates to Celery.
import logging
import time
from typing import Any
from cartography.config import Config as CartographyConfig
@@ -144,6 +145,12 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
attack_paths_scan, task_id, tenant_cartography_config
)
scan_t0 = time.perf_counter()
logger.info(
f"Starting Attack Paths scan ({attack_paths_scan.id}) for "
f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
)
subgraph_dropped = False
sync_completed = False
provider_gated = False
@@ -169,6 +176,7 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 2)
# The real scan, where iterates over cloud services
t0 = time.perf_counter()
ingestion_exceptions = utils.call_within_event_loop(
cartography_ingestion_function,
tmp_neo4j_session,
@@ -177,19 +185,23 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
prowler_sdk_provider,
attack_paths_scan,
)
logger.info(
f"Cartography ingestion completed in {time.perf_counter() - t0:.3f}s "
f"(failed_syncs={len(ingestion_exceptions)})"
)
# Post-processing: Just keeping it to be more Cartography compliant
logger.info(
f"Syncing Cartography ontology for AWS account {prowler_api_provider.uid}"
)
cartography_ontology.run(tmp_neo4j_session, tmp_cartography_config)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 94)
logger.info(
f"Syncing Cartography analysis for AWS account {prowler_api_provider.uid}"
)
cartography_analysis.run(tmp_neo4j_session, tmp_cartography_config)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 95)
# Creating Internet node and CAN_ACCESS relationships
logger.info(
@@ -198,14 +210,20 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
internet.analysis(
tmp_neo4j_session, prowler_api_provider, tmp_cartography_config
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 96)
# Adding Prowler Finding nodes and relationships
logger.info(
f"Syncing Prowler analysis for AWS account {prowler_api_provider.uid}"
)
findings.analysis(
t0 = time.perf_counter()
labeled_nodes, findings_loaded = findings.analysis(
tmp_neo4j_session, prowler_api_provider, scan_id, tmp_cartography_config
)
logger.info(
f"Prowler analysis completed in {time.perf_counter() - t0:.3f}s "
f"(findings={findings_loaded}, labeled_nodes={labeled_nodes})"
)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 97)
logger.info(
@@ -227,22 +245,33 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
logger.info(f"Deleting existing provider graph in {tenant_database_name}")
db_utils.set_provider_graph_data_ready(attack_paths_scan, False)
provider_gated = True
graph_database.drop_subgraph(
t0 = time.perf_counter()
deleted_nodes = graph_database.drop_subgraph(
database=tenant_database_name,
provider_id=str(prowler_api_provider.id),
)
logger.info(
f"Deleted existing provider graph in {time.perf_counter() - t0:.3f}s "
f"(deleted_nodes={deleted_nodes})"
)
subgraph_dropped = True
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 98)
logger.info(
f"Syncing graph from {tmp_database_name} into {tenant_database_name}"
)
sync.sync_graph(
t0 = time.perf_counter()
sync_result = sync.sync_graph(
source_database=tmp_database_name,
target_database=tenant_database_name,
tenant_id=str(prowler_api_provider.tenant_id),
provider_id=str(prowler_api_provider.id),
)
logger.info(
f"Synced graph in {time.perf_counter() - t0:.3f}s "
f"(nodes={sync_result['nodes']}, relationships={sync_result['relationships']})"
)
sync_completed = True
db_utils.set_graph_data_ready(attack_paths_scan, True)
db_utils.update_attack_paths_scan_progress(attack_paths_scan, 99)
@@ -250,17 +279,16 @@ def run(tenant_id: str, scan_id: str, task_id: str) -> dict[str, Any]:
logger.info(f"Clearing Neo4j cache for database {tenant_database_name}")
graph_database.clear_cache(tenant_database_name)
logger.info(
f"Completed Cartography ({attack_paths_scan.id}) for "
f"{prowler_api_provider.provider.upper()} provider {prowler_api_provider.id}"
)
logger.info(f"Dropping temporary Neo4j database {tmp_database_name}")
graph_database.drop_database(tmp_database_name)
db_utils.finish_attack_paths_scan(
attack_paths_scan, StateChoices.COMPLETED, ingestion_exceptions
)
logger.info(
f"Attack Paths scan completed in {time.perf_counter() - scan_t0:.3f}s "
f"(state=completed, failed_syncs={len(ingestion_exceptions)})"
)
return ingestion_exceptions
except Exception as e:
@@ -5,6 +5,8 @@ This module handles syncing graph data from temporary scan databases
to the tenant database, adding provider isolation labels and properties.
"""
import time
from collections import defaultdict
from typing import Any
@@ -81,6 +83,7 @@ def sync_nodes(
Source and target sessions are opened sequentially per batch to avoid
holding two Bolt connections simultaneously for the entire sync duration.
"""
t0 = time.perf_counter()
last_id = -1
total_synced = 0
@@ -117,7 +120,7 @@ def sync_nodes(
total_synced += batch_count
logger.info(
f"Synced {total_synced} nodes from {source_database} to {target_database}"
f"Synced {total_synced} nodes from {source_database} to {target_database} in {time.perf_counter() - t0:.3f}s"
)
return total_synced
@@ -136,6 +139,7 @@ def sync_relationships(
Source and target sessions are opened sequentially per batch to avoid
holding two Bolt connections simultaneously for the entire sync duration.
"""
t0 = time.perf_counter()
last_id = -1
total_synced = 0
@@ -166,7 +170,7 @@ def sync_relationships(
total_synced += batch_count
logger.info(
f"Synced {total_synced} relationships from {source_database} to {target_database}"
f"Synced {total_synced} relationships from {source_database} to {target_database} in {time.perf_counter() - t0:.3f}s"
)
return total_synced
+15 -9
View File
@@ -752,11 +752,19 @@ def _process_finding_micro_batch(
)
if mappings_to_create:
ResourceFindingMapping.objects.bulk_create(
created_mappings = ResourceFindingMapping.objects.bulk_create(
mappings_to_create,
batch_size=SCAN_DB_BATCH_SIZE,
ignore_conflicts=True,
unique_fields=["tenant_id", "resource_id", "finding_id"],
)
inserted = sum(1 for m in created_mappings if m.pk)
if inserted != len(mappings_to_create):
logger.error(
f"scan {scan_instance.id}: expected "
f"{len(mappings_to_create)} ResourceFindingMapping rows, "
f"inserted {inserted}. Rolling back micro-batch."
)
# Update finding denormalized arrays
findings_to_update = []
@@ -1804,11 +1812,9 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
)
# Aggregate findings by check_id for this scan.
# `pass_count`, `fail_count` and `manual_count` count *every* finding
# in this group, regardless of mute state, so the aggregated `status`
# always reflects the underlying check outcome (FAIL > PASS > MANUAL)
# even when the group is fully muted. The orthogonal `muted` flag is
# what tells whether the group has any actionable (non-muted) findings.
# `pass_count`, `fail_count` and `manual_count` only count non-muted
# findings. Muted findings are tracked separately via the
# `*_muted_count` fields.
aggregated = (
Finding.objects.filter(
tenant_id=tenant_id,
@@ -1817,9 +1823,9 @@ def aggregate_finding_group_summaries(tenant_id: str, scan_id: str):
.values("check_id")
.annotate(
severity_order=Max(severity_case),
pass_count=Count("id", filter=Q(status="PASS")),
fail_count=Count("id", filter=Q(status="FAIL")),
manual_count=Count("id", filter=Q(status="MANUAL")),
pass_count=Count("id", filter=Q(status="PASS", muted=False)),
fail_count=Count("id", filter=Q(status="FAIL", muted=False)),
manual_count=Count("id", filter=Q(status="MANUAL", muted=False)),
pass_muted_count=Count("id", filter=Q(status="PASS", muted=True)),
fail_muted_count=Count("id", filter=Q(status="FAIL", muted=True)),
manual_muted_count=Count("id", filter=Q(status="MANUAL", muted=True)),
@@ -38,11 +38,14 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.finish_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph", return_value=0)
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -188,7 +191,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -287,7 +290,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -390,7 +393,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -489,14 +492,17 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch(
"tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
side_effect=RuntimeError("drop failed"),
)
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -609,7 +615,7 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -718,11 +724,14 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch("tasks.jobs.attack_paths.scan.graph_database.drop_subgraph")
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -833,14 +842,17 @@ class TestAttackPathsRun:
@patch("tasks.jobs.attack_paths.scan.db_utils.set_provider_graph_data_ready")
@patch("tasks.jobs.attack_paths.scan.db_utils.update_attack_paths_scan_progress")
@patch("tasks.jobs.attack_paths.scan.db_utils.starting_attack_paths_scan")
@patch("tasks.jobs.attack_paths.scan.sync.sync_graph")
@patch(
"tasks.jobs.attack_paths.scan.sync.sync_graph",
return_value={"nodes": 0, "relationships": 0},
)
@patch(
"tasks.jobs.attack_paths.scan.graph_database.drop_subgraph",
side_effect=RuntimeError("drop failed"),
)
@patch("tasks.jobs.attack_paths.scan.indexes.create_sync_indexes")
@patch("tasks.jobs.attack_paths.scan.internet.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis")
@patch("tasks.jobs.attack_paths.scan.findings.analysis", return_value=(0, 0))
@patch("tasks.jobs.attack_paths.scan.indexes.create_findings_indexes")
@patch("tasks.jobs.attack_paths.scan.cartography_ontology.run")
@patch("tasks.jobs.attack_paths.scan.cartography_analysis.run")
@@ -1274,10 +1286,6 @@ class TestAttackPathsFindingsHelpers:
mock_session = MagicMock()
with (
patch(
"tasks.jobs.attack_paths.findings.get_root_node_label",
return_value="AWSAccount",
),
patch(
"tasks.jobs.attack_paths.findings.get_node_uid_field",
return_value="arn",
@@ -1294,7 +1302,6 @@ class TestAttackPathsFindingsHelpers:
assert mock_session.run.call_count == 2
for call_args in mock_session.run.call_args_list:
params = call_args.args[1]
assert params["provider_uid"] == str(provider.uid)
assert params["last_updated"] == config.update_tag
assert "findings_data" in params
@@ -1673,10 +1680,6 @@ class TestAttackPathsFindingsHelpers:
yield # Make it a generator
with (
patch(
"tasks.jobs.attack_paths.findings.get_root_node_label",
return_value="AWSAccount",
),
patch(
"tasks.jobs.attack_paths.findings.get_node_uid_field",
return_value="arn",
Generated
+74 -20
View File
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.3.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.
[[package]]
name = "about-time"
@@ -1267,19 +1267,19 @@ typing-extensions = ">=4.6.0"
[[package]]
name = "azure-mgmt-resource"
version = "23.3.0"
version = "24.0.0"
description = "Microsoft Azure Resource Management Client Library for Python"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "azure_mgmt_resource-23.3.0-py3-none-any.whl", hash = "sha256:ab216ee28e29db6654b989746e0c85a1181f66653929d2cb6e48fba66d9af323"},
{file = "azure_mgmt_resource-23.3.0.tar.gz", hash = "sha256:fc4f1fd8b6aad23f8af4ed1f913df5f5c92df117449dc354fea6802a2829fea4"},
{file = "azure_mgmt_resource-24.0.0-py3-none-any.whl", hash = "sha256:27b32cd223e2784269f5a0db3c282042886ee4072d79cedc638438ece7cd0df4"},
{file = "azure_mgmt_resource-24.0.0.tar.gz", hash = "sha256:cf6b8995fcdd407ac9ff1dd474087129429a1d90dbb1ac77f97c19b96237b265"},
]
[package.dependencies]
azure-common = ">=1.1"
azure-mgmt-core = ">=1.3.2"
azure-mgmt-core = ">=1.5.0"
isodate = ">=0.6.1"
typing-extensions = ">=4.6.0"
@@ -1425,6 +1425,64 @@ typing-extensions = ">=4.6.0"
[package.extras]
aio = ["azure-core[aio] (>=1.30.0)"]
[[package]]
name = "backports-datetime-fromisoformat"
version = "2.0.3"
description = "Backport of Python 3.11's datetime.fromisoformat"
optional = false
python-versions = ">3"
groups = ["dev"]
markers = "python_version == \"3.10\""
files = [
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f681f638f10588fa3c101ee9ae2b63d3734713202ddfcfb6ec6cea0778a29d4"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:cd681460e9142f1249408e5aee6d178c6d89b49e06d44913c8fdfb6defda8d1c"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:ee68bc8735ae5058695b76d3bb2aee1d137c052a11c8303f1e966aa23b72b65b"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8273fe7932db65d952a43e238318966eab9e49e8dd546550a41df12175cc2be4"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39d57ea50aa5a524bb239688adc1d1d824c31b6094ebd39aa164d6cadb85de22"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ac6272f87693e78209dc72e84cf9ab58052027733cd0721c55356d3c881791cf"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:44c497a71f80cd2bcfc26faae8857cf8e79388e3d5fbf79d2354b8c360547d58"},
{file = "backports_datetime_fromisoformat-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:6335a4c9e8af329cb1ded5ab41a666e1448116161905a94e054f205aa6d263bc"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2e4b66e017253cdbe5a1de49e0eecff3f66cd72bcb1229d7db6e6b1832c0443"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:43e2d648e150777e13bbc2549cc960373e37bf65bd8a5d2e0cef40e16e5d8dd0"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:4ce6326fd86d5bae37813c7bf1543bae9e4c215ec6f5afe4c518be2635e2e005"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7c8fac333bf860208fd522a5394369ee3c790d0aa4311f515fcc4b6c5ef8d75"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24a4da5ab3aa0cc293dc0662a0c6d1da1a011dc1edcbc3122a288cfed13a0b45"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58ea11e3bf912bd0a36b0519eae2c5b560b3cb972ea756e66b73fb9be460af01"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8a375c7dbee4734318714a799b6c697223e4bbb57232af37fbfff88fb48a14c6"},
{file = "backports_datetime_fromisoformat-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:ac677b1664c4585c2e014739f6678137c8336815406052349c85898206ec7061"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66ce47ee1ba91e146149cf40565c3d750ea1be94faf660ca733d8601e0848147"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8b7e069910a66b3bba61df35b5f879e5253ff0821a70375b9daf06444d046fa4"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:a3b5d1d04a9e0f7b15aa1e647c750631a873b298cdd1255687bb68779fe8eb35"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1b95986430e789c076610aea704db20874f0781b8624f648ca9fb6ef67c6e1"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffe5f793db59e2f1d45ec35a1cf51404fdd69df9f6952a0c87c3060af4c00e32"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:620e8e73bd2595dfff1b4d256a12b67fce90ece3de87b38e1dde46b910f46f4d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4cf9c0a985d68476c1cabd6385c691201dda2337d7453fb4da9679ce9f23f4e7"},
{file = "backports_datetime_fromisoformat-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:d144868a73002e6e2e6fef72333e7b0129cecdd121aa8f1edba7107fd067255d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e81b26497a17c29595bc7df20bc6a872ceea5f8c9d6537283945d4b6396aec10"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:5ba00ead8d9d82fd6123eb4891c566d30a293454e54e32ff7ead7644f5f7e575"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:24d574cb4072e1640b00864e94c4c89858033936ece3fc0e1c6f7179f120d0a8"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9735695a66aad654500b0193525e590c693ab3368478ce07b34b443a1ea5e824"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63d39709e17eb72685d052ac82acf0763e047f57c86af1b791505b1fec96915d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:1ea2cc84224937d6b9b4c07f5cb7c667f2bde28c255645ba27f8a675a7af8234"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4024e6d35a9fdc1b3fd6ac7a673bd16cb176c7e0b952af6428b7129a70f72cce"},
{file = "backports_datetime_fromisoformat-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5e2dcc94dc9c9ab8704409d86fcb5236316e9dcef6feed8162287634e3568f4c"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fa2de871801d824c255fac7e5e7e50f2be6c9c376fd9268b40c54b5e9da91f42"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:1314d4923c1509aa9696712a7bc0c7160d3b7acf72adafbbe6c558d523f5d491"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b750ecba3a8815ad8bc48311552f3f8ab99dd2326d29df7ff670d9c49321f48f"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d5117dce805d8a2f78baeddc8c6127281fa0a5e2c40c6dd992ba6b2b367876"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb35f607bd1cbe37b896379d5f5ed4dc298b536f4b959cb63180e05cacc0539d"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:61c74710900602637d2d145dda9720c94e303380803bf68811b2a151deec75c2"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ece59af54ebf67ecbfbbf3ca9066f5687879e36527ad69d8b6e3ac565d565a62"},
{file = "backports_datetime_fromisoformat-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:d0a7c5f875068efe106f62233bc712d50db4d07c13c7db570175c7857a7b5dbd"},
{file = "backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90e202e72a3d5aae673fcc8c9a4267d56b2f532beeb9173361293625fe4d2039"},
{file = "backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2df98ef1b76f5a58bb493dda552259ba60c3a37557d848e039524203951c9f06"},
{file = "backports_datetime_fromisoformat-2.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7100adcda5e818b5a894ad0626e38118bb896a347f40ebed8981155675b9ba7b"},
{file = "backports_datetime_fromisoformat-2.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e410383f5d6a449a529d074e88af8bc80020bb42b402265f9c02c8358c11da5"},
{file = "backports_datetime_fromisoformat-2.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2797593760da6bcc32c4a13fa825af183cd4bfd333c60b3dbf84711afca26ef"},
{file = "backports_datetime_fromisoformat-2.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35a144fd681a0bea1013ccc4cd3fd4dc758ea17ee23dca019c02b82ec46fc0c4"},
{file = "backports_datetime_fromisoformat-2.0.3.tar.gz", hash = "sha256:b58edc8f517b66b397abc250ecc737969486703a66eb97e01e6d51291b1a139d"},
]
[[package]]
name = "bandit"
version = "1.8.3"
@@ -3350,23 +3408,19 @@ files = [
[[package]]
name = "marshmallow"
version = "3.26.2"
version = "4.3.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.10"
groups = ["dev"]
files = [
{file = "marshmallow-3.26.2-py3-none-any.whl", hash = "sha256:013fa8a3c4c276c24d26d84ce934dc964e2aa794345a0f8c7e5a7191482c8a73"},
{file = "marshmallow-3.26.2.tar.gz", hash = "sha256:bbe2adb5a03e6e3571b573f42527c6fe926e17467833660bebd11593ab8dfd57"},
{file = "marshmallow-4.3.0-py3-none-any.whl", hash = "sha256:46c4fe6984707e3cbd485dfebbf0a59874f58d695aad05c1668d15e8c6e13b46"},
{file = "marshmallow-4.3.0.tar.gz", hash = "sha256:fb43c53b3fe240b8f6af37223d6ef1636f927ad9bea8ab323afad95dff090880"},
]
[package.dependencies]
packaging = ">=17.0"
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"]
tests = ["pytest", "simplejson"]
backports-datetime-fromisoformat = {version = "*", markers = "python_version < \"3.11\""}
typing-extensions = {version = "*", markers = "python_version < \"3.11\""}
[[package]]
name = "mccabe"
@@ -3662,14 +3716,14 @@ dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"]
[[package]]
name = "msgraph-sdk"
version = "1.23.0"
version = "1.55.0"
description = "The Microsoft Graph Python SDK"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "msgraph_sdk-1.23.0-py3-none-any.whl", hash = "sha256:58e0047b4ca59fd82022c02cd73fec0170a3d84f3b76721e3db2a0314df9a58a"},
{file = "msgraph_sdk-1.23.0.tar.gz", hash = "sha256:6dd1ba9a46f5f0ce8599fd9610133adbd9d1493941438b5d3632fce9e55ed607"},
{file = "msgraph_sdk-1.55.0-py3-none-any.whl", hash = "sha256:c8e68ebc4b88af5111de312e7fa910a4e76ddf48a4534feadb1fb8a411c48cfc"},
{file = "msgraph_sdk-1.55.0.tar.gz", hash = "sha256:6df691a31954a050d26b8a678968017e157d940fb377f2a8a4e17a9741b98756"},
]
[package.dependencies]
@@ -6681,4 +6735,4 @@ files = [
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.13"
content-hash = "786921163bb46716defae1d9de1df001af2abf17edd3061165638707bcd28ce4"
content-hash = "09ce4507a464b318702ed8c6a738f3bb1bc4cc6ff5a50a9c2884f560af9ab034"
+9
View File
@@ -4,11 +4,20 @@ All notable changes to the **Prowler SDK** are documented in this file.
## [5.24.1] (Prowler UNRELEASED)
### 🚀 Added
- `entra_conditional_access_policy_p1_license_utilization` check for m365 provider [(#10783)](https://github.com/prowler-cloud/prowler/pull/10783)
### 🔄 Changed
- Bumped `msgraph-sdk` from 1.23.0 to 1.55.0 and `azure-mgmt-resource` from 23.3.0 to 24.0.0, removing `marshmallow` as it is a transitive dev dependency [(#10733)](https://github.com/prowler-cloud/prowler/pull/10733)
### 🐞 Fixed
- Cloudflare account-scoped API tokens failing connection test in the App with `CloudflareUserTokenRequiredError` [(#10723)](https://github.com/prowler-cloud/prowler/pull/10723)
- `prowler image --registry` failing with `ImageNoImagesProvidedError` due to registry arguments not being forwarded to `ImageProvider` in `init_global_provider` [(#10470)](https://github.com/prowler-cloud/prowler/pull/10470)
- Google Workspace Calendar checks false FAIL on unconfigured settings with secure Google defaults [(#10726)](https://github.com/prowler-cloud/prowler/pull/10726)
- Cloudflare `validate_credentials` can hang in an infinite pagination loop when the SDK repeats accounts, blocking connection tests [(#10771)](https://github.com/prowler-cloud/prowler/pull/10771)
---
+16 -13
View File
@@ -206,11 +206,12 @@
"admincenter_users_admins_reduced_license_footprint",
"entra_admin_portals_access_restriction",
"entra_admin_users_phishing_resistant_mfa_enabled",
"entra_conditional_access_policy_mfa_enforced_for_guest_users",
"entra_conditional_access_policy_block_o365_elevated_insider_risk",
"entra_conditional_access_policy_block_unknown_device_platforms",
"entra_conditional_access_policy_directory_sync_account_excluded",
"entra_conditional_access_policy_corporate_device_sign_in_frequency_enforced",
"entra_conditional_access_policy_directory_sync_account_excluded",
"entra_conditional_access_policy_mfa_enforced_for_guest_users",
"entra_conditional_access_policy_p1_license_utilization",
"entra_policy_guest_users_access_restrictions",
"entra_seamless_sso_disabled"
]
@@ -248,15 +249,16 @@
"defenderxdr_endpoint_privileged_user_exposed_credentials",
"entra_admin_users_mfa_enabled",
"entra_admin_users_sign_in_frequency_enabled",
"entra_break_glass_account_fido2_security_key_registered",
"entra_conditional_access_policy_mfa_enforced_for_guest_users",
"entra_default_app_management_policy_enabled",
"entra_all_apps_conditional_access_coverage",
"entra_conditional_access_policy_device_registration_mfa_required",
"entra_intune_enrollment_sign_in_frequency_every_time",
"entra_break_glass_account_fido2_security_key_registered",
"entra_conditional_access_policy_block_unknown_device_platforms",
"entra_conditional_access_policy_device_code_flow_blocked",
"entra_conditional_access_policy_corporate_device_sign_in_frequency_enforced",
"entra_conditional_access_policy_device_code_flow_blocked",
"entra_conditional_access_policy_device_registration_mfa_required",
"entra_conditional_access_policy_mfa_enforced_for_guest_users",
"entra_conditional_access_policy_p1_license_utilization",
"entra_default_app_management_policy_enabled",
"entra_intune_enrollment_sign_in_frequency_every_time",
"entra_legacy_authentication_blocked",
"entra_managed_device_required_for_authentication",
"entra_seamless_sso_disabled",
@@ -718,16 +720,17 @@
"entra_admin_users_mfa_enabled",
"entra_admin_users_sign_in_frequency_enabled",
"entra_all_apps_conditional_access_coverage",
"entra_conditional_access_policy_device_registration_mfa_required",
"entra_conditional_access_policy_mfa_enforced_for_guest_users",
"entra_intune_enrollment_sign_in_frequency_every_time",
"entra_break_glass_account_fido2_security_key_registered",
"entra_conditional_access_policy_approved_client_app_required_for_mobile",
"entra_conditional_access_policy_block_unknown_device_platforms",
"entra_conditional_access_policy_device_code_flow_blocked",
"entra_conditional_access_policy_directory_sync_account_excluded",
"entra_conditional_access_policy_corporate_device_sign_in_frequency_enforced",
"entra_conditional_access_policy_device_code_flow_blocked",
"entra_conditional_access_policy_device_registration_mfa_required",
"entra_conditional_access_policy_directory_sync_account_excluded",
"entra_conditional_access_policy_mfa_enforced_for_guest_users",
"entra_conditional_access_policy_p1_license_utilization",
"entra_identity_protection_sign_in_risk_enabled",
"entra_intune_enrollment_sign_in_frequency_every_time",
"entra_managed_device_required_for_authentication",
"entra_seamless_sso_disabled",
"entra_users_mfa_enabled"
@@ -274,8 +274,12 @@ class CloudflareProvider(Provider):
for account in client.accounts.list():
account_id = getattr(account, "id", None)
# Prevent infinite loop - skip if we've seen this account
# Prevent infinite loop on repeated pages from the SDK paginator
if account_id in seen_account_ids:
logger.warning(
"Detected repeated Cloudflare account ID while listing accounts. "
"Stopping pagination to avoid an infinite loop."
)
break
seen_account_ids.add(account_id)
@@ -395,7 +399,20 @@ class CloudflareProvider(Provider):
# Fallback: try accounts.list()
try:
accounts = list(client.accounts.list())
accounts: list = []
seen_account_ids: set = set()
for account in client.accounts.list():
account_id = getattr(account, "id", None)
# Prevent infinite loop on repeated pages from the SDK paginator
if account_id in seen_account_ids:
logger.warning(
"Detected repeated Cloudflare account ID while validating credentials. "
"Stopping pagination to avoid an infinite loop."
)
break
seen_account_ids.add(account_id)
accounts.append(account)
if not accounts:
logger.error("CloudflareNoAccountsError: No accounts found")
raise CloudflareNoAccountsError(
@@ -0,0 +1,39 @@
{
"Provider": "m365",
"CheckID": "entra_conditional_access_policy_p1_license_utilization",
"CheckTitle": "P1 license entitlements ensure sufficient coverage for Conditional Access users",
"CheckType": [],
"ServiceName": "entra",
"SubServiceName": "",
"ResourceIdTemplate": "",
"Severity": "medium",
"ResourceType": "NotDefined",
"ResourceGroup": "IAM",
"Description": "Verifies that the entitled premium license count (P1 + P2, since P2 includes P1) covers all users actively using **P1-level Conditional Access** (regular plus guest). Unlicensed Conditional Access usage may violate Microsoft licensing terms. For risk-based (P2) Conditional Access, see `entra_conditional_access_policy_p2_license_utilization`.",
"Risk": "When more users consume P1-level Conditional Access than the organization has premium licenses for, it creates a **licensing compliance gap**. Microsoft may flag the tenant during audits, leading to retroactive charges or service restrictions. Additionally, unlicensed users may lose Conditional Access protection if Microsoft enforces entitlements.",
"RelatedUrl": "",
"AdditionalURLs": [
"https://learn.microsoft.com/en-us/entra/identity/conditional-access/overview",
"https://learn.microsoft.com/en-us/graph/api/resources/azureadpremiumlicenseinsight?view=graph-rest-beta",
"https://learn.microsoft.com/en-us/entra/fundamentals/licensing"
],
"Remediation": {
"Code": {
"CLI": "",
"NativeIaC": "",
"Other": "1. Navigate to the **Microsoft Entra admin center** (https://entra.microsoft.com).\n2. Go to **Identity** > **Billing** > **Licenses** > **Licensed features**.\n3. Review the **License utilization** report for P1 and P2 features.\n4. Purchase additional P1 or P2 licenses, or remove Conditional Access policy assignments for unlicensed users to bring entitlements in line with utilization.",
"Terraform": ""
},
"Recommendation": {
"Text": "Review the Microsoft Entra ID Premium license utilization report and ensure that combined P1 and P2 license entitlements match or exceed the number of users utilizing Conditional Access (regular and guest). Purchase additional licenses or adjust policy scope as needed.",
"Url": "https://hub.prowler.com/check/entra_conditional_access_policy_p1_license_utilization"
}
},
"Categories": [
"identity-access",
"e3"
],
"DependsOn": [],
"RelatedTo": [],
"Notes": "Uses Microsoft Graph beta reports/azureADPremiumLicenseInsight. Requires Reports.Read.All. Returns MANUAL when the tenant has no Microsoft Entra ID P1 or P2 license (Graph returns 403 missingLicense). Compares P1 Conditional Access utilization (regular + guest) against the combined P1 + P2 entitled license count, since P2 includes P1."
}
@@ -0,0 +1,64 @@
from prowler.lib.check.models import Check, CheckReportM365
from prowler.providers.m365.services.entra.entra_client import entra_client
class entra_conditional_access_policy_p1_license_utilization(Check):
    """Check P1-feature (Conditional Access) license coverage.

    Compares the number of users consuming **P1-level Conditional Access**
    (regular plus guest, exposed via ``p1FeatureUtilizations`` in
    ``reports/azureADPremiumLicenseInsight``) against the entitled premium
    license count. P2 entitlements include P1, so coverage is measured
    against the combined ``entitled_total_license_count`` (P1 + P2).
    For risk-based Conditional Access (P2), see
    ``entra_conditional_access_policy_p2_license_utilization``.

    - PASS: total premium entitlements cover all P1 Conditional Access users.
    - FAIL: P1 Conditional Access utilization exceeds entitled premium licenses.
    - MANUAL: license insight unavailable, typically because the tenant has no
      P1/P2 license (Microsoft Graph returns 403 ``missingLicense``) or the
      ``Reports.Read.All`` permission was not granted.
    """

    def execute(self) -> list[CheckReportM365]:
        """Execute the P1 license utilization check.

        Returns:
            A list with a single report describing the licensing coverage.
        """
        insight = entra_client.premium_license_insight
        # A single tenant-wide report; the raw insight (or an empty dict when
        # unavailable) is attached as the finding's resource.
        report = CheckReportM365(
            metadata=self.metadata(),
            resource=insight or {},
            resource_name="Premium License Insight",
            resource_id="azureADPremiumLicenseInsight",
        )
        if insight is None:
            # Insight retrieval failed upstream (missing license or missing
            # Graph permission) — a human has to confirm which, hence MANUAL.
            report.status = "MANUAL"
            report.status_extended = (
                "Could not retrieve Azure AD Premium license insight. "
                "Verify the tenant has at least one Microsoft Entra ID P1 or P2 "
                "license and that Reports.Read.All permission is granted."
            )
        else:
            entitled = insight.entitled_total_license_count
            utilized = insight.p1_licenses_utilized
            covered = entitled >= utilized
            report.status = "PASS" if covered else "FAIL"
            verb = "cover" if covered else "do not cover"
            report.status_extended = (
                f"Premium license entitlements ({entitled}) "
                f"{verb} all P1 Conditional Access users "
                f"({utilized})."
            )
        return [report]
@@ -5,6 +5,8 @@ from enum import Enum
from typing import Dict, List, Optional
from uuid import UUID
from kiota_abstractions.method import Method
from kiota_abstractions.request_information import RequestInformation
from msgraph.generated.models.o_data_errors.o_data_error import ODataError
from msgraph.generated.security.microsoft_graph_security_run_hunting_query.run_hunting_query_post_request_body import (
RunHuntingQueryPostRequestBody,
@@ -36,6 +38,7 @@ class Entra(M365Service):
user_accounts_status (dict): Dictionary of user account statuses.
oauth_apps (dict): Dictionary of OAuth applications from Defender XDR.
authentication_method_configurations (dict): Dictionary of authentication method configurations.
premium_license_insight (PremiumLicenseInsight): Azure AD Premium license utilization insight.
"""
def __init__(self, provider: M365Provider):
@@ -83,6 +86,7 @@ class Entra(M365Service):
self._get_oauth_apps(),
self._get_directory_sync_settings(),
self._get_authentication_method_configurations(),
self._get_premium_license_insight(),
)
)
@@ -98,6 +102,7 @@ class Entra(M365Service):
self.authentication_method_configurations: Dict[
str, AuthenticationMethodConfiguration
] = attributes[9]
self.premium_license_insight: Optional[PremiumLicenseInsight] = attributes[10]
self.user_accounts_status = {}
if created_loop:
@@ -1019,6 +1024,84 @@ OAuthAppInfo
return oauth_apps
async def _get_premium_license_insight(self) -> Optional["PremiumLicenseInsight"]:
    """Retrieve Azure AD Premium license insight from the Microsoft Graph beta API.

    Fetches the premium license utilization report which exposes entitled
    P1/P2 license counts and per-feature utilization (Conditional Access
    regular and guest users).

    Tenants without any P1/P2 license receive HTTP 403 with the
    ``missingLicense`` error code; this is surfaced as ``None`` so the
    consuming check can distinguish "not applicable" from a real
    coverage gap.

    Returns:
        Optional[PremiumLicenseInsight]: The premium license insight data,
            or None if the API call failed or data is unavailable.
    """
    logger.info("Entra - Getting premium license insight...")
    try:
        # The SDK has no typed builder for this beta report endpoint, so the
        # request is issued manually through the underlying request adapter.
        request_info = RequestInformation()
        request_info.http_method = Method.GET
        request_info.url_template = (
            "{+baseurl}/reports/azureADPremiumLicenseInsight"
        )
        request_info.path_parameters = {
            "baseurl": "https://graph.microsoft.com/beta"
        }
        raw = await self.client.request_adapter.send_primitive_async(
            request_info, "bytes", {}
        )
        if raw:
            payload = json.loads(raw)

            def feature_users(features: dict, key: str) -> int:
                # Missing feature entries or null userCount values count as 0.
                return int((features.get(key) or {}).get("userCount", 0) or 0)

            entitled_p1 = int(payload.get("entitledP1LicenseCount", 0) or 0)
            entitled_p2 = int(payload.get("entitledP2LicenseCount", 0) or 0)
            # Prefer the API-reported total; fall back to P1 + P2 when the
            # field is absent or falsy.
            entitled_total = int(
                payload.get("entitledTotalLicenseCount", entitled_p1 + entitled_p2)
                or (entitled_p1 + entitled_p2)
            )
            p1_features = payload.get("p1FeatureUtilizations") or {}
            p2_features = payload.get("p2FeatureUtilizations") or {}
            return PremiumLicenseInsight(
                entitled_p1_license_count=entitled_p1,
                entitled_p2_license_count=entitled_p2,
                entitled_total_license_count=entitled_total,
                # P1 utilization = regular + guest Conditional Access users.
                p1_licenses_utilized=feature_users(p1_features, "conditionalAccess")
                + feature_users(p1_features, "conditionalAccessGuestUsers"),
                # P2 utilization = regular + guest risk-based CA users.
                p2_licenses_utilized=feature_users(
                    p2_features, "riskBasedConditionalAccess"
                )
                + feature_users(p2_features, "riskBasedConditionalAccessGuestUsers"),
            )
    except Exception as error:
        # 403 missingLicense is expected for tenants without P1/P2.
        logger.warning(
            f"Entra - Could not retrieve Azure AD Premium license insight. "
            f"Requires Reports.Read.All and a tenant with at least one "
            f"Microsoft Entra ID P1 or P2 license. "
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
        return None
async def _get_authentication_method_configurations(self):
"""Retrieve authentication method configurations from Microsoft Entra.
@@ -1481,3 +1564,34 @@ class OAuthApp(BaseModel):
is_admin_consented: bool = False
last_used_time: Optional[str] = None
app_origin: str = ""
class PremiumLicenseInsight(BaseModel):
    """Parsed view of Microsoft Graph beta ``reports/azureADPremiumLicenseInsight``.

    Holds entitled license counts (P1, P2, total) alongside per-feature
    utilization totals that are pre-aggregated across regular and guest users:

    - **P1 features** (Conditional Access): ``p1_licenses_utilized`` =
      ``conditionalAccess.userCount`` + ``conditionalAccessGuestUsers.userCount``.
    - **P2 features** (risk-based Conditional Access): ``p2_licenses_utilized`` =
      ``riskBasedConditionalAccess.userCount`` +
      ``riskBasedConditionalAccessGuestUsers.userCount``.

    Because P2 entitlements include P1, the P1 utilization check compares
    against ``entitled_total_license_count`` (P1 + P2), whereas the P2 check
    compares against ``entitled_p2_license_count`` alone.
    """

    # Tenant-wide entitled Microsoft Entra ID P1 licenses.
    entitled_p1_license_count: int = 0
    # Tenant-wide entitled Microsoft Entra ID P2 licenses.
    entitled_p2_license_count: int = 0
    # Total premium licenses entitled (P1 + P2).
    entitled_total_license_count: int = 0
    # Users consuming P1 features (regular + guest Conditional Access).
    p1_licenses_utilized: int = 0
    # Users consuming P2 features (regular + guest risk-based Conditional Access).
    p2_licenses_utilized: int = 0
+2 -3
View File
@@ -30,7 +30,7 @@ dependencies = [
"azure-mgmt-postgresqlflexibleservers==1.1.0",
"azure-mgmt-recoveryservices==3.1.0",
"azure-mgmt-recoveryservicesbackup==9.2.0",
"azure-mgmt-resource==23.3.0",
"azure-mgmt-resource==24.0.0",
"azure-mgmt-search==9.1.0",
"azure-mgmt-security==7.0.0",
"azure-mgmt-sql==3.0.1",
@@ -57,7 +57,7 @@ dependencies = [
"kubernetes==32.0.1",
"markdown==3.10.2",
"microsoft-kiota-abstractions==1.9.2",
"msgraph-sdk==1.23.0",
"msgraph-sdk==1.55.0",
"numpy==2.0.2",
"openstacksdk==4.2.0",
"pandas==2.2.3",
@@ -121,7 +121,6 @@ docker = "7.1.0"
filelock = "3.20.3"
flake8 = "7.1.2"
freezegun = "1.5.1"
marshmallow = "==3.26.2"
mock = "5.2.0"
moto = {extras = ["all"], version = "5.1.11"}
openapi-schema-validator = "0.6.3"
@@ -433,6 +433,29 @@ class TestCloudflareValidateCredentials:
with pytest.raises(CloudflareNoAccountsError):
CloudflareProvider.validate_credentials(session)
def test_validate_credentials_breaks_on_repeated_account_ids(self):
    """Pagination must stop when the SDK repeats account IDs to avoid infinite loops."""

    # Endless generator yielding the same account: simulates a paginator
    # that keeps serving repeated pages forever.
    def repeating_accounts():
        account = MagicMock()
        account.id = ACCOUNT_ID
        while True:
            yield account

    mock_client = MagicMock()
    # Force user.get to fail so validate_credentials falls back to
    # accounts.list(), which is the code path under test.
    mock_client.user.get.side_effect = Exception("Some other error")
    mock_client.accounts.list.return_value = repeating_accounts()
    session = CloudflareSession(
        client=mock_client,
        api_token=API_TOKEN,
        api_key=None,
        api_email=None,
    )
    # Must return without hanging; repeated IDs break the loop.
    CloudflareProvider.validate_credentials(session)
class TestCloudflareTestConnection:
"""Tests for test_connection method."""
@@ -0,0 +1,330 @@
from unittest import mock
from tests.providers.m365.m365_fixtures import DOMAIN, set_mocked_m365_provider
CHECK_MODULE_PATH = "prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization"
class Test_entra_conditional_access_policy_p1_license_utilization:
def test_no_premium_license_insight(self):
    """MANUAL when premium license insight data is unavailable (None)."""
    # NOTE(review): `mock.MagicMock` (the class, not an instance) is patched
    # in as the module-level client; attributes set on it are class-level.
    # Presumably this follows the suite's existing convention — confirm.
    entra_client = mock.MagicMock
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN
    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        # Import deferred so the patches above are active when the check
        # module binds its entra_client reference.
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )

        # None models the service-layer failure path (403 missingLicense
        # or missing Reports.Read.All permission).
        entra_client.premium_license_insight = None

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()
        assert len(result) == 1
        assert result[0].status == "MANUAL"
        assert "Could not retrieve" in result[0].status_extended
        assert "P1 or P2" in result[0].status_extended
        assert "Reports.Read.All" in result[0].status_extended
        assert result[0].resource == {}
        assert result[0].resource_name == "Premium License Insight"
        assert result[0].resource_id == "azureADPremiumLicenseInsight"
        assert result[0].location == "global"
def test_p1_only_covers_utilization(self):
    """PASS when P1 entitlements alone cover all P1 CA users (regular + guest)."""
    entra_client = mock.MagicMock
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN
    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        # Imports deferred so the patches above are active at bind time.
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )
        from prowler.providers.m365.services.entra.entra_service import (
            PremiumLicenseInsight,
        )

        # 100 P1 / 0 P2 entitled, 70 P1 CA users consumed: fully covered.
        entra_client.premium_license_insight = PremiumLicenseInsight(
            entitled_p1_license_count=100,
            entitled_p2_license_count=0,
            entitled_total_license_count=100,
            p1_licenses_utilized=70,
            p2_licenses_utilized=0,
        )

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()
        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == "Premium license entitlements (100) cover all P1 Conditional Access users (70)."
        )
        assert (
            result[0].resource == entra_client.premium_license_insight.dict()
        )
        assert result[0].resource_name == "Premium License Insight"
        assert result[0].resource_id == "azureADPremiumLicenseInsight"
        assert result[0].location == "global"
def test_p1_plus_p2_covers_utilization(self):
    """PASS when combined P1 + P2 entitlements cover all P1 CA users."""
    # Use a MagicMock *instance* (not the bare class): assigning attributes
    # on mock.MagicMock itself would leak state into every other test.
    entra_client = mock.MagicMock()
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN

    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )
        from prowler.providers.m365.services.entra.entra_service import (
            PremiumLicenseInsight,
        )

        # 50 P1 + 50 P2 = 100 entitled vs 80 P1 CA users -> covered only
        # because the check sums P1 and P2 entitlements.
        entra_client.premium_license_insight = PremiumLicenseInsight(
            entitled_p1_license_count=50,
            entitled_p2_license_count=50,
            entitled_total_license_count=100,
            p1_licenses_utilized=80,
            p2_licenses_utilized=20,
        )

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()

        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == "Premium license entitlements (100) cover all P1 Conditional Access users (80)."
        )
        assert (
            result[0].resource == entra_client.premium_license_insight.dict()
        )
        assert result[0].resource_name == "Premium License Insight"
        assert result[0].resource_id == "azureADPremiumLicenseInsight"
        assert result[0].location == "global"
def test_p2_only_covers_utilization(self):
    """PASS when only P2 licenses are entitled (P2 includes P1)."""
    # Use a MagicMock *instance* (not the bare class): assigning attributes
    # on mock.MagicMock itself would leak state into every other test.
    entra_client = mock.MagicMock()
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN

    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )
        from prowler.providers.m365.services.entra.entra_service import (
            PremiumLicenseInsight,
        )

        # 0 P1 + 150 P2 entitled vs 120 P1 CA users -> covered, since every
        # P2 license also grants the P1 feature set.
        entra_client.premium_license_insight = PremiumLicenseInsight(
            entitled_p1_license_count=0,
            entitled_p2_license_count=150,
            entitled_total_license_count=150,
            p1_licenses_utilized=120,
            p2_licenses_utilized=40,
        )

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()

        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == "Premium license entitlements (150) cover all P1 Conditional Access users (120)."
        )
        assert (
            result[0].resource == entra_client.premium_license_insight.dict()
        )
        assert result[0].resource_name == "Premium License Insight"
        assert result[0].resource_id == "azureADPremiumLicenseInsight"
        assert result[0].location == "global"
def test_licenses_insufficient_with_guests(self):
    """FAIL when guest CA users push P1 utilization above the entitled count."""
    # Use a MagicMock *instance* (not the bare class): assigning attributes
    # on mock.MagicMock itself would leak state into every other test.
    entra_client = mock.MagicMock()
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN

    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )
        from prowler.providers.m365.services.entra.entra_service import (
            PremiumLicenseInsight,
        )

        # 30 entitled vs 35 utilized (p1_licenses_utilized pre-aggregates
        # regular + guest CA users) -> FAIL.
        entra_client.premium_license_insight = PremiumLicenseInsight(
            entitled_p1_license_count=30,
            entitled_p2_license_count=0,
            entitled_total_license_count=30,
            p1_licenses_utilized=35,
            p2_licenses_utilized=0,
        )

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()

        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert (
            result[0].status_extended
            == "Premium license entitlements (30) do not cover all P1 Conditional Access users (35)."
        )
        assert (
            result[0].resource == entra_client.premium_license_insight.dict()
        )
        assert result[0].resource_name == "Premium License Insight"
        assert result[0].resource_id == "azureADPremiumLicenseInsight"
        assert result[0].location == "global"
def test_p2_utilization_does_not_affect_p1_check(self):
    """P2 risk-based CA usage is ignored by this check; only P1 CA is evaluated."""
    # Use a MagicMock *instance* (not the bare class): assigning attributes
    # on mock.MagicMock itself would leak state into every other test.
    entra_client = mock.MagicMock()
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN

    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )
        from prowler.providers.m365.services.entra.entra_service import (
            PremiumLicenseInsight,
        )

        # Absurdly high p2_licenses_utilized must not flip the result:
        # the P1 check only compares entitlements against P1 CA users.
        entra_client.premium_license_insight = PremiumLicenseInsight(
            entitled_p1_license_count=10,
            entitled_p2_license_count=0,
            entitled_total_license_count=10,
            p1_licenses_utilized=5,
            p2_licenses_utilized=999,
        )

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()

        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == "Premium license entitlements (10) cover all P1 Conditional Access users (5)."
        )
def test_zero_licenses_zero_users(self):
    """PASS when both license count and utilization are zero."""
    # Use a MagicMock *instance* (not the bare class): assigning attributes
    # on mock.MagicMock itself would leak state into every other test.
    entra_client = mock.MagicMock()
    entra_client.audited_tenant = "audited_tenant"
    entra_client.audited_domain = DOMAIN

    with (
        mock.patch(
            "prowler.providers.common.provider.Provider.get_global_provider",
            return_value=set_mocked_m365_provider(),
        ),
        mock.patch(
            "prowler.providers.m365.lib.powershell.m365_powershell.M365PowerShell.connect_exchange_online"
        ),
        mock.patch(
            f"{CHECK_MODULE_PATH}.entra_client",
            new=entra_client,
        ),
    ):
        from prowler.providers.m365.services.entra.entra_conditional_access_policy_p1_license_utilization.entra_conditional_access_policy_p1_license_utilization import (
            entra_conditional_access_policy_p1_license_utilization,
        )
        from prowler.providers.m365.services.entra.entra_service import (
            PremiumLicenseInsight,
        )

        # All model fields default to 0 -> 0 entitled covers 0 utilized.
        entra_client.premium_license_insight = PremiumLicenseInsight()

        check = entra_conditional_access_policy_p1_license_utilization()
        result = check.execute()

        assert len(result) == 1
        assert result[0].status == "PASS"
        assert (
            result[0].status_extended
            == "Premium license entitlements (0) cover all P1 Conditional Access users (0)."
        )
        assert result[0].resource_name == "Premium License Insight"
        assert result[0].resource_id == "azureADPremiumLicenseInsight"
        assert result[0].location == "global"
@@ -1,4 +1,5 @@
import asyncio
import json
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock, patch
@@ -24,6 +25,7 @@ from prowler.providers.m365.services.entra.entra_service import (
InvitationsFrom,
Organization,
PersistentBrowser,
PremiumLicenseInsight,
SessionControls,
SignInFrequency,
SignInFrequencyInterval,
@@ -593,3 +595,92 @@ class Test_Entra_Service:
registration_builder.get.assert_awaited()
registration_builder.with_url.assert_called_once_with("next-link")
registration_builder_next.get.assert_awaited()
class Test_Entra_get_premium_license_insight:
    """Validate the raw JSON parsing of the Microsoft Graph beta endpoint
    ``reports/azureADPremiumLicenseInsight``.

    Full Entra initialisation is bypassed: each test builds a bare ``Entra``
    instance whose ``request_adapter`` is mocked and drives the parser
    coroutine directly.
    """

    @staticmethod
    def _build_entra_with_adapter(adapter):
        # __new__ skips __init__ (and therefore any authentication); the
        # parser only ever touches ``client.request_adapter``.
        entra = Entra.__new__(Entra)
        entra.client = MagicMock(request_adapter=adapter)
        return entra

    def test_parses_realistic_response(self):
        """Parses the canonical Microsoft Graph response and exposes derived totals."""
        graph_payload = {
            "id": "00000000-0000-0000-0000-000000000000",
            "entitledP1LicenseCount": 100,
            "entitledP2LicenseCount": 50,
            "entitledTotalLicenseCount": 150,
            "p1FeatureUtilizations": {
                "conditionalAccess": {"userCount": 85},
                "conditionalAccessGuestUsers": {"userCount": 12},
            },
            "p2FeatureUtilizations": {
                "riskBasedConditionalAccess": {"userCount": 30},
                "riskBasedConditionalAccessGuestUsers": {"userCount": 5},
            },
        }
        graph_adapter = MagicMock()
        graph_adapter.send_primitive_async = AsyncMock(
            return_value=json.dumps(graph_payload).encode("utf-8")
        )

        insight = asyncio.run(
            self._build_entra_with_adapter(graph_adapter)._get_premium_license_insight()
        )

        assert isinstance(insight, PremiumLicenseInsight)
        assert insight.entitled_p1_license_count == 100
        assert insight.entitled_p2_license_count == 50
        assert insight.entitled_total_license_count == 150
        # 85 regular + 12 guest CA users.
        assert insight.p1_licenses_utilized == 97
        # 30 regular + 5 guest risk-based CA users.
        assert insight.p2_licenses_utilized == 35

    def test_returns_none_on_403(self):
        """Returns None when the API raises (e.g. 403 missingLicense)."""
        failing_adapter = MagicMock()
        failing_adapter.send_primitive_async = AsyncMock(
            side_effect=Exception("403 Forbidden: missingLicense")
        )

        entra = self._build_entra_with_adapter(failing_adapter)

        assert asyncio.run(entra._get_premium_license_insight()) is None

    def test_returns_none_on_empty_response(self):
        """Returns None when the API returns empty bytes."""
        empty_adapter = MagicMock()
        empty_adapter.send_primitive_async = AsyncMock(return_value=b"")

        entra = self._build_entra_with_adapter(empty_adapter)

        assert asyncio.run(entra._get_premium_license_insight()) is None

    def test_handles_missing_feature_utilizations(self):
        """Falls back to P1+P2 sum when entitledTotalLicenseCount is missing
        and tolerates null feature-utilization objects."""
        graph_payload = {
            "entitledP1LicenseCount": 10,
            "entitledP2LicenseCount": 0,
            "p1FeatureUtilizations": None,
            "p2FeatureUtilizations": None,
        }
        graph_adapter = MagicMock()
        graph_adapter.send_primitive_async = AsyncMock(
            return_value=json.dumps(graph_payload).encode("utf-8")
        )

        insight = asyncio.run(
            self._build_entra_with_adapter(graph_adapter)._get_premium_license_insight()
        )

        assert insight is not None
        # Derived total = 10 + 0; null utilizations count as zero users.
        assert insight.entitled_total_license_count == 10
        assert insight.p1_licenses_utilized == 0
        assert insight.p2_licenses_utilized == 0
+13
View File
@@ -2,6 +2,19 @@
All notable changes to the **Prowler UI** are documented in this file.
## [1.24.1] (Prowler v5.24.1)
### 🔒 Security
- Upgrade React to 19.2.5 and Next.js to 16.2.3 to mitigate CVE-2026-23869 (React2DoS), a high-severity unauthenticated remote DoS vulnerability in the React Flight Protocol's Server Function deserialization [(#10754)](https://github.com/prowler-cloud/prowler/pull/10754)
### 🐞 Fixed
- Findings and filter UX fixes: exclude muted findings by default in the resource detail drawer and finding group resource views, show category context label (for example `Status: FAIL`) on MultiSelect triggers instead of hiding the placeholder, and add a `wide` width option for filter dropdowns applied to the findings Scan filter to prevent label truncation [(#10734)](https://github.com/prowler-cloud/prowler/pull/10734)
- Findings grouped view now handles zero-resource IaC counters, refines drawer loading states, and adds provider indicators to finding groups [(#10736)](https://github.com/prowler-cloud/prowler/pull/10736)
---
## [1.24.0] (Prowler v5.24.0)
### 🚀 Added
@@ -115,4 +115,35 @@ describe("adaptFindingsByResourceResponse — malformed input", () => {
expect(result[0].id).toBe("finding-1");
expect(result[0].checkId).toBe("s3_check");
});
it("should normalize a single finding response into a one-item drawer array", () => {
// Given — getFindingById returns a single JSON:API resource object
const input = {
data: {
id: "finding-1",
attributes: {
uid: "uid-1",
check_id: "s3_check",
status: "FAIL",
severity: "critical",
check_metadata: {
checktitle: "S3 Check",
},
},
relationships: {
resources: { data: [] },
scan: { data: null },
},
},
included: [],
};
// When
const result = adaptFindingsByResourceResponse(input);
// Then
expect(result).toHaveLength(1);
expect(result[0].id).toBe("finding-1");
expect(result[0].checkTitle).toBe("S3 Check");
});
});
@@ -165,16 +165,18 @@ type IncludedDict = Record<string, IncludedItem>;
* then resolves each finding's resource and provider relationships.
*/
interface JsonApiResponse {
data: FindingApiItem[];
data: FindingApiItem | FindingApiItem[];
included?: Record<string, unknown>[];
}
function isJsonApiResponse(value: unknown): value is JsonApiResponse {
const data = (value as { data?: unknown })?.data;
return (
value !== null &&
typeof value === "object" &&
"data" in value &&
Array.isArray((value as { data: unknown }).data)
(Array.isArray(data) || (data !== null && typeof data === "object"))
);
}
@@ -188,8 +190,11 @@ export function adaptFindingsByResourceResponse(
const resourcesDict = createDict("resources", apiResponse) as IncludedDict;
const scansDict = createDict("scans", apiResponse) as IncludedDict;
const providersDict = createDict("providers", apiResponse) as IncludedDict;
const findings = Array.isArray(apiResponse.data)
? apiResponse.data
: [apiResponse.data];
return apiResponse.data.map((item) => {
return findings.map((item) => {
const attrs = item.attributes;
const meta = (attrs.check_metadata || {}) as Record<string, unknown>;
const remediationRaw = meta.remediation as
@@ -43,6 +43,7 @@ vi.mock("@/actions/finding-groups", () => ({
}));
import {
getLatestFindingsByResourceUid,
resolveFindingIdsByCheckIds,
resolveFindingIdsByVisibleGroupResources,
} from "./findings-by-resource";
@@ -262,3 +263,41 @@ describe("resolveFindingIdsByVisibleGroupResources", () => {
expect(fetchMock).not.toHaveBeenCalled();
});
});
// Verifies the query-string contract of getLatestFindingsByResourceUid:
// which filters and sort parameters are sent to /findings/latest.
describe("getLatestFindingsByResourceUid", () => {
  beforeEach(() => {
    // Fresh mocks per test: stub global fetch and resolve the auth/response
    // helpers so the action can run without a real backend.
    vi.clearAllMocks();
    vi.stubGlobal("fetch", fetchMock);
    getAuthHeadersMock.mockResolvedValue({ Authorization: "Bearer token" });
    handleApiResponseMock.mockResolvedValue({ data: [] });
  });

  it("should exclude muted findings by default and always apply severity/time sorting", async () => {
    fetchMock.mockResolvedValue(new Response("", { status: 200 }));

    // When called without includeMuted…
    await getLatestFindingsByResourceUid({
      resourceUid: "resource-1",
    });

    // …the request must target the latest-findings endpoint, filter by the
    // resource UID, default to filter[muted]=false, and sort by severity
    // (desc) then recency (desc).
    const calledUrl = new URL(fetchMock.mock.calls[0][0]);
    expect(calledUrl.pathname).toBe("/api/v1/findings/latest");
    expect(calledUrl.searchParams.get("filter[resource_uid]")).toBe(
      "resource-1",
    );
    expect(calledUrl.searchParams.get("filter[muted]")).toBe("false");
    expect(calledUrl.searchParams.get("sort")).toBe("-severity,-updated_at");
  });

  it("should include muted findings only when explicitly requested", async () => {
    fetchMock.mockResolvedValue(new Response("", { status: 200 }));

    // Opting in via includeMuted switches the muted filter to "include"
    // while keeping the same sort order.
    await getLatestFindingsByResourceUid({
      resourceUid: "resource-1",
      includeMuted: true,
    });

    const calledUrl = new URL(fetchMock.mock.calls[0][0]);
    expect(calledUrl.searchParams.get("filter[muted]")).toBe("include");
    expect(calledUrl.searchParams.get("sort")).toBe("-severity,-updated_at");
  });
});
+3 -1
View File
@@ -250,10 +250,12 @@ export const getLatestFindingsByResourceUid = async ({
resourceUid,
page = 1,
pageSize = 50,
includeMuted = false,
}: {
resourceUid: string;
page?: number;
pageSize?: number;
includeMuted?: boolean;
}) => {
const headers = await getAuthHeaders({ contentType: false });
@@ -262,7 +264,7 @@ export const getLatestFindingsByResourceUid = async ({
);
url.searchParams.append("filter[resource_uid]", resourceUid);
url.searchParams.append("filter[muted]", "include");
url.searchParams.append("filter[muted]", includeMuted ? "include" : "false");
url.searchParams.append("sort", "-severity,-updated_at");
if (page) url.searchParams.append("page[number]", page.toString());
if (pageSize) url.searchParams.append("page[size]", pageSize.toString());
+9 -1
View File
@@ -141,7 +141,15 @@ export const getLatestMetadataInfo = async ({
}
};
export const getFindingById = async (findingId: string, include = "") => {
interface GetFindingByIdOptions {
source?: "resource-detail-drawer";
}
export const getFindingById = async (
findingId: string,
include = "",
_options?: GetFindingByIdOptions,
) => {
const headers = await getAuthHeaders({ contentType: false });
const url = new URL(`${apiBaseUrl}/findings/${findingId}`);
@@ -132,6 +132,7 @@ export const FindingsFilters = ({
key: FilterType.SCAN,
labelCheckboxGroup: "Scan ID",
values: completedScanIds,
width: "wide" as const,
valueLabelMapping: scanDetails,
labelFormatter: (value: string) =>
getFindingsFilterDisplayValue(`filter[${FilterType.SCAN}]`, value, {
@@ -78,6 +78,18 @@ vi.mock("./notification-indicator", () => ({
},
}));
vi.mock("@/components/shadcn/tooltip", () => ({
Tooltip: ({ children }: { children: ReactNode }) => <>{children}</>,
TooltipContent: ({ children }: { children: ReactNode }) => <>{children}</>,
TooltipTrigger: ({ children }: { children: ReactNode }) => <>{children}</>,
}));
vi.mock("./provider-icon-cell", () => ({
ProviderIconCell: ({ provider }: { provider: string }) => (
<span data-testid={`provider-icon-${provider}`}>{provider}</span>
),
}));
// ---------------------------------------------------------------------------
// Import after mocks
// ---------------------------------------------------------------------------
@@ -148,6 +160,26 @@ function renderFindingCell(
render(<div>{CellComponent({ row: { original: group } })}</div>);
}
// Test helper: renders only the "finding" (title) column cell for a group
// built from `overrides`, so assertions can target the title cell markup.
function renderFindingGroupTitleCell(overrides?: Partial<FindingGroupRow>) {
  const group = makeGroup(overrides);
  const allColumns = getColumnFindingGroups({
    rowSelection: {},
    selectableRowCount: 1,
    onDrillDown: vi.fn(),
  });

  const titleColumn = allColumns.find(
    (col) => (col as { accessorKey?: string }).accessorKey === "finding",
  );
  if (!titleColumn?.cell) throw new Error("finding column not found");

  // TanStack typing is loose here; treat the cell as a plain render function.
  const renderCell = titleColumn.cell as (props: {
    row: { original: FindingGroupRow };
  }) => ReactNode;

  render(<div>{renderCell({ row: { original: group } })}</div>);
}
function renderImpactedResourcesCell(overrides?: Partial<FindingGroupRow>) {
const columns = getColumnFindingGroups({
rowSelection: {},
@@ -171,11 +203,13 @@ function renderImpactedResourcesCell(overrides?: Partial<FindingGroupRow>) {
}
function renderSelectCell(overrides?: Partial<FindingGroupRow>) {
const onDrillDown =
vi.fn<(checkId: string, group: FindingGroupRow) => void>();
const toggleSelected = vi.fn();
const columns = getColumnFindingGroups({
rowSelection: {},
selectableRowCount: 1,
onDrillDown: vi.fn(),
onDrillDown,
});
const selectColumn = columns.find(
@@ -206,7 +240,7 @@ function renderSelectCell(overrides?: Partial<FindingGroupRow>) {
</div>,
);
return { toggleSelected };
return { onDrillDown, toggleSelected };
}
// ---------------------------------------------------------------------------
@@ -231,6 +265,15 @@ describe("column-finding-groups — accessibility of check title cell", () => {
expect(impactedProvidersColumn).toBeUndefined();
});
it("should render the first provider icon with its provider name", () => {
// Given
renderFindingGroupTitleCell({ providers: ["iac"] });
// Then
expect(screen.getByTestId("provider-icon-iac")).toBeInTheDocument();
expect(screen.getByText("Infrastructure as Code")).toBeInTheDocument();
});
it("should render the check title as a button element (not a <p>)", () => {
// Given
const onDrillDown =
@@ -332,6 +375,47 @@ describe("column-finding-groups — accessibility of check title cell", () => {
}),
);
});
it("should keep zero-resource fallback groups non-clickable even when fallback counts are present", () => {
// Given
const onDrillDown =
vi.fn<(checkId: string, group: FindingGroupRow) => void>();
renderFindingCell("Fallback IaC Check", onDrillDown, {
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 2,
manualCount: 1,
});
// Then
expect(
screen.queryByRole("button", { name: "Fallback IaC Check" }),
).not.toBeInTheDocument();
expect(screen.getByText("Fallback IaC Check")).toBeInTheDocument();
expect(onDrillDown).not.toHaveBeenCalled();
});
it("should keep fallback groups non-clickable when the displayed total is zero", () => {
// Given
const onDrillDown =
vi.fn<(checkId: string, group: FindingGroupRow) => void>();
// When
renderFindingCell("No failing findings", onDrillDown, {
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 0,
});
// Then
expect(
screen.queryByRole("button", { name: "No failing findings" }),
).not.toBeInTheDocument();
expect(screen.getByText("No failing findings")).toBeInTheDocument();
});
});
describe("column-finding-groups — impacted resources count", () => {
@@ -345,6 +429,36 @@ describe("column-finding-groups — impacted resources count", () => {
// Then
expect(screen.getByText("3/5")).toBeInTheDocument();
});
it("should fall back to finding counts when resources total is zero", () => {
// Given/When
renderImpactedResourcesCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
muted: false,
});
// Then
expect(screen.getByText("3/5")).toBeInTheDocument();
});
it("should include muted findings in the denominator when the row is muted", () => {
// Given/When
renderImpactedResourcesCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
failMutedCount: 4,
passMutedCount: 1,
muted: true,
});
// Then
expect(screen.getByText("3/10")).toBeInTheDocument();
});
});
describe("column-finding-groups — group selection", () => {
@@ -357,6 +471,42 @@ describe("column-finding-groups — group selection", () => {
expect(screen.getByRole("checkbox", { name: "Select row" })).toBeDisabled();
});
it("should hide the chevron for zero-resource fallback groups even when fallback counts are present", () => {
// Given
const { onDrillDown } = renderSelectCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 2,
manualCount: 1,
});
// Then
expect(
screen.queryByRole("button", {
name: "Expand S3 Bucket Public Access",
}),
).not.toBeInTheDocument();
expect(onDrillDown).not.toHaveBeenCalled();
});
it("should hide the chevron for zero-resource groups when the displayed total is zero", () => {
// Given/When
renderSelectCell({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 0,
passCount: 0,
});
// Then
expect(
screen.queryByRole("button", {
name: "Expand S3 Bucket Public Access",
}),
).not.toBeInTheDocument();
});
});
describe("column-finding-groups — indicators", () => {
@@ -4,6 +4,11 @@ import { ColumnDef, RowSelectionState } from "@tanstack/react-table";
import { ChevronRight } from "lucide-react";
import { Checkbox } from "@/components/shadcn";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/shadcn/tooltip";
import {
DataTableColumnHeader,
SeverityBadge,
@@ -11,15 +16,19 @@ import {
} from "@/components/ui/table";
import { cn } from "@/lib";
import {
canDrillDownFindingGroup,
getFilteredFindingGroupDelta,
getFindingGroupImpactedCounts,
isFindingGroupMuted,
} from "@/lib/findings-groups";
import { FindingGroupRow } from "@/types";
import { getProviderDisplayName } from "@/types/providers";
import { DataTableRowActions } from "./data-table-row-actions";
import { canMuteFindingGroup } from "./finding-group-selection";
import { ImpactedResourcesCell } from "./impacted-resources-cell";
import { DeltaValues, NotificationIndicator } from "./notification-indicator";
import { NotificationIndicator } from "./notification-indicator";
import { ProviderIconCell } from "./provider-icon-cell";
interface GetColumnFindingGroupsOptions {
rowSelection: RowSelectionState;
@@ -83,14 +92,7 @@ export function getColumnFindingGroups({
const allMuted = isFindingGroupMuted(group);
const isExpanded = expandedCheckId === group.checkId;
const deltaKey = getFilteredFindingGroupDelta(group, filters);
const delta =
deltaKey === "new"
? DeltaValues.NEW
: deltaKey === "changed"
? DeltaValues.CHANGED
: DeltaValues.NONE;
const canExpand = group.resourcesTotal > 0;
const canExpand = canDrillDownFindingGroup(group);
const canSelect = canMuteFindingGroup({
resourcesFail: group.resourcesFail,
resourcesTotal: group.resourcesTotal,
@@ -101,7 +103,7 @@ export function getColumnFindingGroups({
return (
<div className="flex items-center gap-2">
<NotificationIndicator
delta={delta}
delta={deltaKey}
isMuted={allMuted}
showDeltaWhenMuted
/>
@@ -175,23 +177,43 @@ export function getColumnFindingGroups({
),
cell: ({ row }) => {
const group = row.original;
const canExpand = group.resourcesTotal > 0;
const canExpand = canDrillDownFindingGroup(group);
const provider = group.providers[0];
const providerName = provider
? getProviderDisplayName(provider)
: undefined;
return (
<div>
{canExpand ? (
<button
type="button"
className="text-text-neutral-primary hover:text-button-tertiary w-full cursor-pointer border-none bg-transparent p-0 text-left text-sm break-words whitespace-normal hover:underline"
onClick={() => onDrillDown(group.checkId, group)}
>
{group.checkTitle}
</button>
) : (
<span className="text-text-neutral-primary w-full text-left text-sm break-words whitespace-normal">
{group.checkTitle}
</span>
)}
<div className="flex items-center gap-2">
{provider && providerName ? (
<Tooltip>
<TooltipTrigger asChild>
<div className="shrink-0">
<ProviderIconCell
provider={provider}
size={20}
className="size-5 rounded-none bg-transparent"
/>
</div>
</TooltipTrigger>
<TooltipContent side="top">{providerName}</TooltipContent>
</Tooltip>
) : null}
<div>
{canExpand ? (
<button
type="button"
className="text-text-neutral-primary hover:text-button-tertiary w-full cursor-pointer border-none bg-transparent p-0 text-left text-sm break-words whitespace-normal hover:underline"
onClick={() => onDrillDown(group.checkId, group)}
>
{group.checkTitle}
</button>
) : (
<span className="text-text-neutral-primary w-full text-left text-sm break-words whitespace-normal">
{group.checkTitle}
</span>
)}
</div>
</div>
);
},
@@ -216,10 +238,11 @@ export function getColumnFindingGroups({
),
cell: ({ row }) => {
const group = row.original;
const counts = getFindingGroupImpactedCounts(group);
return (
<ImpactedResourcesCell
impacted={group.resourcesFail}
total={group.resourcesTotal}
impacted={counts.impacted}
total={counts.total}
/>
);
},
@@ -30,7 +30,6 @@ export function FindingDetailDrawer({
}: FindingDetailDrawerProps) {
const drawer = useResourceDetailDrawer({
resources: [findingToFindingResourceRow(finding)],
checkId: finding.attributes.check_id,
totalResourceCount: 1,
initialIndex: defaultOpen || inline ? 0 : null,
});
@@ -63,6 +62,7 @@ export function FindingDetailDrawer({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
onNavigatePrev={drawer.navigatePrev}
@@ -87,6 +87,7 @@ export function FindingDetailDrawer({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
onNavigatePrev={drawer.navigatePrev}
@@ -22,6 +22,7 @@ import { useFindingGroupResourceState } from "@/hooks/use-finding-group-resource
import { cn, hasHistoricalFindingFilter } from "@/lib";
import {
getFilteredFindingGroupDelta,
getFindingGroupImpactedCounts,
isFindingGroupMuted,
} from "@/lib/findings-groups";
import { FindingGroupRow } from "@/types";
@@ -30,7 +31,8 @@ import { FloatingMuteButton } from "../floating-mute-button";
import { getColumnFindingResources } from "./column-finding-resources";
import { FindingsSelectionContext } from "./findings-selection-context";
import { ImpactedResourcesCell } from "./impacted-resources-cell";
import { DeltaValues, NotificationIndicator } from "./notification-indicator";
import { getFindingGroupEmptyStateMessage } from "./inline-resource-container.utils";
import { NotificationIndicator } from "./notification-indicator";
import { ResourceDetailDrawer } from "./resource-detail-drawer";
interface FindingsGroupDrillDownProps {
@@ -96,14 +98,8 @@ export function FindingsGroupDrillDown({
// Delta for the sticky header
const deltaKey = getFilteredFindingGroupDelta(group, filters);
const delta =
deltaKey === "new"
? DeltaValues.NEW
: deltaKey === "changed"
? DeltaValues.CHANGED
: DeltaValues.NONE;
const allMuted = isFindingGroupMuted(group);
const impactedCounts = getFindingGroupImpactedCounts(group);
const rows = table.getRowModel().rows;
@@ -139,7 +135,7 @@ export function FindingsGroupDrillDown({
{/* Notification indicator */}
<NotificationIndicator
delta={delta}
delta={deltaKey}
isMuted={allMuted}
showDeltaWhenMuted
/>
@@ -159,8 +155,8 @@ export function FindingsGroupDrillDown({
{/* Impacted resources count */}
<ImpactedResourcesCell
impacted={group.resourcesFail}
total={group.resourcesTotal}
impacted={impactedCounts.impacted}
total={impactedCounts.total}
/>
</div>
</div>
@@ -209,9 +205,7 @@ export function FindingsGroupDrillDown({
colSpan={columns.length}
className="h-24 text-center"
>
{Object.keys(filters).length > 0
? "No resources found for the selected filters."
: "No resources found."}
{getFindingGroupEmptyStateMessage(group, filters)}
</TableCell>
</TableRow>
) : null}
@@ -248,8 +242,10 @@ export function FindingsGroupDrillDown({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
showSyntheticResourceHint={group.resourcesTotal === 0}
onNavigatePrev={drawer.navigatePrev}
onNavigateNext={drawer.navigateNext}
onMuteComplete={handleDrawerMuteComplete}
@@ -6,6 +6,7 @@ import { useRef, useState } from "react";
import { resolveFindingIdsByVisibleGroupResources } from "@/actions/findings/findings-by-resource";
import { DataTable } from "@/components/ui/table";
import { canDrillDownFindingGroup } from "@/lib/findings-groups";
import { FindingGroupRow, MetaDataProps } from "@/types";
import { FloatingMuteButton } from "../floating-mute-button";
@@ -140,7 +141,7 @@ export function FindingsGroupTable({
const handleDrillDown = (checkId: string, group: FindingGroupRow) => {
// No resources in the group → nothing to show, skip drill-down
if (group.resourcesTotal === 0) return;
if (!canDrillDownFindingGroup(group)) return;
// Toggle: same group = collapse, different = switch
if (expandedCheckId === checkId) {
@@ -20,6 +20,7 @@ import { getColumnFindingResources } from "./column-finding-resources";
import { FindingsSelectionContext } from "./findings-selection-context";
import {
getFilteredFindingGroupResourceCount,
getFindingGroupEmptyStateMessage,
getFindingGroupSkeletonCount,
} from "./inline-resource-container.utils";
import { ResourceDetailDrawer } from "./resource-detail-drawer";
@@ -278,9 +279,7 @@ export function InlineResourceContainer({
colSpan={columns.length}
className="h-24 text-center"
>
{Object.keys(filters).length > 0
? "No resources found for the selected filters."
: "No resources found."}
{getFindingGroupEmptyStateMessage(group, filters)}
</TableCell>
</TableRow>
)}
@@ -334,8 +333,10 @@ export function InlineResourceContainer({
checkMeta={drawer.checkMeta}
currentIndex={drawer.currentIndex}
totalResources={drawer.totalResources}
currentResource={drawer.currentResource}
currentFinding={drawer.currentFinding}
otherFindings={drawer.otherFindings}
showSyntheticResourceHint={group.resourcesTotal === 0}
onNavigatePrev={drawer.navigatePrev}
onNavigateNext={drawer.navigateNext}
onMuteComplete={handleDrawerMuteComplete}
@@ -4,6 +4,7 @@ import type { FindingGroupRow } from "@/types";
import {
getFilteredFindingGroupResourceCount,
getFindingGroupEmptyStateMessage,
getFindingGroupSkeletonCount,
isFailOnlyStatusFilter,
} from "./inline-resource-container.utils";
@@ -99,3 +100,47 @@ describe("getFindingGroupSkeletonCount", () => {
).toBe(1);
});
});
// Covers the three message branches of getFindingGroupEmptyStateMessage:
// muted-hint, generic filtered empty state, and unfiltered empty state.
describe("getFindingGroupEmptyStateMessage", () => {
  it("returns the muted hint when muted findings are excluded and no visible resources remain", () => {
    // Group has only muted findings and the active filter hides them.
    expect(
      getFindingGroupEmptyStateMessage(
        makeGroup({
          resourcesTotal: 0,
          resourcesFail: 0,
          mutedCount: 1,
          failCount: 0,
          passCount: 0,
        }),
        {
          "filter[status]": "FAIL",
          "filter[muted]": "false",
        },
      ),
    ).toBe(
      "No resources match the current filters. Try enabling Include muted to view muted findings.",
    );
  });
  it("keeps the generic filtered empty state when muted findings are already included", () => {
    // "filter[muted]: include" means muted rows are already visible,
    // so the hint would be pointless.
    expect(
      getFindingGroupEmptyStateMessage(
        makeGroup({
          resourcesTotal: 0,
          resourcesFail: 0,
          mutedCount: 1,
        }),
        {
          "filter[status]": "FAIL",
          "filter[muted]": "include",
        },
      ),
    ).toBe("No resources found for the selected filters.");
  });
  it("keeps the generic empty state when no filters are active", () => {
    expect(getFindingGroupEmptyStateMessage(makeGroup(), {})).toBe(
      "No resources found.",
    );
  });
});
@@ -33,6 +33,18 @@ export function isFailOnlyStatusFilter(
return multiStatusValues.length === 1 && multiStatusValues[0] === "FAIL";
}
/**
 * Returns true when the muted filter explicitly asks for muted findings
 * to be shown (value "include"). The filter may arrive as a single string
 * or as a multi-value array; both forms are normalized before checking.
 */
function includesMutedFindings(
  filters: Record<string, string | string[] | undefined>,
): boolean {
  const raw = filters["filter[muted]"];
  const values = Array.isArray(raw) ? raw : [raw];
  return values.includes("include");
}
export function getFilteredFindingGroupResourceCount(
group: FindingGroupRow,
filters: Record<string, string | string[] | undefined>,
@@ -53,3 +65,24 @@ export function getFindingGroupSkeletonCount(
// empty state ("No resources found") replaces the skeleton.
return Math.max(1, Math.min(filteredTotal, maxSkeletonRows));
}
/**
 * Picks the empty-state message shown in a finding group's resource table.
 *
 * - No active filters → plain "No resources found."
 * - Filters active, muted findings hidden, the group has muted findings,
 *   and no resources remain visible → hint to enable "Include muted".
 * - Any other filtered case → generic filtered empty state.
 */
export function getFindingGroupEmptyStateMessage(
  group: FindingGroupRow,
  filters: Record<string, string | string[] | undefined>,
): string {
  // Guard: without filters there is nothing to explain beyond "empty".
  if (Object.keys(filters).length === 0) {
    return "No resources found.";
  }
  const mutedHidden = !includesMutedFindings(filters);
  const groupHasMuted = (group.mutedCount ?? 0) > 0;
  const nothingVisible =
    getFilteredFindingGroupResourceCount(group, filters) === 0;
  return mutedHidden && groupHasMuted && nothingVisible
    ? "No resources match the current filters. Try enabling Include muted to view muted findings."
    : "No resources found for the selected filters.";
}
@@ -17,14 +17,11 @@ import {
} from "@/components/shadcn/tooltip";
import { DOCS_URLS } from "@/lib/external-urls";
import { cn } from "@/lib/utils";
import { FINDING_DELTA, type FindingDelta } from "@/types";
export const DeltaValues = {
NEW: "new",
CHANGED: "changed",
NONE: "none",
} as const;
export const DeltaValues = FINDING_DELTA;
export type DeltaType = (typeof DeltaValues)[keyof typeof DeltaValues];
export type DeltaType = Exclude<FindingDelta, null>;
interface NotificationIndicatorProps {
delta?: DeltaType;
@@ -124,12 +121,12 @@ function MutedIndicator({ mutedReason }: { mutedReason?: string }) {
<PopoverTrigger asChild>
<button
type="button"
className="flex w-4 shrink-0 cursor-pointer items-center justify-center bg-transparent p-0"
className="flex w-5 shrink-0 cursor-pointer items-center justify-center bg-transparent p-0"
onClick={(e) => e.stopPropagation()}
onMouseEnter={() => setOpen(true)}
onMouseLeave={() => setOpen(false)}
>
<MutedIcon className="text-bg-data-muted size-2" />
<MutedIcon className="text-bg-data-muted size-3" />
</button>
</PopoverTrigger>
<PopoverContent
@@ -134,7 +134,12 @@ vi.mock("@/components/shadcn/dropdown", () => ({
}));
vi.mock("@/components/shadcn/skeleton/skeleton", () => ({
Skeleton: () => <div />,
Skeleton: ({
className,
...props
}: HTMLAttributes<HTMLDivElement> & { className?: string }) => (
<div data-testid="inline-skeleton" className={className} {...props} />
),
}));
vi.mock("@/components/shadcn/spinner/spinner", () => ({
@@ -309,6 +314,7 @@ vi.mock("../../muted", () => ({
// ---------------------------------------------------------------------------
import type { ResourceDrawerFinding } from "@/actions/findings";
import type { FindingResourceRow } from "@/types";
import { ResourceDetailDrawerContent } from "./resource-detail-drawer-content";
import type { CheckMeta } from "./use-resource-detail-drawer";
@@ -374,6 +380,29 @@ const mockFinding: ResourceDrawerFinding = {
scan: null,
};
// Shared fixture: a fully-populated FindingResourceRow that the drawer tests
// clone via object spread, overriding only the fields under test.
const mockResourceRow: FindingResourceRow = {
  id: "row-1",
  rowType: "resource",
  findingId: "finding-1",
  checkId: "s3_check",
  providerType: "aws",
  providerAlias: "prod",
  providerUid: "123456789",
  resourceName: "my-bucket",
  resourceType: "Bucket",
  resourceGroup: "default",
  resourceUid: "arn:aws:s3:::bucket",
  service: "s3",
  region: "us-east-1",
  severity: "critical",
  status: "FAIL",
  delta: null,
  isMuted: false,
  mutedReason: undefined,
  firstSeenAt: null,
  lastSeenAt: null,
};
// ---------------------------------------------------------------------------
// Fix 1: Lighthouse AI button text change
// ---------------------------------------------------------------------------
@@ -937,3 +966,385 @@ describe("ResourceDetailDrawerContent — other findings mute refresh", () => {
expect(onMuteComplete).not.toHaveBeenCalled();
});
});
// When showSyntheticResourceHint is set (IaC resources that only exist in a
// scan, not in a provider), the "other findings" tab should show a dedicated
// explanation instead of the generic empty state.
describe("ResourceDetailDrawerContent — synthetic resource empty state", () => {
  it("should explain that simulated IaC resources never have other findings", () => {
    // Given/When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating={false}
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={1}
        currentFinding={mockFinding}
        otherFindings={[]}
        showSyntheticResourceHint
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then
    expect(
      screen.getByText(
        "No other findings are available for this IaC resource.",
      ),
    ).toBeInTheDocument();
  });
});
// Verifies which data source wins when the table row (currentResource) and
// the freshly fetched finding (currentFinding) disagree: the resource card
// renders from the row, while status/severity in the header follow the
// fetched finding (the row may be stale after a mute/refresh).
describe("ResourceDetailDrawerContent — current resource row display", () => {
  it("should render resource card fields from the current resource row instead of the fetched finding", () => {
    // Given — row and fetched finding carry deliberately different values
    const currentResource: FindingResourceRow = {
      ...mockResourceRow,
      providerAlias: "row-account",
      providerUid: "row-provider-uid",
      resourceName: "row-resource-name",
      resourceUid: "row-resource-uid",
      service: "row-service",
      region: "eu-west-1",
      resourceType: "row-type",
      resourceGroup: "row-group",
      severity: "low",
      status: "PASS",
    };
    const fetchedFinding: ResourceDrawerFinding = {
      ...mockFinding,
      providerAlias: "finding-account",
      providerUid: "finding-provider-uid",
      resourceName: "finding-resource-name",
      resourceUid: "finding-resource-uid",
      resourceService: "finding-service",
      resourceRegion: "ap-south-1",
      resourceType: "finding-type",
      resourceGroup: "finding-group",
      severity: "critical",
      status: "FAIL",
    };
    // When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating={false}
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={1}
        currentResource={currentResource}
        currentFinding={fetchedFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then — row values render; status/severity here come from the finding
    expect(screen.getByText("row-service")).toBeInTheDocument();
    expect(screen.getByText("eu-west-1")).toBeInTheDocument();
    expect(screen.getByText("row-group")).toBeInTheDocument();
    expect(screen.getByText("row-type")).toBeInTheDocument();
    expect(screen.getByText("FAIL")).toBeInTheDocument();
    expect(screen.getByText("critical")).toBeInTheDocument();
    expect(screen.queryByText("finding-service")).not.toBeInTheDocument();
    expect(screen.queryByText("ap-south-1")).not.toBeInTheDocument();
    expect(screen.queryByText("finding-group")).not.toBeInTheDocument();
    expect(screen.queryByText("finding-type")).not.toBeInTheDocument();
  });
  it("should prefer the fetched finding status and severity in the header when the current row is stale", () => {
    // Given — the row still shows the pre-refresh FAIL/critical state
    const currentResource: FindingResourceRow = {
      ...mockResourceRow,
      severity: "critical",
      status: "FAIL",
      isMuted: false,
    };
    const fetchedFinding: ResourceDrawerFinding = {
      ...mockFinding,
      severity: "low",
      status: "PASS",
      isMuted: true,
      mutedReason: "Muted after refresh",
    };
    // When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating={false}
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={1}
        currentResource={currentResource}
        currentFinding={fetchedFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then — the fresher finding values win
    expect(screen.getByText("PASS")).toBeInTheDocument();
    expect(screen.getByText("low")).toBeInTheDocument();
    expect(screen.queryByText("FAIL")).not.toBeInTheDocument();
    expect(screen.queryByText("critical")).not.toBeInTheDocument();
  });
});
// While isNavigating is true the drawer keeps row-backed chrome (tabs,
// status/severity from the row, table headers, static labels) visible and
// skeletonizes only the stale, finding-backed details. Navigating to a
// *different* check additionally skeletonizes the check-level header and
// overview sections.
describe("ResourceDetailDrawerContent — header skeleton while navigating", () => {
  it("should keep row-backed navigation chrome visible while hiding stale finding details during carousel navigation", () => {
    // Given — same check as mockCheckMeta, so check-level chrome stays
    const currentResource: FindingResourceRow = {
      ...mockResourceRow,
      checkId: mockCheckMeta.checkId,
      resourceName: "next-bucket",
      resourceUid: "next-resource-uid",
      service: "ec2",
      region: "eu-west-1",
      resourceType: "Instance",
      resourceGroup: "row-group",
      severity: "low",
      status: "PASS",
      findingId: "finding-2",
    };
    // When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={currentResource}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then — row values and tab buttons visible; stale finding details hidden
    expect(screen.getByText("PASS")).toBeInTheDocument();
    expect(screen.getByText("low")).toBeInTheDocument();
    expect(screen.getByText("ec2")).toBeInTheDocument();
    expect(screen.getByText("eu-west-1")).toBeInTheDocument();
    expect(screen.getByText("row-group")).toBeInTheDocument();
    expect(
      screen.getByRole("button", { name: "Finding Overview" }),
    ).toBeInTheDocument();
    expect(
      screen.getByRole("button", { name: "Other Findings For This Resource" }),
    ).toBeInTheDocument();
    expect(screen.queryByText("uid-1")).not.toBeInTheDocument();
    expect(screen.queryByText("Status extended")).not.toBeInTheDocument();
    expect(screen.queryByText("FAIL")).not.toBeInTheDocument();
    expect(screen.queryByText("critical")).not.toBeInTheDocument();
  });
  it("should skeletonize stale check-level header content when navigating to a different check", () => {
    // Given — target row belongs to a different check than checkMeta
    const currentResource: FindingResourceRow = {
      ...mockResourceRow,
      checkId: "ec2_check",
      findingId: "finding-2",
      severity: "low",
      status: "PASS",
    };
    // When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={currentResource}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then — check header skeletonized, but row status/severity still shown
    expect(screen.getByTestId("drawer-header-skeleton")).toBeInTheDocument();
    expect(screen.queryByText("S3 Check")).not.toBeInTheDocument();
    expect(screen.queryByText("PCI-DSS")).not.toBeInTheDocument();
    expect(screen.getByText("PASS")).toBeInTheDocument();
    expect(screen.getByText("low")).toBeInTheDocument();
  });
  it("should keep same-check overview sections visible while hiding stale finding-specific details during navigation", () => {
    // Given/When — same check, so Risk/Description/Remediation stay visible
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={mockResourceRow}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then
    expect(screen.getByText("Risk:")).toBeInTheDocument();
    expect(screen.getByText("Description:")).toBeInTheDocument();
    expect(screen.getByText("Remediation:")).toBeInTheDocument();
    expect(screen.getByText("security")).toBeInTheDocument();
    expect(screen.queryByText("Status Extended:")).not.toBeInTheDocument();
    expect(screen.queryByText("uid-1")).not.toBeInTheDocument();
    expect(
      screen.queryByRole("link", {
        name: "Analyze This Finding With Lighthouse AI",
      }),
    ).not.toBeInTheDocument();
  });
  it("should keep the overview tab shell visible with section skeletons when navigating to a different check", () => {
    // Given — different check: overview content is skeletonized, shell stays
    const currentResource: FindingResourceRow = {
      ...mockResourceRow,
      checkId: "ec2_check",
      findingId: "finding-2",
      severity: "low",
      status: "PASS",
    };
    // When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={currentResource}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then
    expect(
      screen.getByTestId("overview-navigation-skeleton"),
    ).toBeInTheDocument();
    expect(screen.queryByText("Risk:")).not.toBeInTheDocument();
    expect(screen.queryByText("Description:")).not.toBeInTheDocument();
    expect(screen.queryByText("Remediation:")).not.toBeInTheDocument();
    expect(
      screen.getByRole("button", { name: "Finding Overview" }),
    ).toBeInTheDocument();
    expect(
      screen.getByRole("button", { name: "Other Findings For This Resource" }),
    ).toBeInTheDocument();
  });
  it("should keep other findings table headers visible while skeletonizing only the rows during navigation", () => {
    // Given/When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={mockResourceRow}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then — static column headers stay, row area shows skeletons
    expect(screen.getByText("Status")).toBeInTheDocument();
    expect(screen.getByText("Finding")).toBeInTheDocument();
    expect(screen.getByText("Severity")).toBeInTheDocument();
    expect(screen.getByText("Time")).toBeInTheDocument();
    expect(
      screen.getByTestId("other-findings-total-entries-skeleton"),
    ).toBeInTheDocument();
    expect(
      screen.getByTestId("other-findings-navigation-skeleton"),
    ).toBeInTheDocument();
  });
  it("should keep scans labels visible while skeletonizing only the scan values during navigation", () => {
    // Given/When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={mockResourceRow}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then — every static label of the scans section remains rendered
    expect(
      screen.getByText("Showing the latest scan that evaluated this finding"),
    ).toBeInTheDocument();
    expect(screen.getByText("Scan Name")).toBeInTheDocument();
    expect(screen.getByText("Resources Scanned")).toBeInTheDocument();
    expect(screen.getByText("Progress")).toBeInTheDocument();
    expect(screen.getByText("Trigger")).toBeInTheDocument();
    expect(screen.getByText("State")).toBeInTheDocument();
    expect(screen.getByText("Duration")).toBeInTheDocument();
    expect(screen.getByText("Started At")).toBeInTheDocument();
    expect(screen.getByText("Completed At")).toBeInTheDocument();
    expect(screen.getByText("Launched At")).toBeInTheDocument();
    expect(screen.getByText("Scheduled At")).toBeInTheDocument();
    expect(screen.getByTestId("scans-navigation-skeleton")).toBeInTheDocument();
  });
  it("should keep the events tab shell visible while showing timeline row skeletons during navigation", () => {
    // Given/When
    render(
      <ResourceDetailDrawerContent
        isLoading={false}
        isNavigating
        checkMeta={mockCheckMeta}
        currentIndex={0}
        totalResources={2}
        currentResource={mockResourceRow}
        currentFinding={mockFinding}
        otherFindings={[]}
        onNavigatePrev={vi.fn()}
        onNavigateNext={vi.fn()}
        onMuteComplete={vi.fn()}
      />,
    );
    // Then
    expect(screen.getByRole("button", { name: "Events" })).toBeInTheDocument();
    expect(
      screen.getByTestId("events-navigation-skeleton"),
    ).toBeInTheDocument();
  });
});
File diff suppressed because it is too large Load Diff
@@ -11,6 +11,7 @@ import {
DrawerHeader,
DrawerTitle,
} from "@/components/shadcn";
import type { FindingResourceRow } from "@/types";
import { ResourceDetailDrawerContent } from "./resource-detail-drawer-content";
import type { CheckMeta } from "./use-resource-detail-drawer";
@@ -23,8 +24,10 @@ interface ResourceDetailDrawerProps {
checkMeta: CheckMeta | null;
currentIndex: number;
totalResources: number;
currentResource: FindingResourceRow | null;
currentFinding: ResourceDrawerFinding | null;
otherFindings: ResourceDrawerFinding[];
showSyntheticResourceHint?: boolean;
onNavigatePrev: () => void;
onNavigateNext: () => void;
onMuteComplete: () => void;
@@ -38,8 +41,10 @@ export function ResourceDetailDrawer({
checkMeta,
currentIndex,
totalResources,
currentResource,
currentFinding,
otherFindings,
showSyntheticResourceHint = false,
onNavigatePrev,
onNavigateNext,
onMuteComplete,
@@ -64,8 +69,10 @@ export function ResourceDetailDrawer({
checkMeta={checkMeta}
currentIndex={currentIndex}
totalResources={totalResources}
currentResource={currentResource}
currentFinding={currentFinding}
otherFindings={otherFindings}
showSyntheticResourceHint={showSyntheticResourceHint}
onNavigatePrev={onNavigatePrev}
onNavigateNext={onNavigateNext}
onMuteComplete={onMuteComplete}
@@ -6,14 +6,17 @@ import { beforeEach, describe, expect, it, vi } from "vitest";
// ---------------------------------------------------------------------------
const {
getFindingByIdMock,
getLatestFindingsByResourceUidMock,
adaptFindingsByResourceResponseMock,
} = vi.hoisted(() => ({
getFindingByIdMock: vi.fn(),
getLatestFindingsByResourceUidMock: vi.fn(),
adaptFindingsByResourceResponseMock: vi.fn(),
}));
vi.mock("@/actions/findings", () => ({
getFindingById: getFindingByIdMock,
getLatestFindingsByResourceUid: getLatestFindingsByResourceUidMock,
adaptFindingsByResourceResponse: adaptFindingsByResourceResponseMock,
}));
@@ -109,6 +112,7 @@ describe("useResourceDetailDrawer — unmount cleanup", () => {
beforeEach(() => {
vi.clearAllMocks();
vi.restoreAllMocks();
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
});
it("should abort the in-flight fetch controller when the hook unmounts", async () => {
@@ -116,9 +120,7 @@ describe("useResourceDetailDrawer — unmount cleanup", () => {
const abortSpy = vi.spyOn(AbortController.prototype, "abort");
// never-resolving fetch to simulate in-flight request
getLatestFindingsByResourceUidMock.mockImplementation(
() => new Promise(() => {}),
);
getFindingByIdMock.mockImplementation(() => new Promise(() => {}));
adaptFindingsByResourceResponseMock.mockReturnValue([]);
const resources = [makeResource()];
@@ -126,7 +128,6 @@ describe("useResourceDetailDrawer — unmount cleanup", () => {
const { result, unmount } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
@@ -136,7 +137,7 @@ describe("useResourceDetailDrawer — unmount cleanup", () => {
});
// Verify a fetch was started
expect(getLatestFindingsByResourceUidMock).toHaveBeenCalledTimes(1);
expect(getFindingByIdMock).toHaveBeenCalledTimes(1);
// Reset spy count to detect only the unmount abort
abortSpy.mockClear();
@@ -158,7 +159,6 @@ describe("useResourceDetailDrawer — unmount cleanup", () => {
const { unmount } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
@@ -173,40 +173,63 @@ describe("useResourceDetailDrawer — unmount cleanup", () => {
describe("useResourceDetailDrawer — other findings filtering", () => {
beforeEach(() => {
vi.clearAllMocks();
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
});
it("should exclude the current finding from otherFindings and preserve API order", async () => {
it("should load other findings from the current resource uid and exclude the current finding", async () => {
const resources = [makeResource()];
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
adaptFindingsByResourceResponseMock.mockReturnValue([
makeDrawerFinding({
id: "current",
checkId: "s3_check",
checkTitle: "Current",
status: "FAIL",
severity: "critical",
}),
makeDrawerFinding({
id: "other-1",
checkId: "check-other-1",
checkTitle: "Other 1",
status: "FAIL",
severity: "critical",
}),
makeDrawerFinding({
id: "other-2",
checkId: "check-other-2",
checkTitle: "Other 2",
status: "FAIL",
severity: "medium",
}),
]);
// Given
getFindingByIdMock.mockResolvedValue({ data: ["detail"] });
getLatestFindingsByResourceUidMock.mockResolvedValue({
data: ["resource"],
});
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => {
if (response.data[0] === "detail") {
return [
makeDrawerFinding({
id: "finding-1",
checkId: "s3_check",
checkTitle: "Current",
status: "MANUAL",
severity: "informational",
}),
];
}
return [
makeDrawerFinding({
id: "finding-3",
checkTitle: "First other finding",
status: "FAIL",
severity: "high",
}),
makeDrawerFinding({
id: "finding-1",
checkTitle: "Current finding duplicate from resource fetch",
status: "FAIL",
severity: "critical",
}),
makeDrawerFinding({
id: "finding-4",
checkTitle: "Manual finding should be filtered out",
status: "MANUAL",
severity: "low",
}),
makeDrawerFinding({
id: "finding-5",
checkTitle: "Second other finding",
status: "FAIL",
severity: "medium",
}),
];
},
);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
@@ -215,47 +238,77 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
await Promise.resolve();
});
// Then
expect(getFindingByIdMock).toHaveBeenCalledWith(
"finding-1",
"resources,scan.provider",
{ source: "resource-detail-drawer" },
);
expect(getLatestFindingsByResourceUidMock).toHaveBeenCalledWith({
resourceUid: "arn:aws:s3:::my-bucket",
pageSize: 50,
includeMuted: false,
});
expect(result.current.currentFinding?.id).toBe("finding-1");
expect(result.current.otherFindings.map((finding) => finding.id)).toEqual([
"other-1",
"other-2",
"finding-3",
"finding-5",
]);
});
it("should exclude non-FAIL findings from otherFindings", async () => {
const resources = [makeResource()];
it("should skip loading other findings for synthetic IaC resources and keep the current detail on findingId", async () => {
const resources = [
makeResource({
findingId: "synthetic-finding",
resourceUid: "synthetic://iac-resource",
}),
];
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
// Given
getFindingByIdMock.mockResolvedValue({ data: ["detail"] });
adaptFindingsByResourceResponseMock.mockReturnValue([
makeDrawerFinding({
id: "current",
id: "synthetic-finding",
checkId: "s3_check",
status: "MANUAL",
severity: "informational",
}),
makeDrawerFinding({
id: "other-pass",
checkId: "check-pass",
status: "PASS",
severity: "low",
}),
makeDrawerFinding({
id: "other-manual",
checkId: "check-manual",
status: "MANUAL",
severity: "low",
}),
makeDrawerFinding({
id: "other-fail",
checkId: "check-fail",
status: "FAIL",
severity: "high",
}),
]);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
canLoadOtherFindings: false,
}),
);
await act(async () => {
// When
result.current.openDrawer(0);
await Promise.resolve();
});
// Then
expect(getFindingByIdMock).toHaveBeenCalledWith(
"synthetic-finding",
"resources,scan.provider",
{ source: "resource-detail-drawer" },
);
expect(getLatestFindingsByResourceUidMock).not.toHaveBeenCalled();
expect(result.current.currentFinding?.id).toBe("synthetic-finding");
expect(result.current.otherFindings).toEqual([]);
});
it("should request muted findings only when explicitly enabled", async () => {
const resources = [makeResource()];
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
adaptFindingsByResourceResponseMock.mockReturnValue([makeDrawerFinding()]);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
includeMutedInOtherFindings: true,
}),
);
@@ -264,10 +317,11 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
await Promise.resolve();
});
expect(result.current.currentFinding?.id).toBe("current");
expect(result.current.otherFindings.map((f) => f.id)).toEqual([
"other-fail",
]);
expect(getLatestFindingsByResourceUidMock).toHaveBeenCalledWith({
resourceUid: "arn:aws:s3:::my-bucket",
pageSize: 50,
includeMuted: true,
});
});
it("should keep isNavigating true for a cached resource long enough to render skeletons", async () => {
@@ -288,19 +342,19 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
}),
];
getLatestFindingsByResourceUidMock.mockImplementation(
async ({ resourceUid }: { resourceUid: string }) => ({
data: [resourceUid],
}),
);
getFindingByIdMock.mockImplementation(async (findingId: string) => ({
data: [findingId],
}));
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => [
makeDrawerFinding({
id: response.data[0].includes("first") ? "finding-1" : "finding-2",
resourceUid: response.data[0],
resourceName: response.data[0].includes("first")
? "first-bucket"
: "second-bucket",
id: response.data[0],
resourceUid:
response.data[0] === "finding-1"
? "arn:aws:s3:::first-bucket"
: "arn:aws:s3:::second-bucket",
resourceName:
response.data[0] === "finding-1" ? "first-bucket" : "second-bucket",
}),
],
);
@@ -308,7 +362,6 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
@@ -333,6 +386,8 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
expect(result.current.isNavigating).toBe(true);
await act(async () => {
await Promise.resolve();
await Promise.resolve();
vi.runAllTimers();
await Promise.resolve();
});
@@ -362,19 +417,19 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
}),
];
getLatestFindingsByResourceUidMock.mockImplementation(
async ({ resourceUid }: { resourceUid: string }) => ({
data: [resourceUid],
}),
);
getFindingByIdMock.mockImplementation(async (findingId: string) => ({
data: [findingId],
}));
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => [
makeDrawerFinding({
id: response.data[0].includes("first") ? "finding-1" : "finding-2",
resourceUid: response.data[0],
resourceName: response.data[0].includes("first")
? "first-bucket"
: "second-bucket",
id: response.data[0],
resourceUid:
response.data[0] === "finding-1"
? "arn:aws:s3:::first-bucket"
: "arn:aws:s3:::second-bucket",
resourceName:
response.data[0] === "finding-1" ? "first-bucket" : "second-bucket",
}),
],
);
@@ -382,7 +437,6 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
@@ -427,6 +481,154 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
vi.useRealTimers();
});
it("should update checkMeta when navigating to a resource with a different check", async () => {
// Given
const resources = [
makeResource({
id: "row-1",
findingId: "finding-1",
checkId: "s3_check",
}),
makeResource({
id: "row-2",
findingId: "finding-2",
checkId: "ec2_check",
resourceUid: "arn:aws:ec2:::instance/i-123",
resourceName: "instance-1",
service: "ec2",
}),
];
getFindingByIdMock.mockImplementation(async (findingId: string) => ({
data: [findingId],
}));
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => [
response.data[0] === "finding-1"
? makeDrawerFinding({
id: "finding-1",
checkId: "s3_check",
checkTitle: "S3 Check",
description: "s3 description",
})
: makeDrawerFinding({
id: "finding-2",
checkId: "ec2_check",
checkTitle: "EC2 Check",
description: "ec2 description",
}),
],
);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
}),
);
// When
await act(async () => {
result.current.openDrawer(0);
await Promise.resolve();
});
expect(result.current.checkMeta?.checkTitle).toBe("S3 Check");
await act(async () => {
result.current.navigateNext();
await Promise.resolve();
});
// Then
expect(result.current.checkMeta?.checkTitle).toBe("EC2 Check");
expect(result.current.checkMeta?.description).toBe("ec2 description");
});
it("should keep the previous check metadata cached while reopening until the new finding arrives", async () => {
// Given
const resources = [
makeResource({
id: "row-1",
findingId: "finding-1",
checkId: "s3_check",
}),
makeResource({
id: "row-2",
findingId: "finding-2",
checkId: "ec2_check",
resourceUid: "arn:aws:ec2:::instance/i-123",
resourceName: "instance-1",
service: "ec2",
}),
];
let resolveSecondFinding: ((value: { data: string[] }) => void) | null =
null;
getFindingByIdMock.mockImplementation((findingId: string) => {
if (findingId === "finding-2") {
return new Promise((resolve) => {
resolveSecondFinding = resolve;
});
}
return Promise.resolve({ data: [findingId] });
});
getLatestFindingsByResourceUidMock.mockResolvedValue({ data: [] });
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => [
response.data[0] === "finding-1"
? makeDrawerFinding({
id: "finding-1",
checkId: "s3_check",
checkTitle: "S3 Check",
description: "s3 description",
})
: makeDrawerFinding({
id: "finding-2",
checkId: "ec2_check",
checkTitle: "EC2 Check",
description: "ec2 description",
}),
],
);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
}),
);
await act(async () => {
result.current.openDrawer(0);
await Promise.resolve();
});
expect(result.current.checkMeta?.checkTitle).toBe("S3 Check");
// When
act(() => {
result.current.closeDrawer();
result.current.openDrawer(1);
});
// Then
expect(result.current.isOpen).toBe(true);
expect(result.current.currentIndex).toBe(1);
expect(result.current.currentFinding).toBeNull();
expect(result.current.checkMeta?.checkTitle).toBe("S3 Check");
await act(async () => {
resolveSecondFinding?.({ data: ["finding-2"] });
await Promise.resolve();
await Promise.resolve();
});
expect(result.current.checkMeta?.checkTitle).toBe("EC2 Check");
expect(result.current.checkMeta?.description).toBe("ec2 description");
});
it("should clear the previous resource findings when navigation to the next resource fails", async () => {
// Given
const resources = [
@@ -444,24 +646,24 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
}),
];
getLatestFindingsByResourceUidMock.mockImplementation(
async ({ resourceUid }: { resourceUid: string }) => {
if (resourceUid.includes("second")) {
throw new Error("Fetch failed");
}
getFindingByIdMock.mockImplementation(async (findingId: string) => {
if (findingId === "finding-2") {
throw new Error("Fetch failed");
}
return { data: [resourceUid] };
},
);
return { data: [findingId] };
});
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => [
makeDrawerFinding({
id: response.data[0].includes("first") ? "finding-1" : "finding-2",
resourceUid: response.data[0],
resourceName: response.data[0].includes("first")
? "first-bucket"
: "second-bucket",
id: response.data[0],
resourceUid:
response.data[0] === "finding-1"
? "arn:aws:s3:::first-bucket"
: "arn:aws:s3:::second-bucket",
resourceName:
response.data[0] === "finding-1" ? "first-bucket" : "second-bucket",
}),
],
);
@@ -469,7 +671,6 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
checkId: "s3_check",
}),
);
@@ -481,6 +682,7 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
expect(result.current.currentFinding?.resourceUid).toBe(
"arn:aws:s3:::first-bucket",
);
expect(result.current.checkMeta?.checkTitle).toBe("S3 Check");
// When
await act(async () => {
@@ -492,5 +694,123 @@ describe("useResourceDetailDrawer — other findings filtering", () => {
expect(result.current.currentIndex).toBe(1);
expect(result.current.currentFinding).toBeNull();
expect(result.current.otherFindings).toEqual([]);
expect(result.current.checkMeta).toBeNull();
});
it("should clear other findings immediately while the next resource is loading", async () => {
// Given
const resources = [
makeResource({
id: "row-1",
findingId: "finding-1",
resourceUid: "arn:aws:s3:::first-bucket",
resourceName: "first-bucket",
}),
makeResource({
id: "row-2",
findingId: "finding-2",
resourceUid: "arn:aws:s3:::second-bucket",
resourceName: "second-bucket",
}),
];
let resolveSecondFinding: ((value: { data: string[] }) => void) | null =
null;
let resolveSecondResource: ((value: { data: string[] }) => void) | null =
null;
getFindingByIdMock.mockImplementation((findingId: string) => {
if (findingId === "finding-2") {
return new Promise((resolve) => {
resolveSecondFinding = resolve;
});
}
return Promise.resolve({ data: [findingId] });
});
getLatestFindingsByResourceUidMock.mockImplementation(
({ resourceUid }: { resourceUid: string }) => {
if (resourceUid === "arn:aws:s3:::second-bucket") {
return new Promise((resolve) => {
resolveSecondResource = resolve;
});
}
return Promise.resolve({ data: ["resource-1"] });
},
);
adaptFindingsByResourceResponseMock.mockImplementation(
(response: { data: string[] }) => {
if (response.data[0] === "finding-1") {
return [makeDrawerFinding({ id: "finding-1" })];
}
if (response.data[0] === "finding-2") {
return [
makeDrawerFinding({
id: "finding-2",
resourceUid: "arn:aws:s3:::second-bucket",
resourceName: "second-bucket",
}),
];
}
if (response.data[0] === "resource-1") {
return [
makeDrawerFinding({
id: "finding-3",
checkTitle: "First bucket other finding",
resourceUid: "arn:aws:s3:::first-bucket",
}),
];
}
return [
makeDrawerFinding({
id: "finding-4",
checkTitle: "Second bucket other finding",
resourceUid: "arn:aws:s3:::second-bucket",
}),
];
},
);
const { result } = renderHook(() =>
useResourceDetailDrawer({
resources,
}),
);
await act(async () => {
result.current.openDrawer(0);
await Promise.resolve();
});
expect(result.current.otherFindings.map((finding) => finding.id)).toEqual([
"finding-3",
]);
// When
act(() => {
result.current.navigateNext();
});
// Then
expect(result.current.currentIndex).toBe(1);
expect(result.current.currentFinding).toBeNull();
expect(result.current.otherFindings).toEqual([]);
await act(async () => {
resolveSecondFinding?.({ data: ["finding-2"] });
resolveSecondResource?.({ data: ["resource-2"] });
await Promise.resolve();
await Promise.resolve();
});
expect(result.current.otherFindings.map((finding) => finding.id)).toEqual([
"finding-4",
]);
});
});
@@ -4,6 +4,7 @@ import { useEffect, useRef, useState } from "react";
import {
adaptFindingsByResourceResponse,
getFindingById,
getLatestFindingsByResourceUid,
type ResourceDrawerFinding,
} from "@/actions/findings";
@@ -43,10 +44,11 @@ function extractCheckMeta(finding: ResourceDrawerFinding): CheckMeta {
interface UseResourceDetailDrawerOptions {
resources: FindingResourceRow[];
checkId: string;
totalResourceCount?: number;
onRequestMoreResources?: () => void;
initialIndex?: number | null;
canLoadOtherFindings?: boolean;
includeMutedInOtherFindings?: boolean;
}
interface UseResourceDetailDrawerReturn {
@@ -56,9 +58,9 @@ interface UseResourceDetailDrawerReturn {
checkMeta: CheckMeta | null;
currentIndex: number;
totalResources: number;
currentResource: FindingResourceRow | null;
currentFinding: ResourceDrawerFinding | null;
otherFindings: ResourceDrawerFinding[];
allFindings: ResourceDrawerFinding[];
openDrawer: (index: number) => void;
closeDrawer: () => void;
navigatePrev: () => void;
@@ -70,23 +72,33 @@ interface UseResourceDetailDrawerReturn {
/**
* Manages the resource detail drawer state, fetching, and navigation.
*
* Caches findings per resourceUid in a Map ref so navigating prev/next
* Caches findings per findingId in a Map ref so navigating prev/next
* doesn't re-fetch already-visited resources.
*/
export function useResourceDetailDrawer({
resources,
checkId,
totalResourceCount,
onRequestMoreResources,
initialIndex = null,
canLoadOtherFindings = true,
includeMutedInOtherFindings = false,
}: UseResourceDetailDrawerOptions): UseResourceDetailDrawerReturn {
const [isOpen, setIsOpen] = useState(initialIndex !== null);
const [isLoading, setIsLoading] = useState(false);
const [currentIndex, setCurrentIndex] = useState(initialIndex ?? 0);
const [findings, setFindings] = useState<ResourceDrawerFinding[]>([]);
const [currentFinding, setCurrentFinding] =
useState<ResourceDrawerFinding | null>(null);
const [otherFindings, setOtherFindings] = useState<ResourceDrawerFinding[]>(
[],
);
const [isNavigating, setIsNavigating] = useState(false);
const cacheRef = useRef<Map<string, ResourceDrawerFinding[]>>(new Map());
const currentFindingCacheRef = useRef<
Map<string, ResourceDrawerFinding | null>
>(new Map());
const otherFindingsCacheRef = useRef<Map<string, ResourceDrawerFinding[]>>(
new Map(),
);
const checkMetaRef = useRef<CheckMeta | null>(null);
const fetchControllerRef = useRef<AbortController | null>(null);
const navigationTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(
@@ -134,6 +146,11 @@ export function useResourceDetailDrawer({
setIsNavigating(true);
};
const resetCurrentResourceState = () => {
setCurrentFinding(null);
setOtherFindings([]);
};
// Abort any in-flight request on unmount to prevent state updates
// on an already-unmounted component.
useEffect(() => {
@@ -144,46 +161,83 @@ export function useResourceDetailDrawer({
};
}, []);
const fetchFindings = async (resourceUid: string) => {
const fetchFindings = async (resource: FindingResourceRow) => {
// Abort any in-flight request to prevent stale data from out-of-order responses
fetchControllerRef.current?.abort();
clearNavigationTimeout();
const controller = new AbortController();
fetchControllerRef.current = controller;
// Check cache first
const cached = cacheRef.current.get(resourceUid);
if (cached) {
if (!checkMetaRef.current) {
const main = cached.find((f) => f.checkId === checkId) ?? cached[0];
if (main) checkMetaRef.current = extractCheckMeta(main);
const { findingId, resourceUid } = resource;
const fetchCurrentFinding = async () => {
const cached = currentFindingCacheRef.current.get(findingId);
if (cached !== undefined) {
return cached;
}
setFindings(cached);
finishNavigation();
return;
}
const response = await getFindingById(
findingId,
"resources,scan.provider",
{ source: "resource-detail-drawer" },
);
const adapted = adaptFindingsByResourceResponse(response);
const finding =
adapted.find((item) => item.id === findingId) ?? adapted[0] ?? null;
currentFindingCacheRef.current.set(findingId, finding);
return finding;
};
const fetchOtherFindings = async () => {
if (!canLoadOtherFindings || !resourceUid) {
return [];
}
const cached = otherFindingsCacheRef.current.get(resourceUid);
if (cached) {
return cached;
}
const response = await getLatestFindingsByResourceUid({
resourceUid,
pageSize: 50,
includeMuted: includeMutedInOtherFindings,
});
const adapted = adaptFindingsByResourceResponse(response);
otherFindingsCacheRef.current.set(resourceUid, adapted);
return adapted;
};
setIsLoading(true);
try {
const response = await getLatestFindingsByResourceUid({ resourceUid });
const [nextCurrentFinding, nextOtherFindings] = await Promise.all([
fetchCurrentFinding(),
fetchOtherFindings(),
]);
// Discard stale response if a newer request was started
if (controller.signal.aborted) return;
const adapted = adaptFindingsByResourceResponse(response);
cacheRef.current.set(resourceUid, adapted);
checkMetaRef.current = nextCurrentFinding
? extractCheckMeta(nextCurrentFinding)
: null;
// Extract check-level metadata once (stable across all resources)
if (!checkMetaRef.current) {
const main = adapted.find((f) => f.checkId === checkId) ?? adapted[0];
if (main) checkMetaRef.current = extractCheckMeta(main);
}
setFindings(adapted);
} catch (error) {
setCurrentFinding(nextCurrentFinding);
setOtherFindings(
nextOtherFindings.filter(
(finding) => finding.id !== findingId && finding.status === "FAIL",
),
);
} catch (_error) {
if (!controller.signal.aborted) {
console.error("Error fetching findings for resource:", error);
setFindings([]);
checkMetaRef.current = null;
setCurrentFinding(null);
setOtherFindings([]);
}
} finally {
if (!controller.signal.aborted) {
@@ -202,7 +256,7 @@ export function useResourceDetailDrawer({
return;
}
fetchFindings(resource.resourceUid);
fetchFindings(resource);
// Only initialize once on mount for deep-link/inline entry points.
// User-driven navigations use openDrawer/navigateTo afterwards.
// eslint-disable-next-line react-hooks/exhaustive-deps
@@ -212,13 +266,11 @@ export function useResourceDetailDrawer({
const resource = resources[index];
if (!resource) return;
clearNavigationTimeout();
navigationStartedAtRef.current = null;
setCurrentIndex(index);
setIsOpen(true);
setIsNavigating(false);
setFindings([]);
fetchFindings(resource.resourceUid);
startNavigation();
resetCurrentResourceState();
fetchFindings(resource);
};
const closeDrawer = () => {
@@ -228,10 +280,11 @@ export function useResourceDetailDrawer({
const refetchCurrent = () => {
const resource = resources[currentIndex];
if (!resource) return;
cacheRef.current.delete(resource.resourceUid);
currentFindingCacheRef.current.delete(resource.findingId);
otherFindingsCacheRef.current.delete(resource.resourceUid);
startNavigation();
setFindings([]);
fetchFindings(resource.resourceUid);
resetCurrentResourceState();
fetchFindings(resource);
};
const navigateTo = (index: number) => {
@@ -240,8 +293,8 @@ export function useResourceDetailDrawer({
setCurrentIndex(index);
startNavigation();
setFindings([]);
fetchFindings(resource.resourceUid);
resetCurrentResourceState();
fetchFindings(resource);
};
const navigatePrev = () => {
@@ -265,17 +318,7 @@ export function useResourceDetailDrawer({
}
};
// The finding whose checkId matches the drill-down's checkId
const currentFinding =
findings.find((f) => f.checkId === checkId) ?? findings[0] ?? null;
// "Other Findings For This Resource" intentionally shows only FAIL entries,
// while currentFinding (the drilled-down one) can be any status (FAIL, MANUAL, PASS…).
const otherFindings = (
currentFinding
? findings.filter((f) => f.id !== currentFinding.id)
: findings
).filter((f) => f.status === "FAIL");
const currentResource = resources[currentIndex];
return {
isOpen,
@@ -284,9 +327,9 @@ export function useResourceDetailDrawer({
checkMeta: checkMetaRef.current,
currentIndex,
totalResources: totalResourceCount ?? resources.length,
currentResource: currentResource ?? null,
currentFinding,
otherFindings,
allFindings: findings,
openDrawer,
closeDrawer,
navigatePrev,
@@ -125,7 +125,10 @@ export const ProvidersFilters = ({
placeholder={`All ${filter.labelCheckboxGroup}`}
/>
</MultiSelectTrigger>
<MultiSelectContent search={false}>
<MultiSelectContent
search={false}
width={filter.width ?? "default"}
>
<MultiSelectSelectAll>Select All</MultiSelectSelectAll>
<MultiSelectSeparator />
{filter.values.map((value) => {
@@ -47,6 +47,33 @@ describe("MultiSelect", () => {
expect(
within(screen.getByRole("combobox")).getByText("Production AWS"),
).toBeInTheDocument();
expect(
within(screen.getByRole("combobox")).queryByText("Select accounts"),
).not.toBeInTheDocument();
});
it("keeps the filter label context when a value is selected", () => {
render(
<MultiSelect values={["FAIL"]} onValuesChange={() => {}}>
<MultiSelectTrigger>
<MultiSelectValue placeholder="All Status" />
</MultiSelectTrigger>
<MultiSelectContent search={false}>
<MultiSelectItem value="FAIL">FAIL</MultiSelectItem>
<MultiSelectItem value="PASS">PASS</MultiSelectItem>
</MultiSelectContent>
</MultiSelect>,
);
expect(
within(screen.getByRole("combobox")).getByText("Status"),
).toBeInTheDocument();
expect(
within(screen.getByRole("combobox")).getByText("FAIL"),
).toBeInTheDocument();
expect(
within(screen.getByRole("combobox")).queryByText("All Status"),
).not.toBeInTheDocument();
});
it("filters items without crashing when search is enabled", async () => {
+10 -1
View File
@@ -163,6 +163,10 @@ export function MultiSelectValue({
const shouldWrap =
overflowBehavior === "wrap" ||
(overflowBehavior === "wrap-when-open" && open);
const selectedContextLabel =
placeholder && /^All\s+/i.test(placeholder) && selectedValues.size > 0
? placeholder.replace(/^All\s+/i, "").trim()
: "";
const checkOverflow = useCallback(() => {
if (valueRef.current === null) return;
@@ -222,11 +226,16 @@ export function MultiSelectValue({
className,
)}
>
{placeholder && (
{placeholder && selectedValues.size === 0 && (
<span className="text-bg-button-secondary shrink-0 font-normal">
{placeholder}
</span>
)}
{selectedContextLabel && (
<span className="text-bg-button-secondary shrink-0 font-normal">
{selectedContextLabel}
</span>
)}
{Array.from(selectedValues)
.filter((value) => items.has(value))
.map((value) => (
@@ -62,8 +62,16 @@ vi.mock("@/components/shadcn/select/multiselect", () => ({
MultiSelectValue: ({ placeholder }: { placeholder: string }) => (
<span>{placeholder}</span>
),
MultiSelectContent: ({ children }: { children: React.ReactNode }) => (
<>{children}</>
MultiSelectContent: ({
children,
width,
}: {
children: React.ReactNode;
width?: string;
}) => (
<div data-testid="multiselect-content" data-width={width ?? "default"}>
{children}
</div>
),
MultiSelectSelectAll: ({ children }: { children: React.ReactNode }) => (
<button type="button">{children}</button>
@@ -114,6 +122,13 @@ const severityFilter: FilterOption = {
values: ["critical", "high"],
};
const scanFilter: FilterOption = {
key: "filter[scan__in]",
labelCheckboxGroup: "Scan ID",
values: ["scan-1"],
width: "wide",
};
describe("DataTableFilterCustom — batch vs instant mode", () => {
beforeEach(() => {
vi.clearAllMocks();
@@ -275,4 +290,15 @@ describe("DataTableFilterCustom — batch vs instant mode", () => {
expect(screen.getByRole("button", { name: "Clear" })).toBeInTheDocument();
});
});
describe("dropdown width", () => {
it("should propagate the filter width to the dropdown content", () => {
render(<DataTableFilterCustom filters={[scanFilter]} />);
expect(screen.getByTestId("multiselect-content")).toHaveAttribute(
"data-width",
"wide",
);
});
});
});
@@ -16,6 +16,7 @@ import {
import { EntityInfo } from "@/components/ui/entities/entity-info";
import { useUrlFilters } from "@/hooks/use-url-filters";
import { isConnectionStatus, isScanEntity } from "@/lib/helper-filters";
import { cn } from "@/lib/utils";
import {
FilterEntity,
FilterOption,
@@ -29,6 +30,8 @@ export interface DataTableFilterCustomProps {
filters: FilterOption[];
/** Optional element to render at the start of the filters grid */
prependElement?: React.ReactNode;
/** Optional className override for the filters grid layout */
gridClassName?: string;
/** Hide the clear filters button and active badges (useful when parent manages this) */
hideClearButton?: boolean;
/**
@@ -54,6 +57,7 @@ export interface DataTableFilterCustomProps {
export const DataTableFilterCustom = ({
filters,
prependElement,
gridClassName,
hideClearButton = false,
mode = DATA_TABLE_FILTER_MODE.INSTANT,
onBatchChange,
@@ -173,7 +177,12 @@ export const DataTableFilterCustom = ({
};
return (
<div className="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 2xl:grid-cols-5">
<div
className={cn(
"grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 2xl:grid-cols-5",
gridClassName,
)}
>
{prependElement}
{sortedFilters().map((filter) => {
const selectedValues = getSelectedValues(filter);
@@ -189,7 +198,10 @@ export const DataTableFilterCustom = ({
placeholder={`All ${filter.labelCheckboxGroup}`}
/>
</MultiSelectTrigger>
<MultiSelectContent search={false}>
<MultiSelectContent
search={false}
width={filter.width ?? "default"}
>
<MultiSelectSelectAll>Select All</MultiSelectSelectAll>
<MultiSelectSeparator />
{filter.values.map((value) => {
@@ -0,0 +1,15 @@
import { readFileSync } from "node:fs";
import path from "node:path";
import { fileURLToPath } from "node:url";
import { describe, expect, it } from "vitest";
describe("useFindingGroupResourceState", () => {
const currentDir = path.dirname(fileURLToPath(import.meta.url));
const filePath = path.join(currentDir, "use-finding-group-resource-state.ts");
const source = readFileSync(filePath, "utf8");
it("enables muted findings only for the finding-group resource drawer", () => {
expect(source).toContain("includeMutedInOtherFindings: true");
});
});
+2 -1
View File
@@ -80,9 +80,10 @@ export function useFindingGroupResourceState({
const drawer = useResourceDetailDrawer({
resources,
checkId: group.checkId,
totalResourceCount: totalCount ?? group.resourcesTotal,
onRequestMoreResources: loadMore,
canLoadOtherFindings: group.resourcesTotal !== 0,
includeMutedInOtherFindings: true,
});
const handleDrawerMuteComplete = () => {
+115
View File
@@ -3,9 +3,11 @@ import { describe, expect, it } from "vitest";
import type { FindingGroupRow } from "@/types";
import {
canDrillDownFindingGroup,
getActiveStatusFilter,
getFilteredFindingGroupDelta,
getFindingGroupDelta,
getFindingGroupImpactedCounts,
isFindingGroupMuted,
} from "./findings-groups";
@@ -138,6 +140,119 @@ describe("getActiveStatusFilter", () => {
});
});
describe("getFindingGroupImpactedCounts", () => {
it("should fall back to pass and fail counts when resources total is zero", () => {
// Given
const group = makeGroup({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
muted: false,
});
// When
const result = getFindingGroupImpactedCounts(group);
// Then
expect(result).toEqual({ impacted: 3, total: 5 });
});
it("should include manual findings in fallback counts when resources total is zero", () => {
// Given
const group = makeGroup({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
manualCount: 4,
muted: false,
});
// When
const result = getFindingGroupImpactedCounts(group);
// Then
expect(result).toEqual({ impacted: 3, total: 9 });
});
it("should include muted pass and fail counts in the denominator when the result is muted", () => {
// Given
const group = makeGroup({
resourcesTotal: 0,
resourcesFail: 0,
failCount: 3,
passCount: 2,
failMutedCount: 4,
passMutedCount: 1,
muted: true,
});
// When
const result = getFindingGroupImpactedCounts(group);
// Then
expect(result).toEqual({ impacted: 3, total: 10 });
});
it("should keep resource-based counts when resources total is available", () => {
// Given
const group = makeGroup({
resourcesTotal: 6,
resourcesFail: 4,
failCount: 2,
passCount: 1,
failMutedCount: 5,
passMutedCount: 3,
muted: true,
});
// When
const result = getFindingGroupImpactedCounts(group);
// Then
expect(result).toEqual({ impacted: 4, total: 6 });
});
});
describe("canDrillDownFindingGroup", () => {
it("should allow drill-down when resources exist", () => {
expect(
canDrillDownFindingGroup(
makeGroup({
resourcesTotal: 2,
failCount: 0,
}),
),
).toBe(true);
});
it("should keep zero-resource fallback groups non-expandable even when fallback counts are present", () => {
expect(
canDrillDownFindingGroup(
makeGroup({
resourcesTotal: 0,
failCount: 0,
passCount: 2,
manualCount: 1,
}),
),
).toBe(false);
});
it("should keep drill-down disabled for zero-resource groups when the displayed total is zero", () => {
expect(
canDrillDownFindingGroup(
makeGroup({
resourcesTotal: 0,
failCount: 0,
passCount: 0,
}),
),
).toBe(false);
});
});
describe("getFilteredFindingGroupDelta", () => {
it("falls back to the aggregate delta when no status filter is active", () => {
expect(
+81 -24
View File
@@ -1,9 +1,10 @@
import type { FindingGroupRow } from "@/types";
type FindingGroupMutedState = Pick<
FindingGroupRow,
"muted" | "mutedCount" | "resourcesFail" | "resourcesTotal"
>;
import {
FINDING_DELTA,
FINDING_STATUS,
type FindingDelta,
type FindingGroupRow,
type FindingStatus,
} from "@/types";
type FindingGroupDeltaState = Pick<
FindingGroupRow,
@@ -23,7 +24,18 @@ type FindingGroupDeltaState = Pick<
| "changedManualMutedCount"
>;
export function isFindingGroupMuted(group: FindingGroupMutedState): boolean {
type FindingGroupDelta = Exclude<FindingDelta, null>;
type FindingGroupStatus = FindingStatus;
const FINDING_GROUP_STATUSES = Object.values(FINDING_STATUS);
export function isFindingGroupMuted(
group: Pick<
FindingGroupRow,
"muted" | "mutedCount" | "resourcesFail" | "resourcesTotal"
>,
): boolean {
if (typeof group.muted === "boolean") {
return group.muted;
}
@@ -38,6 +50,54 @@ export function isFindingGroupMuted(group: FindingGroupMutedState): boolean {
);
}
export function getFindingGroupImpactedCounts(
group: Pick<
FindingGroupRow,
| "resourcesTotal"
| "resourcesFail"
| "passCount"
| "failCount"
| "manualCount"
| "passMutedCount"
| "failMutedCount"
| "manualMutedCount"
| "muted"
| "mutedCount"
>,
): { impacted: number; total: number } {
if (group.resourcesTotal > 0) {
return {
impacted: group.resourcesFail,
total: group.resourcesTotal,
};
}
const total =
(group.passCount ?? 0) + (group.failCount ?? 0) + (group.manualCount ?? 0);
if (!isFindingGroupMuted(group)) {
return {
impacted: group.failCount ?? 0,
total,
};
}
return {
impacted: group.failCount ?? 0,
total:
total +
(group.passMutedCount ?? 0) +
(group.failMutedCount ?? 0) +
(group.manualMutedCount ?? 0),
};
}
export function canDrillDownFindingGroup(
group: Pick<FindingGroupRow, "resourcesTotal">,
): boolean {
return group.resourcesTotal > 0;
}
function getNewDeltaTotal(group: FindingGroupDeltaState): number {
const breakdownTotal =
(group.newFailCount ?? 0) +
@@ -64,21 +124,18 @@ function getChangedDeltaTotal(group: FindingGroupDeltaState): number {
export function getFindingGroupDelta(
group: FindingGroupDeltaState,
): "new" | "changed" | "none" {
): FindingGroupDelta {
if (getNewDeltaTotal(group) > 0) {
return "new";
return FINDING_DELTA.NEW;
}
if (getChangedDeltaTotal(group) > 0) {
return "changed";
return FINDING_DELTA.CHANGED;
}
return "none";
return FINDING_DELTA.NONE;
}
const FINDING_GROUP_STATUSES = ["FAIL", "PASS", "MANUAL"] as const;
type FindingGroupStatus = (typeof FINDING_GROUP_STATUSES)[number];
type FindingGroupFiltersRecord = Record<string, string | string[] | undefined>;
function parseStatusFilterValue(
@@ -142,13 +199,13 @@ function getNewDeltaForStatuses(
statuses: Set<FindingGroupStatus>,
): number {
let total = 0;
if (statuses.has("FAIL")) {
if (statuses.has(FINDING_STATUS.FAIL)) {
total += (group.newFailCount ?? 0) + (group.newFailMutedCount ?? 0);
}
if (statuses.has("PASS")) {
if (statuses.has(FINDING_STATUS.PASS)) {
total += (group.newPassCount ?? 0) + (group.newPassMutedCount ?? 0);
}
if (statuses.has("MANUAL")) {
if (statuses.has(FINDING_STATUS.MANUAL)) {
total += (group.newManualCount ?? 0) + (group.newManualMutedCount ?? 0);
}
return total;
@@ -159,13 +216,13 @@ function getChangedDeltaForStatuses(
statuses: Set<FindingGroupStatus>,
): number {
let total = 0;
if (statuses.has("FAIL")) {
if (statuses.has(FINDING_STATUS.FAIL)) {
total += (group.changedFailCount ?? 0) + (group.changedFailMutedCount ?? 0);
}
if (statuses.has("PASS")) {
if (statuses.has(FINDING_STATUS.PASS)) {
total += (group.changedPassCount ?? 0) + (group.changedPassMutedCount ?? 0);
}
if (statuses.has("MANUAL")) {
if (statuses.has(FINDING_STATUS.MANUAL)) {
total +=
(group.changedManualCount ?? 0) + (group.changedManualMutedCount ?? 0);
}
@@ -182,7 +239,7 @@ function getChangedDeltaForStatuses(
export function getFilteredFindingGroupDelta(
group: FindingGroupDeltaState,
filters: FindingGroupFiltersRecord,
): "new" | "changed" | "none" {
): FindingGroupDelta {
const activeStatuses = getActiveStatusFilter(filters);
if (!activeStatuses || !hasAnyDeltaBreakdown(group)) {
@@ -190,12 +247,12 @@ export function getFilteredFindingGroupDelta(
}
if (getNewDeltaForStatuses(group, activeStatuses) > 0) {
return "new";
return FINDING_DELTA.NEW;
}
if (getChangedDeltaForStatuses(group, activeStatuses) > 0) {
return "changed";
return FINDING_DELTA.CHANGED;
}
return "none";
return FINDING_DELTA.NONE;
}
+12 -12
View File
@@ -42,7 +42,7 @@
"@langchain/mcp-adapters": "1.1.3",
"@langchain/openai": "1.1.3",
"@lezer/highlight": "1.2.3",
"@next/third-parties": "16.1.6",
"@next/third-parties": "16.2.3",
"@radix-ui/react-alert-dialog": "1.1.14",
"@radix-ui/react-avatar": "1.1.11",
"@radix-ui/react-checkbox": "1.3.3",
@@ -93,13 +93,13 @@
"lucide-react": "0.543.0",
"marked": "15.0.12",
"nanoid": "5.1.6",
"next": "16.1.6",
"next": "16.2.3",
"next-auth": "5.0.0-beta.30",
"next-themes": "0.2.1",
"radix-ui": "1.4.2",
"react": "19.2.4",
"react": "19.2.5",
"react-day-picker": "9.13.0",
"react-dom": "19.2.4",
"react-dom": "19.2.5",
"react-hook-form": "7.62.0",
"react-markdown": "10.1.0",
"recharts": "2.15.4",
@@ -122,7 +122,7 @@
"devDependencies": {
"@eslint/eslintrc": "3.3.3",
"@iconify/react": "5.2.1",
"@next/eslint-plugin-next": "16.1.6",
"@next/eslint-plugin-next": "16.2.3",
"@playwright/test": "1.56.1",
"@testing-library/jest-dom": "6.9.1",
"@testing-library/react": "16.3.2",
@@ -143,7 +143,7 @@
"babel-plugin-react-compiler": "1.0.0",
"dotenv-expand": "12.0.3",
"eslint": "9.39.2",
"eslint-config-next": "16.1.6",
"eslint-config-next": "16.2.3",
"eslint-config-prettier": "10.1.5",
"eslint-plugin-import": "2.32.0",
"eslint-plugin-jsx-a11y": "6.10.2",
@@ -169,12 +169,12 @@
"overrides": {
"@react-types/shared": "3.26.0",
"@internationalized/date": "3.10.0",
"alert>react": "19.2.4",
"alert>react-dom": "19.2.4",
"@react-aria/ssr>react": "19.2.4",
"@react-aria/ssr>react-dom": "19.2.4",
"@react-aria/visually-hidden>react": "19.2.4",
"@react-aria/interactions>react": "19.2.4",
"alert>react": "19.2.5",
"alert>react-dom": "19.2.5",
"@react-aria/ssr>react": "19.2.5",
"@react-aria/ssr>react-dom": "19.2.5",
"@react-aria/visually-hidden>react": "19.2.5",
"@react-aria/interactions>react": "19.2.5",
"lodash": "4.17.23",
"lodash-es": "4.17.23",
"hono": "4.12.4",
+2330 -2323
View File
File diff suppressed because it is too large Load Diff
+1
View File
@@ -82,6 +82,7 @@ export type PermissionState =
export const FINDING_DELTA = {
NEW: "new",
CHANGED: "changed",
NONE: "none",
} as const;
export type FindingDelta =
| (typeof FINDING_DELTA)[keyof typeof FINDING_DELTA]
+1
View File
@@ -15,6 +15,7 @@ export interface FilterOption {
key: string;
labelCheckboxGroup: string;
values: string[];
width?: "default" | "wide";
valueLabelMapping?: Array<{ [uid: string]: FilterEntity }>;
labelFormatter?: (value: string) => string;
index?: number;