mirror of
https://github.com/prowler-cloud/prowler.git
synced 2026-03-28 02:49:53 +00:00
Compare commits
5 Commits
5.16.0
...
feat/PROWL
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f34a025acc | ||
|
|
d2886a5e10 | ||
|
|
1e1dfa29c0 | ||
|
|
747e6c9f81 | ||
|
|
dab231d626 |
7
.env
7
.env
@@ -15,13 +15,6 @@ AUTH_SECRET="N/c6mnaS5+SWq81+819OrzQZlmx1Vxtp/orjttJSmw8="
|
||||
# Google Tag Manager ID
|
||||
NEXT_PUBLIC_GOOGLE_TAG_MANAGER_ID=""
|
||||
|
||||
#### MCP Server ####
|
||||
PROWLER_MCP_VERSION=stable
|
||||
# For UI and MCP running on docker:
|
||||
PROWLER_MCP_SERVER_URL=http://mcp-server:8000/mcp
|
||||
# For UI running on host, MCP in docker:
|
||||
# PROWLER_MCP_SERVER_URL=http://localhost:8000/mcp
|
||||
|
||||
#### Code Review Configuration ####
|
||||
# Enable Claude Code standards validation on pre-push hook
|
||||
# Set to 'true' to validate changes against AGENTS.md standards via Claude Code
|
||||
|
||||
8
Makefile
8
Makefile
@@ -47,12 +47,12 @@ help: ## Show this help.
|
||||
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
|
||||
|
||||
##@ Build no cache
|
||||
build-no-cache-dev:
|
||||
docker compose -f docker-compose-dev.yml build --no-cache api-dev worker-dev worker-beat mcp-server
|
||||
build-no-cache-dev:
|
||||
docker compose -f docker-compose-dev.yml build --no-cache api-dev worker-dev worker-beat
|
||||
|
||||
##@ Development Environment
|
||||
run-api-dev: ## Start development environment with API, PostgreSQL, Valkey, MCP, and workers
|
||||
docker compose -f docker-compose-dev.yml up api-dev postgres valkey worker-dev worker-beat mcp-server
|
||||
run-api-dev: ## Start development environment with API, PostgreSQL, Valkey, and workers
|
||||
docker compose -f docker-compose-dev.yml up api-dev postgres valkey worker-dev worker-beat
|
||||
|
||||
##@ Development Environment
|
||||
build-and-run-api-dev: build-no-cache-dev run-api-dev
|
||||
|
||||
@@ -277,12 +277,11 @@ python prowler-cli.py -v
|
||||
# ✏️ High level architecture
|
||||
|
||||
## Prowler App
|
||||
**Prowler App** is composed of four key components:
|
||||
**Prowler App** is composed of three key components:
|
||||
|
||||
- **Prowler UI**: A web-based interface, built with Next.js, providing a user-friendly experience for executing Prowler scans and visualizing results.
|
||||
- **Prowler API**: A backend service, developed with Django REST Framework, responsible for running Prowler scans and storing the generated results.
|
||||
- **Prowler SDK**: A Python SDK designed to extend the functionality of the Prowler CLI for advanced capabilities.
|
||||
- **Prowler MCP Server**: A Model Context Protocol server that provides AI tools for Lighthouse, the AI-powered security assistant. This is a critical dependency for Lighthouse functionality.
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -2,12 +2,11 @@
|
||||
|
||||
All notable changes to the **Prowler API** are documented in this file.
|
||||
|
||||
## [1.17.0] (Prowler v5.16.0)
|
||||
## [1.17.0] (Prowler UNRELEASED)
|
||||
|
||||
### Added
|
||||
- New endpoint to retrieve and overview of the categories based on finding severities [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
|
||||
- Endpoints `GET /findings` and `GET /findings/latests` can now use the category filter [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
|
||||
- Account id, alias and provider name to PDF reporting table [(#9574)](https://github.com/prowler-cloud/prowler/pull/9574)
|
||||
|
||||
### Changed
|
||||
- Endpoint `GET /overviews/attack-surfaces` no longer returns the related check IDs [(#9529)](https://github.com/prowler-cloud/prowler/pull/9529)
|
||||
@@ -15,8 +14,7 @@ All notable changes to the **Prowler API** are documented in this file.
|
||||
- Increased execution delay for the first scheduled scan tasks to 5 seconds[(#9558)](https://github.com/prowler-cloud/prowler/pull/9558)
|
||||
|
||||
### Fixed
|
||||
- Made `scan_id` a required filter in the compliance overview endpoint [(#9560)](https://github.com/prowler-cloud/prowler/pull/9560)
|
||||
- Reduced unnecessary UPDATE resources operations by only saving when tag mappings change, lowering write load during scans [(#9569)](https://github.com/prowler-cloud/prowler/pull/9569)
|
||||
- Make `scan_id` a required filter in the compliance overview endpoint [(#9560)](https://github.com/prowler-cloud/prowler/pull/9560)
|
||||
|
||||
---
|
||||
|
||||
|
||||
785
api/poetry.lock
generated
785
api/poetry.lock
generated
@@ -12,18 +12,6 @@ files = [
|
||||
{file = "about_time-4.2.1-py3-none-any.whl", hash = "sha256:8bbf4c75fe13cbd3d72f49a03b02c5c7dca32169b6d49117c257e7eb3eaee341"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aiofiles"
|
||||
version = "24.1.0"
|
||||
description = "File support for asyncio."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"},
|
||||
{file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aiohappyeyeballs"
|
||||
version = "2.6.1"
|
||||
@@ -160,480 +148,6 @@ files = [
|
||||
frozenlist = ">=1.1.0"
|
||||
typing-extensions = {version = ">=4.2", markers = "python_version < \"3.13\""}
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-actiontrail20200706"
|
||||
version = "2.4.1"
|
||||
description = "Alibaba Cloud ActionTrail (20200706) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_actiontrail20200706-2.4.1-py3-none-any.whl", hash = "sha256:5dee0009db9b7cba182fbac742820f6a949287a8faafb843b5107f7dc89136da"},
|
||||
{file = "alibabacloud_actiontrail20200706-2.4.1.tar.gz", hash = "sha256:b65c6b37a96443fbe625dd5a4dd1be52a7476006a411db75206908b11588ffa8"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.16,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-credentials"
|
||||
version = "1.0.3"
|
||||
description = "The alibabacloud credentials module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud-credentials-1.0.3.tar.gz", hash = "sha256:9d8707e96afc6f348e23f5677ed15a21c2dfce7cfe6669776548ee4c80e1dfaf"},
|
||||
{file = "alibabacloud_credentials-1.0.3-py3-none-any.whl", hash = "sha256:30c8302f204b663c655d97e1c283ee9f9f84a6257d7901b931477d6cf34445a8"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
aiofiles = ">=22.1.0,<25.0.0"
|
||||
alibabacloud-credentials-api = ">=1.0.0,<2.0.0"
|
||||
alibabacloud-tea = ">=0.4.0"
|
||||
APScheduler = ">=3.10.0,<4.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-credentials-api"
|
||||
version = "1.0.0"
|
||||
description = "Alibaba Cloud Gateway SPI SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud-credentials-api-1.0.0.tar.gz", hash = "sha256:8c340038d904f0218d7214a8f4088c31912bfcf279af2cbc7d9be4897a97dd2f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-cs20151215"
|
||||
version = "6.1.0"
|
||||
description = "Alibaba Cloud CS (20151215) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_cs20151215-6.1.0-py3-none-any.whl", hash = "sha256:75e90b1bb9acca2236244bb0e44234ca4805d456ea4303ba4225ac15152a458e"},
|
||||
{file = "alibabacloud_cs20151215-6.1.0.tar.gz", hash = "sha256:5b3d99306701bf499ddd57cd9f2905b7721cb1bb4bb38ffe4d051f7b4e80e355"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.16,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-darabonba-array"
|
||||
version = "0.1.0"
|
||||
description = "Alibaba Cloud Darabonba Array SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_darabonba_array-0.1.0.tar.gz", hash = "sha256:7f9a7c632518ff4f0cebb0d4e825a48c12e7cf0b9016ea25054dd73732e155aa"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-darabonba-encode-util"
|
||||
version = "0.0.2"
|
||||
description = "Darabonba Util Library for Alibaba Cloud Python SDK"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_darabonba_encode_util-0.0.2.tar.gz", hash = "sha256:f1c484f276d60450fa49b4b2987194e741fcb2f7faae7f287c0ae65abc85fd4d"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-darabonba-map"
|
||||
version = "0.0.1"
|
||||
description = "Alibaba Cloud Darabonba Map SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_darabonba_map-0.0.1.tar.gz", hash = "sha256:adb17384658a1a8f72418f1838d4b6a5fd2566bfd392a3ef06d9dbb0a595a23f"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-darabonba-signature-util"
|
||||
version = "0.0.4"
|
||||
description = "Darabonba Util Library for Alibaba Cloud Python SDK"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_darabonba_signature_util-0.0.4.tar.gz", hash = "sha256:71d79b2ae65957bcfbf699ced894fda782b32f9635f1616635533e5a90d5feb0"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
cryptography = ">=3.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-darabonba-string"
|
||||
version = "0.0.4"
|
||||
description = "Alibaba Cloud Darabonba String Library for Python"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud-darabonba-string-0.0.4.tar.gz", hash = "sha256:ec6614c0448dadcbc5e466485838a1f8cfdd911135bea739e20b14511270c6f7"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-darabonba-time"
|
||||
version = "0.0.1"
|
||||
description = "Alibaba Cloud Darabonba Time SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_darabonba_time-0.0.1.tar.gz", hash = "sha256:0ad9c7b0696570d1a3f40106cc7777f755fd92baa0d1dcab5b7df78dde5b922d"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-ecs20140526"
|
||||
version = "7.2.5"
|
||||
description = "Alibaba Cloud Elastic Compute Service (20140526) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_ecs20140526-7.2.5-py3-none-any.whl", hash = "sha256:10bda5e185f6ba899e7d51477373595c629d66db7530a8a37433fb4e9034a96f"},
|
||||
{file = "alibabacloud_ecs20140526-7.2.5.tar.gz", hash = "sha256:2abbe630ce42d69061821f38950b938c5982cc31902ccd7132d05be328765a55"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.16,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-endpoint-util"
|
||||
version = "0.0.4"
|
||||
description = "The endpoint-util module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_endpoint_util-0.0.4.tar.gz", hash = "sha256:a593eb8ddd8168d5dc2216cd33111b144f9189fcd6e9ca20e48f358a739bbf90"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-gateway-oss"
|
||||
version = "0.0.17"
|
||||
description = "Alibaba Cloud OSS SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_gateway_oss-0.0.17.tar.gz", hash = "sha256:8c4b66c8c7dd285fc210ee232ab3f062b5573258752804d19382000746531e29"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud_credentials = ">=0.3.5"
|
||||
alibabacloud_darabonba_array = ">=0.1.0,<1.0.0"
|
||||
alibabacloud_darabonba_encode_util = ">=0.0.2,<1.0.0"
|
||||
alibabacloud_darabonba_map = ">=0.0.1,<1.0.0"
|
||||
alibabacloud_darabonba_signature_util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud_darabonba_string = ">=0.0.4,<1.0.0"
|
||||
alibabacloud_darabonba_time = ">=0.0.1,<1.0.0"
|
||||
alibabacloud_gateway_oss_util = ">=0.0.3,<1.0.0"
|
||||
alibabacloud_gateway_spi = ">=0.0.1,<1.0.0"
|
||||
alibabacloud_openapi_util = ">=0.2.1,<1.0.0"
|
||||
alibabacloud_oss_util = ">=0.0.5,<1.0.0"
|
||||
alibabacloud_tea_util = ">=0.3.11,<1.0.0"
|
||||
alibabacloud_tea_xml = ">=0.0.2,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-gateway-oss-util"
|
||||
version = "0.0.3"
|
||||
description = "Alibaba Cloud OSS Util Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_gateway_oss_util-0.0.3.tar.gz", hash = "sha256:5eb7fa450dc7350d5c71577974b9d7f489479e5c5ec7efc1c5376385e8c1c0a5"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-gateway-sls"
|
||||
version = "0.4.0"
|
||||
description = "Alibaba Cloud SLS Gateway Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_gateway_sls-0.4.0-py3-none-any.whl", hash = "sha256:a0299a83a5528025983b42b7533a28028461bced5e180a66f97999e0134760a6"},
|
||||
{file = "alibabacloud_gateway_sls-0.4.0.tar.gz", hash = "sha256:9d2aceb377c9b3ed0558149fda16fe39fa114cc0a22e22a88dc76efdda34633b"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-credentials = ">=1.0.2,<2.0.0"
|
||||
alibabacloud-darabonba-array = ">=0.1.0,<1.0.0"
|
||||
alibabacloud-darabonba-encode-util = ">=0.0.2,<1.0.0"
|
||||
alibabacloud-darabonba-map = ">=0.0.1,<1.0.0"
|
||||
alibabacloud-darabonba-signature-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-darabonba-string = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-gateway-sls-util = ">=0.4.0,<1.0.0"
|
||||
alibabacloud-gateway-spi = ">=0.0.2,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-gateway-sls-util"
|
||||
version = "0.4.0"
|
||||
description = "Alibaba Cloud SLS Util Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_gateway_sls_util-0.4.0-py3-none-any.whl", hash = "sha256:c91ab7fe55af526a01d25b0d431088c4d241b160db055da3d8cb7330bd74595a"},
|
||||
{file = "alibabacloud_gateway_sls_util-0.4.0.tar.gz", hash = "sha256:f8b683a36a2ae3fe9a8225d3d97773ea769bdf9cdf4f4d033eab2eb6062ddd1f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
aliyun-log-fastpb = ">=0.2.0"
|
||||
lz4 = ">=4.3.2"
|
||||
zstd = ">=1.5.5.1"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-gateway-spi"
|
||||
version = "0.0.3"
|
||||
description = "Alibaba Cloud Gateway SPI SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_gateway_spi-0.0.3.tar.gz", hash = "sha256:10d1c53a3fc5f87915fbd6b4985b98338a776e9b44a0263f56643c5048223b8b"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud_credentials = ">=0.3.4"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-openapi-util"
|
||||
version = "0.2.2"
|
||||
description = "Aliyun Tea OpenApi Library for Python"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_openapi_util-0.2.2.tar.gz", hash = "sha256:ebbc3906f554cb4bf8f513e43e8a33e8b6a3d4a0ef13617a0e14c3dda8ef52a8"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud_tea_util = ">=0.0.2"
|
||||
cryptography = ">=3.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-oss-util"
|
||||
version = "0.0.6"
|
||||
description = "The oss util module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_oss_util-0.0.6.tar.gz", hash = "sha256:d3ecec36632434bd509a113e8cf327dc23e830ac8d9dd6949926f4e334c8b5d6"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-tea = "*"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-oss20190517"
|
||||
version = "1.0.6"
|
||||
description = "Alibaba Cloud Object Storage Service (20190517) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_oss20190517-1.0.6-py3-none-any.whl", hash = "sha256:365fda353de6658a1a289f4d70dcd0394e2a8e2921b6b5834ba6d9772121d2f6"},
|
||||
{file = "alibabacloud_oss20190517-1.0.6.tar.gz", hash = "sha256:7cd0fb16af613ceb38d2e0e529aa1f58038c7cf59eb67c8c8775ae44ea717852"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-gateway-oss = ">=0.0.9,<1.0.0"
|
||||
alibabacloud-gateway-spi = ">=0.0.1,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.1,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.6,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.11,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-ram20150501"
|
||||
version = "1.2.0"
|
||||
description = "Alibaba Cloud Resource Access Management (20150501) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_ram20150501-1.2.0-py3-none-any.whl", hash = "sha256:03a0f2a0259848787c1f74e802b486184a88e04183486bd9398766971e5eb00a"},
|
||||
{file = "alibabacloud_ram20150501-1.2.0.tar.gz", hash = "sha256:6253513c8880769f4fd5b36fedddb362a9ca628ad9ae9c05c0eeacf5fbc95b42"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.15,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-rds20140815"
|
||||
version = "12.0.0"
|
||||
description = "Alibaba Cloud rds (20140815) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_rds20140815-12.0.0-py3-none-any.whl", hash = "sha256:0bd7e2018a428d86b1b0681087336e74665b48fc3eb0a13c4f4377ed5eab2b08"},
|
||||
{file = "alibabacloud_rds20140815-12.0.0.tar.gz", hash = "sha256:e7421d94f18a914c0a06b0e7fad0daff557713f1c97d415d463a78c1270e9b98"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.15,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-sas20181203"
|
||||
version = "6.1.0"
|
||||
description = "Alibaba Cloud Threat Detection (20181203) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_sas20181203-6.1.0-py3-none-any.whl", hash = "sha256:1ad735332c50c7961be036b17420d56b5ec3b5557e3aea1daa19491e8b75da20"},
|
||||
{file = "alibabacloud_sas20181203-6.1.0.tar.gz", hash = "sha256:e49ffd53e630274a8bf5a8299ca753023ad118510c80f6d9c6fb018b7479bf37"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.16,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-sls20201230"
|
||||
version = "5.9.0"
|
||||
description = "Alibaba Cloud Log Service (20201230) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_sls20201230-5.9.0-py3-none-any.whl", hash = "sha256:c4ae14096817a9686af5a0ae2389f1f6a8781e60b9edb8643445250cf15c26f1"},
|
||||
{file = "alibabacloud_sls20201230-5.9.0.tar.gz", hash = "sha256:bea830b64fbc7ed1719ba386ceeefb120f08d705f03eb0e02409dc6f12a291da"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-gateway-sls = ">=0.3.0,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.16,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-sts20150401"
|
||||
version = "1.1.6"
|
||||
description = "Alibaba Cloud Sts (20150401) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_sts20150401-1.1.6-py3-none-any.whl", hash = "sha256:627f5ca1f86e19b0bf8ce0e99071a36fb65579fad9256fbee38fdc8d500598e9"},
|
||||
{file = "alibabacloud_sts20150401-1.1.6.tar.gz", hash = "sha256:c2529b41e0e4531e21cb393e4df346e19fd6d54cc6337d1138dbcd2191438d4c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.15,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-tea"
|
||||
version = "0.4.3"
|
||||
description = "The tea module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud-tea-0.4.3.tar.gz", hash = "sha256:ec8053d0aa8d43ebe1deb632d5c5404339b39ec9a18a0707d57765838418504a"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
aiohttp = ">=3.7.0,<4.0.0"
|
||||
requests = ">=2.21.0,<3.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-tea-openapi"
|
||||
version = "0.4.1"
|
||||
description = "Alibaba Cloud openapi SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_tea_openapi-0.4.1-py3-none-any.whl", hash = "sha256:e46bfa3ca34086d2c357d217a0b7284ecbd4b3bab5c88e075e73aec637b0e4a0"},
|
||||
{file = "alibabacloud_tea_openapi-0.4.1.tar.gz", hash = "sha256:2384b090870fdb089c3c40f3fb8cf0145b8c7d6c14abbac521f86a01abb5edaf"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-credentials = ">=1.0.2,<2.0.0"
|
||||
alibabacloud-gateway-spi = ">=0.0.2,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
cryptography = ">=3.0.0,<45.0.0"
|
||||
darabonba-core = ">=1.0.3,<2.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-tea-util"
|
||||
version = "0.3.14"
|
||||
description = "The tea-util module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_tea_util-0.3.14-py3-none-any.whl", hash = "sha256:10d3e5c340d8f7ec69dd27345eb2fc5a1dab07875742525edf07bbe86db93bfe"},
|
||||
{file = "alibabacloud_tea_util-0.3.14.tar.gz", hash = "sha256:708e7c9f64641a3c9e0e566365d2f23675f8d7c2a3e2971d9402ceede0408cdb"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-tea = ">=0.3.3"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-tea-xml"
|
||||
version = "0.0.3"
|
||||
description = "The tea-xml module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_tea_xml-0.0.3.tar.gz", hash = "sha256:979cb51fadf43de77f41c69fc69c12529728919f849723eb0cd24eb7b048a90c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-tea = ">=0.4.0"
|
||||
|
||||
[[package]]
|
||||
name = "alibabacloud-vpc20160428"
|
||||
version = "6.13.0"
|
||||
description = "Alibaba Cloud Virtual Private Cloud (20160428) SDK Library for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "alibabacloud_vpc20160428-6.13.0-py3-none-any.whl", hash = "sha256:933cf1e74322a20a2df27ca6323760d857744a4246eeadc9fb3eae01322fb1c6"},
|
||||
{file = "alibabacloud_vpc20160428-6.13.0.tar.gz", hash = "sha256:daf00679a83d422799f9fcf263739fe1f360641675843cbfbe623833fc8b1681"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud-endpoint-util = ">=0.0.4,<1.0.0"
|
||||
alibabacloud-openapi-util = ">=0.2.2,<1.0.0"
|
||||
alibabacloud-tea-openapi = ">=0.3.16,<1.0.0"
|
||||
alibabacloud-tea-util = ">=0.3.13,<1.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "alive-progress"
|
||||
version = "3.3.0"
|
||||
@@ -650,32 +164,6 @@ files = [
|
||||
about-time = "4.2.1"
|
||||
graphemeu = "0.7.2"
|
||||
|
||||
[[package]]
|
||||
name = "aliyun-log-fastpb"
|
||||
version = "0.2.0"
|
||||
description = "Fast protobuf serialization for Aliyun Log using PyO3 and quick-protobuf"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:51633d92d2b349aed4843c0b503454fb4f7d73eeaaa54f82aa5a36c10c064ef5"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:d2984aafc61ccbbf1db2589ce90b6d5a26e72dba137fb1fdf7f61ce3faa967c0"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:181fc61ac9934f58b0880fa5617a4a4dc709dba09f8be95b5a71e828f2e48053"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:12b8bfddf0bc5450f16f1954c6387a73da124fae10d1205a17a0117e66bb56db"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8fbc83cbaa51d332e5e68871c1200014f1f3de54a8cba4fb55a634ee145cd4e4"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42a86a6e11dd227d595fa23f69d30588446af19d045d1003bd1b66b5c9a55485"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd92c0b84ba300c1d1c227204c5f2fff243cea80bc3f9399293385e87c82ee3e"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7c07a6d81a3eab6666949240da305236ed2350c305154d7e39fcc121fc52291"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2cff4fbdd0edff94adcee1dcabf16daacb5d336a12fc897887aa6e4f0ad25152"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5a451809e2a062accbb8dae8750e507e58806e4a8da48d69215cdeef428e9d63"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:61f09df30232f1f5628d13310cf0e175171399ea1c75a8470e9f9d97b045bfb5"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:a5fbf0d41d8c0c964a3dc8dd0ee2e732f876b803e0ed3432550ef3b84dde84f1"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ae2f84ed0777e00045791044a56413f370afbd5b061505f5ded540c04b19c58e"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-win32.whl", hash = "sha256:967f9656c805602fd9be07d8c2756ad89204c852c99689c3c71aa035416ef42a"},
|
||||
{file = "aliyun_log_fastpb-0.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:bbdcf7b85f0f3437c2a8e8a1db0ef5584d21468b7c7a358269a4c651c84f4a54"},
|
||||
{file = "aliyun_log_fastpb-0.2.0.tar.gz", hash = "sha256:91c714e76fb941c9a0db6b1aa1f4c56cb1626254ff5444c1179860f5e5b63d93"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "amqp"
|
||||
version = "5.3.1"
|
||||
@@ -723,34 +211,6 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
|
||||
[package.extras]
|
||||
trio = ["trio (>=0.26.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "apscheduler"
|
||||
version = "3.11.1"
|
||||
description = "In-process task scheduler with Cron-like capabilities"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "apscheduler-3.11.1-py3-none-any.whl", hash = "sha256:6162cb5683cb09923654fa9bdd3130c4be4bfda6ad8990971c9597ecd52965d2"},
|
||||
{file = "apscheduler-3.11.1.tar.gz", hash = "sha256:0db77af6400c84d1747fe98a04b8b58f0080c77d11d338c4f507a9752880f221"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
tzlocal = ">=3.0"
|
||||
|
||||
[package.extras]
|
||||
doc = ["packaging", "sphinx", "sphinx-rtd-theme (>=1.3.0)"]
|
||||
etcd = ["etcd3", "protobuf (<=3.21.0)"]
|
||||
gevent = ["gevent"]
|
||||
mongodb = ["pymongo (>=3.0)"]
|
||||
redis = ["redis (>=3.0)"]
|
||||
rethinkdb = ["rethinkdb (>=2.4.0)"]
|
||||
sqlalchemy = ["sqlalchemy (>=1.4)"]
|
||||
test = ["APScheduler[etcd,mongodb,redis,rethinkdb,sqlalchemy,tornado,zookeeper]", "PySide6 ; platform_python_implementation == \"CPython\" and python_version < \"3.14\"", "anyio (>=4.5.2)", "gevent ; python_version < \"3.14\"", "pytest", "pytz", "twisted ; python_version < \"3.14\""]
|
||||
tornado = ["tornado (>=4.3)"]
|
||||
twisted = ["twisted"]
|
||||
zookeeper = ["kazoo"]
|
||||
|
||||
[[package]]
|
||||
name = "asgiref"
|
||||
version = "3.9.1"
|
||||
@@ -2068,22 +1528,6 @@ files = [
|
||||
docs = ["ipython", "matplotlib", "numpydoc", "sphinx"]
|
||||
tests = ["pytest", "pytest-cov", "pytest-xdist"]
|
||||
|
||||
[[package]]
|
||||
name = "darabonba-core"
|
||||
version = "1.0.5"
|
||||
description = "The darabonba module of alibabaCloud Python SDK."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "darabonba_core-1.0.5-py3-none-any.whl", hash = "sha256:671ab8dbc4edc2a8f88013da71646839bb8914f1259efc069353243ef52ea27c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
aiohttp = ">=3.7.0,<4.0.0"
|
||||
alibabacloud-tea = "*"
|
||||
requests = ">=2.21.0,<3.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "dash"
|
||||
version = "3.1.1"
|
||||
@@ -4039,78 +3483,6 @@ html5 = ["html5lib"]
|
||||
htmlsoup = ["BeautifulSoup4"]
|
||||
source = ["Cython (>=3.0.11,<3.1.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "lz4"
|
||||
version = "4.4.5"
|
||||
description = "LZ4 Bindings for Python"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "lz4-4.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d221fa421b389ab2345640a508db57da36947a437dfe31aeddb8d5c7b646c22d"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7dc1e1e2dbd872f8fae529acd5e4839efd0b141eaa8ae7ce835a9fe80fbad89f"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e928ec2d84dc8d13285b4a9288fd6246c5cde4f5f935b479f50d986911f085e3"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:daffa4807ef54b927451208f5f85750c545a4abbff03d740835fc444cd97f758"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a2b7504d2dffed3fd19d4085fe1cc30cf221263fd01030819bdd8d2bb101cf1"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-win32.whl", hash = "sha256:0846e6e78f374156ccf21c631de80967e03cc3c01c373c665789dc0c5431e7fc"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:7c4e7c44b6a31de77d4dc9772b7d2561937c9588a734681f70ec547cfbc51ecd"},
|
||||
{file = "lz4-4.4.5-cp310-cp310-win_arm64.whl", hash = "sha256:15551280f5656d2206b9b43262799c89b25a25460416ec554075a8dc568e4397"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d6da84a26b3aa5da13a62e4b89ab36a396e9327de8cd48b436a3467077f8ccd4"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61d0ee03e6c616f4a8b69987d03d514e8896c8b1b7cc7598ad029e5c6aedfd43"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:33dd86cea8375d8e5dd001e41f321d0a4b1eb7985f39be1b6a4f466cd480b8a7"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:609a69c68e7cfcfa9d894dc06be13f2e00761485b62df4e2472f1b66f7b405fb"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:75419bb1a559af00250b8f1360d508444e80ed4b26d9d40ec5b09fe7875cb989"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-win32.whl", hash = "sha256:12233624f1bc2cebc414f9efb3113a03e89acce3ab6f72035577bc61b270d24d"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:8a842ead8ca7c0ee2f396ca5d878c4c40439a527ebad2b996b0444f0074ed004"},
|
||||
{file = "lz4-4.4.5-cp311-cp311-win_arm64.whl", hash = "sha256:83bc23ef65b6ae44f3287c38cbf82c269e2e96a26e560aa551735883388dcc4b"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:df5aa4cead2044bab83e0ebae56e0944cc7fcc1505c7787e9e1057d6d549897e"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d0bf51e7745484d2092b3a51ae6eb58c3bd3ce0300cf2b2c14f76c536d5697a"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7b62f94b523c251cf32aa4ab555f14d39bd1a9df385b72443fd76d7c7fb051f5"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2c3ea562c3af274264444819ae9b14dbbf1ab070aff214a05e97db6896c7597e"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:24092635f47538b392c4eaeff14c7270d2c8e806bf4be2a6446a378591c5e69e"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-win32.whl", hash = "sha256:214e37cfe270948ea7eb777229e211c601a3e0875541c1035ab408fbceaddf50"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:713a777de88a73425cf08eb11f742cd2c98628e79a8673d6a52e3c5f0c116f33"},
|
||||
{file = "lz4-4.4.5-cp312-cp312-win_arm64.whl", hash = "sha256:a88cbb729cc333334ccfb52f070463c21560fca63afcf636a9f160a55fac3301"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6bb05416444fafea170b07181bc70640975ecc2a8c92b3b658c554119519716c"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b424df1076e40d4e884cfcc4c77d815368b7fb9ebcd7e634f937725cd9a8a72a"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:216ca0c6c90719731c64f41cfbd6f27a736d7e50a10b70fad2a9c9b262ec923d"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:533298d208b58b651662dd972f52d807d48915176e5b032fb4f8c3b6f5fe535c"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:451039b609b9a88a934800b5fc6ee401c89ad9c175abf2f4d9f8b2e4ef1afc64"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-win32.whl", hash = "sha256:a5f197ffa6fc0e93207b0af71b302e0a2f6f29982e5de0fbda61606dd3a55832"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-win_amd64.whl", hash = "sha256:da68497f78953017deb20edff0dba95641cc86e7423dfadf7c0264e1ac60dc22"},
|
||||
{file = "lz4-4.4.5-cp313-cp313-win_arm64.whl", hash = "sha256:c1cfa663468a189dab510ab231aad030970593f997746d7a324d40104db0d0a9"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67531da3b62f49c939e09d56492baf397175ff39926d0bd5bd2d191ac2bff95f"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a1acbbba9edbcbb982bc2cac5e7108f0f553aebac1040fbec67a011a45afa1ba"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a482eecc0b7829c89b498fda883dbd50e98153a116de612ee7c111c8bcf82d1d"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e099ddfaa88f59dd8d36c8a3c66bd982b4984edf127eb18e30bb49bdba68ce67"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a2af2897333b421360fdcce895c6f6281dc3fab018d19d341cf64d043fc8d90d"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-win32.whl", hash = "sha256:66c5de72bf4988e1b284ebdd6524c4bead2c507a2d7f172201572bac6f593901"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-win_amd64.whl", hash = "sha256:cdd4bdcbaf35056086d910d219106f6a04e1ab0daa40ec0eeef1626c27d0fddb"},
|
||||
{file = "lz4-4.4.5-cp313-cp313t-win_arm64.whl", hash = "sha256:28ccaeb7c5222454cd5f60fcd152564205bcb801bd80e125949d2dfbadc76bbd"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c216b6d5275fc060c6280936bb3bb0e0be6126afb08abccde27eed23dead135f"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c8e71b14938082ebaf78144f3b3917ac715f72d14c076f384a4c062df96f9df6"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9b5e6abca8df9f9bdc5c3085f33ff32cdc86ed04c65e0355506d46a5ac19b6e9"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b84a42da86e8ad8537aabef062e7f661f4a877d1c74d65606c49d835d36d668"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bba042ec5a61fa77c7e380351a61cb768277801240249841defd2ff0a10742f"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-win32.whl", hash = "sha256:bd85d118316b53ed73956435bee1997bd06cc66dd2fa74073e3b1322bd520a67"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-win_amd64.whl", hash = "sha256:92159782a4502858a21e0079d77cdcaade23e8a5d252ddf46b0652604300d7be"},
|
||||
{file = "lz4-4.4.5-cp314-cp314-win_arm64.whl", hash = "sha256:d994b87abaa7a88ceb7a37c90f547b8284ff9da694e6afcfaa8568d739faf3f7"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f6538aaaedd091d6e5abdaa19b99e6e82697d67518f114721b5248709b639fad"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:13254bd78fef50105872989a2dc3418ff09aefc7d0765528adc21646a7288294"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e64e61f29cf95afb43549063d8433b46352baf0c8a70aa45e2585618fcf59d86"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ff1b50aeeec64df5603f17984e4b5be6166058dcf8f1e26a3da40d7a0f6ab547"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1dd4d91d25937c2441b9fc0f4af01704a2d09f30a38c5798bc1d1b5a15ec9581"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-win32.whl", hash = "sha256:d64141085864918392c3159cdad15b102a620a67975c786777874e1e90ef15ce"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:f32b9e65d70f3684532358255dc053f143835c5f5991e28a5ac4c93ce94b9ea7"},
|
||||
{file = "lz4-4.4.5-cp39-cp39-win_arm64.whl", hash = "sha256:f9b8bde9909a010c75b3aea58ec3910393b758f3c219beed67063693df854db0"},
|
||||
{file = "lz4-4.4.5.tar.gz", hash = "sha256:5f0b9e53c1e82e88c10d7c180069363980136b9d7a8306c4dca4f760d60c39f0"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
docs = ["sphinx (>=1.6.0)", "sphinx_bootstrap_theme"]
|
||||
flake8 = ["flake8"]
|
||||
tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"]
|
||||
|
||||
[[package]]
|
||||
name = "markdown"
|
||||
version = "3.9"
|
||||
@@ -5408,7 +4780,7 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "prowler"
|
||||
version = "5.16.0"
|
||||
version = "5.14.0"
|
||||
description = "Prowler is an Open Source security tool to perform AWS, GCP and Azure security best practices assessments, audits, incident response, continuous monitoring, hardening and forensics readiness. It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, FedRAMP, PCI-DSS, GDPR, HIPAA, FFIEC, SOC2, GXP, AWS Well-Architected Framework Security Pillar, AWS Foundational Technical Review (FTR), ENS (Spanish National Security Scheme) and your custom security frameworks."
|
||||
optional = false
|
||||
python-versions = ">3.9.1,<3.13"
|
||||
@@ -5417,19 +4789,6 @@ files = []
|
||||
develop = false
|
||||
|
||||
[package.dependencies]
|
||||
alibabacloud_actiontrail20200706 = "2.4.1"
|
||||
alibabacloud_credentials = "1.0.3"
|
||||
alibabacloud_cs20151215 = "6.1.0"
|
||||
alibabacloud_ecs20140526 = "7.2.5"
|
||||
alibabacloud-gateway-oss-util = "0.0.3"
|
||||
alibabacloud_oss20190517 = "1.0.6"
|
||||
alibabacloud_ram20150501 = "1.2.0"
|
||||
alibabacloud-rds20140815 = "12.0.0"
|
||||
alibabacloud_sas20181203 = "6.1.0"
|
||||
alibabacloud-sls20201230 = "5.9.0"
|
||||
alibabacloud_sts20150401 = "1.1.6"
|
||||
alibabacloud_tea_openapi = "0.4.1"
|
||||
alibabacloud_vpc20160428 = "6.13.0"
|
||||
alive-progress = "3.3.0"
|
||||
awsipranges = "0.3.3"
|
||||
azure-identity = "1.21.0"
|
||||
@@ -5493,8 +4852,8 @@ tzlocal = "5.3.1"
|
||||
[package.source]
|
||||
type = "git"
|
||||
url = "https://github.com/prowler-cloud/prowler.git"
|
||||
reference = "v5.16"
|
||||
resolved_reference = "f0e59bcb13383d7bb1aa9804906ac99aed820a09"
|
||||
reference = "master"
|
||||
resolved_reference = "de5aba6d4db54eed4c95cb7629443da186c17afd"
|
||||
|
||||
[[package]]
|
||||
name = "psutil"
|
||||
@@ -7707,143 +7066,7 @@ docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"]
|
||||
test = ["coverage[toml]", "zope.event", "zope.testing"]
|
||||
testing = ["coverage[toml]", "zope.event", "zope.testing"]
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
version = "1.5.7.2"
|
||||
description = "ZSTD Bindings for Python"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "zstd-1.5.7.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e17104d0e88367a7571dde4286e233126c8551691ceff11f9ae2e3a3ac1bb483"},
|
||||
{file = "zstd-1.5.7.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:d6ee5dfada4c8fa32f43cc092fcf7d8482da6ad242c22fdf780f7eebd0febcc7"},
|
||||
{file = "zstd-1.5.7.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:ae1100776cb400100e2d2f427b50dc983c005c38cd59502eb56d2cfea3402ad5"},
|
||||
{file = "zstd-1.5.7.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:489a0ff15caf7640851e63f85b680c4279c99094cd500a29c7ed3ab82505fce0"},
|
||||
{file = "zstd-1.5.7.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:92590cf54318849d492445c885f1a42b9dbb47cdc070659c7cb61df6e8531047"},
|
||||
{file = "zstd-1.5.7.2-cp27-cp27mu-manylinux_2_4_i686.whl", hash = "sha256:2bc21650f7b9c058a3c4cb503e906fe9cce293941ec1b48bc5d005c3b4422b42"},
|
||||
{file = "zstd-1.5.7.2-cp27-cp27mu-manylinux_2_4_x86_64.whl", hash = "sha256:7b13e7eef9aa192804d38bf413924d347c6f6c6ac07f5a0c1ae4a6d7b3af70f0"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d3f14c5c405ea353b68fe105236780494eb67c756ecd346fd295498f5eab6d24"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07d2061df22a3efc06453089e6e8b96e58f5bb7a0c4074dcfd0b0ce243ddde72"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:27e55aa2043ba7d8a08aba0978c652d4d5857338a8188aa84522569f3586c7bb"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e97933addfd71ea9608306f18dc18e7d2a5e64212ba2bb9a4ccb6d714f9f280"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-manylinux_2_4_i686.whl", hash = "sha256:27e2ed58b64001c9ef0a8e028625477f1a6ed4ca949412ff6548544945cc59c2"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-manylinux_2_4_x86_64.whl", hash = "sha256:92f072819fc0c7e8445f51a232c9ad76642027c069d2f36470cdb5e663839cdb"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:2a653cdd2c52d60c28e519d44bde8d759f2c1837f0ff8e8e1b0045ca62fcf70e"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:047803d87d910f4905f48d99aeff1e0539ec2e4f4bf17d077701b5d0b2392a95"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0d8c1dc947e5ccea3bd81043080213685faf1d43886c27c51851fabf325f05c0"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8291d393321fac30604c6bbf40067103fee315aa476647a5eaecf877ee53496f"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-win32.whl", hash = "sha256:6922ceac5f2d60bb57a7875168c8aa442477b83e8951f2206cf1e9be788b0a6e"},
|
||||
{file = "zstd-1.5.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:346d1e4774d89a77d67fc70d53964bfca57c0abecfd885a4e00f87fd7c71e074"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f799c1e9900ad77e7a3d994b9b5146d7cfd1cbd1b61c3db53a697bf21ffcc57b"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ff4c667f29101566a7b71f06bbd677a63192818396003354131f586383db042"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8526a32fa9f67b07fd09e62474e345f8ca1daf3e37a41137643d45bd1bc90773"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:2cec2472760d48a7a3445beaba509d3f7850e200fed65db15a1a66e315baec6a"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-manylinux_2_4_i686.whl", hash = "sha256:a200c479ee1bb661bc45518e016a1fdc215a1d8f7e4bf6c7de0af254976cfdf6"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-manylinux_2_4_x86_64.whl", hash = "sha256:f5d159e57a13147aa8293c0f14803a75e9039fd8afdf6cf1c8c2289fb4d2333a"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:7206934a2bd390080e972a1fed5a897e184dfd71dbb54e978dc11c6b295e1806"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e0027b20f296d1c9a8e85b8436834cf46560240a29d623aa8eaa8911832eb58"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d6b17e5581dd1a13437079bd62838d2635db8eb8aca9c0e9251faa5d4d40a6d7"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b13285c99cc710f60dd270785ec75233018870a1831f5655d862745470a0ca29"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-win32.whl", hash = "sha256:cdb5ec80da299f63f8aeccec0bff3247e96252d4c8442876363ff1b438d8049b"},
|
||||
{file = "zstd-1.5.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:4f6861c8edceb25fda37cdaf422fc5f15dcc88ced37c6a5b3c9011eda51aa218"},
|
||||
{file = "zstd-1.5.7.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ebe3e60dbace52525fa7aa604479e231dc3e4fcc76d0b4c54d8abce5e58734"},
|
||||
{file = "zstd-1.5.7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ef201b6f7d3a6751d85cc52f9e6198d4d870e83d490172016b64a6dd654a9583"},
|
||||
{file = "zstd-1.5.7.2-cp312-cp312-manylinux_2_14_x86_64.whl", hash = "sha256:ac7bdfedda51b1fcdcf0ab69267d01256fc97ddf666ce894fde0fae9f3630eac"},
|
||||
{file = "zstd-1.5.7.2-cp312-cp312-manylinux_2_4_i686.whl", hash = "sha256:b835405cc4080b378e45029f2fe500e408d1eaedfba7dd7402aba27af16955f9"},
|
||||
{file = "zstd-1.5.7.2-cp312-cp312-win32.whl", hash = "sha256:e4cf97bb97ed6dbb62d139d68fd42fa1af51fd26fd178c501f7b62040e897c50"},
|
||||
{file = "zstd-1.5.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:55e2edc4560a5cf8ee9908595e90a15b1f47536ea9aad4b2889f0e6165890a38"},
|
||||
{file = "zstd-1.5.7.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6e684e27064b6550aa2e7dc85d171ea1b62cb5930a2c99b3df9b30bf620b5c06"},
|
||||
{file = "zstd-1.5.7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:fd6262788a98807d6b2befd065d127db177c1cd76bb8e536e0dded419eb7c7fb"},
|
||||
{file = "zstd-1.5.7.2-cp313-cp313-manylinux_2_14_x86_64.whl", hash = "sha256:53948be45f286a1b25c07a6aa2aca5c902208eb3df9fe36cf891efa0394c8b71"},
|
||||
{file = "zstd-1.5.7.2-cp313-cp313-win32.whl", hash = "sha256:edf816c218e5978033b7bb47dcb453dfb71038cb8a9bf4877f3f823e74d58174"},
|
||||
{file = "zstd-1.5.7.2-cp313-cp313-win_amd64.whl", hash = "sha256:eea9bddf06f3f5e1e450fd647665c86df048a45e8b956d53522387c1dff41b7a"},
|
||||
{file = "zstd-1.5.7.2-cp313-cp313t-manylinux_2_14_x86_64.whl", hash = "sha256:1d71f9f92b3abe18b06b5f0aefa5b9c42112beef3bff27e36028d147cb4426a6"},
|
||||
{file = "zstd-1.5.7.2-cp314-cp314-manylinux_2_14_x86_64.whl", hash = "sha256:a6105b8fa21dbc59e05b6113e8e5d5aaf56c5d2886aa5778d61030af3256bbb7"},
|
||||
{file = "zstd-1.5.7.2-cp314-cp314t-manylinux_2_14_x86_64.whl", hash = "sha256:d0b0ca097efb5f67157c61a744c926848dcccf6e913df2f814e719aa78197a4b"},
|
||||
{file = "zstd-1.5.7.2-cp34-cp34m-manylinux_2_4_i686.whl", hash = "sha256:a371274668182ae06be2e321089b207fa0a75a58ae2fd4dfb7eafded9e041b2f"},
|
||||
{file = "zstd-1.5.7.2-cp34-cp34m-manylinux_2_4_x86_64.whl", hash = "sha256:74c3f006c9a3a191ed454183f0fb78172444f5cb431be04d85044a27f1b58c7b"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:f19a3e658d92b6b52020c4c6d4c159480bcd3b47658773ea0e8d343cee849f33"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:d9d1bcb6441841c599883139c1b0e47bddb262cce04b37dc2c817da5802c1158"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:bb1cb423fc40468cc9b7ab51a5b33c618eefd2c910a5bffed6ed76fe1cbb20b0"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-manylinux_2_14_x86_64.whl", hash = "sha256:e2476ba12597e58c5fc7a3ae547ee1bef9dd6b9d5ea80cf8d4034930c5a336e0"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-manylinux_2_4_i686.whl", hash = "sha256:2bf6447373782a2a9df3015121715f6d0b80a49a884c2d7d4518c9571e9fca16"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-win32.whl", hash = "sha256:a59a136a9eaa1849d715c004e30344177e85ad6e7bc4a5d0b6ad2495c5402675"},
|
||||
{file = "zstd-1.5.7.2-cp35-cp35m-win_amd64.whl", hash = "sha256:114115af8c68772a3205414597f626b604c7879f6662a2a79c88312e0f50361f"},
|
||||
{file = "zstd-1.5.7.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f576ec00e99db124309dac1e1f34bc320eb69624189f5fdaf9ebe1dc81581a84"},
|
||||
{file = "zstd-1.5.7.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:f97d8593da0e23a47f148a1cb33300dccd513fb0df9f7911c274e228a8c1a300"},
|
||||
{file = "zstd-1.5.7.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:a130243e875de5aeda6099d12b11bc2fcf548dce618cf6b17f731336ba5338e4"},
|
||||
{file = "zstd-1.5.7.2-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:73cec37649fda383348dc8b3b5fba535f1dbb1bbaeb60fd36f4c145820208619"},
|
||||
{file = "zstd-1.5.7.2-cp36-cp36m-manylinux_2_14_x86_64.whl", hash = "sha256:883e7b77a3124011b8badd0c7c9402af3884700a3431d07877972e157d85afb8"},
|
||||
{file = "zstd-1.5.7.2-cp36-cp36m-manylinux_2_4_i686.whl", hash = "sha256:b5af6aa041b5515934afef2ef4af08566850875c3c890109088eedbe190eeefb"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:53abf577aec7b30afa3c024143f4866676397c846b44f1b30d8097b5e4f5c7d7"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:660945ba16c16957c94dafc40aff1db02a57af0489aa3a896866239d47bb44b0"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:3e220d2d7005822bb72a52e76410ca4634f941d8062c08e8e3285733c63b1db7"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-manylinux_2_4_i686.whl", hash = "sha256:7e998f86a9d1e576c0158bf0b0a6a5c4685679d74ba0053a2e87f684f9bdc8eb"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-manylinux_2_4_x86_64.whl", hash = "sha256:70d0c4324549073e05aa72e9eb6a593f89cba59da804b946d325d68467b93ad5"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:b9518caabf59405eddd667bbb161d9ae7f13dbf96967fd998d095589c8d41c86"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:30d339d8e5c4b14c2015b50371fcdb8a93b451ca6d3ef813269ccbb8b3b3ef7d"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:6f5539a10b838ee576084870eed65b63c13845e30a5b552cfe40f7e6b621e61a"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:5540ce1c99fa0b59dad2eff771deb33872754000da875be50ac8c2beab42b433"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-win32.whl", hash = "sha256:56c4b8cd0a88fd721213661c28b87b64fbd14b6019df39b21b0117a68162b0f2"},
|
||||
{file = "zstd-1.5.7.2-cp37-cp37m-win_amd64.whl", hash = "sha256:594f256fa72852ade60e3acb909f983d5cf6839b9fc79728dd4b48b31112058f"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9dc05618eb0abceb296b77e5f608669c12abc69cbf447d08151bcb14d290ab07"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:70231ba799d681b6fc17456c3e39895c493b5dff400aa7842166322a952b7f2a"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5a73f0f20f71d4eef970a3fed7baac64d9a2a00b238acc4eca2bd7172bd7effb"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0a470f8938f69f632b8f88b96578a5e8825c18ddbbea7de63493f74874f963ef"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-manylinux_2_4_i686.whl", hash = "sha256:d104f1cb2a7c142007c29a2a62dfe633155c648317a465674e583c295e5f792d"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-manylinux_2_4_x86_64.whl", hash = "sha256:70f29e0504fc511d4b9f921e69637fca79c050e618ba23732a3f75c044814d89"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:a62c2f6f7b8fc69767392084828740bd6faf35ff54d4ccb2e90e199327c64140"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2dda0c76f87723fb7f75d7ad3bbd90f7fb47b75051978d22535099325111b41"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f9cf09c2aa6f67750fe9f33fdd122f021b1a23bf7326064a8e21f7af7e77faee"},
|
||||
{file = "zstd-1.5.7.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:910bd9eac2488439f597504756b03c74aa63ed71b21e5d0aa2c7e249b3f1c13f"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9838ec7eb9f1beb2f611b9bcac7a169cb3de708ccf779aead29787e4482fe232"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:83a36bb1fd574422a77b36ccf3315ab687aef9a802b0c3312ca7006b74eeb109"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6f8189bc58415758bbbd419695012194f5e5e22c34553712d9a3eb009c09808d"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:632e3c1b7e1ebb0580f6d92b781a8f7901d367cf72725d5642e6d3a32e404e45"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-manylinux_2_4_i686.whl", hash = "sha256:df8083c40fdbfe970324f743f0b5ecc244c37736e5f3ad2670de61dde5e0b024"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-manylinux_2_4_x86_64.whl", hash = "sha256:300db1ede4d10f8b9b3b99ca52b22f0e2303dc4f1cf6994d1f8345ce22dd5a7e"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:97b908ccb385047b0c020ce3dc55e6f51078c9790722fdb3620c076be4a69ecf"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c59218bd36a7431a40591504f299de836ea0d63bc68ea76d58c4cf5262f0fa3c"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4d5a85344193ec967d05da8e2c10aed400e2d83e16041d2fdfb713cfc8caceeb"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ebf6c1d7f0ceb0af5a383d2a1edc8ab9ace655e62a41c8a4ed5a031ee2ef8006"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-win32.whl", hash = "sha256:44a5142123d59a0dbbd9ba9720c23521be57edbc24202223a5e17405c3bdd4a6"},
|
||||
{file = "zstd-1.5.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:8dc542a9818712a9fb37563fa88cdbbbb2b5f8733111d412b718fa602b83ba45"},
|
||||
{file = "zstd-1.5.7.2-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:24371a7b0475eef7d933c72067d363c5dc17282d2aa5d4f5837774378718509e"},
|
||||
{file = "zstd-1.5.7.2-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:c21d44981b068551f13097be3809fadb7f81617d0c21b2c28a7d04653dde958f"},
|
||||
{file = "zstd-1.5.7.2-pp27-pypy_73-manylinux_2_14_x86_64.whl", hash = "sha256:b011bf4cfad78cdf9116d6731234ff181deb9560645ffdcc8d54861ae5d1edfc"},
|
||||
{file = "zstd-1.5.7.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:426e5c6b7b3e2401b734bfd08050b071e17c15df5e3b31e63651d1fd9ba4c751"},
|
||||
{file = "zstd-1.5.7.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:53375b23f2f39359ade944169bbd88f8895eed91290ee608ccbc28810ac360ba"},
|
||||
{file = "zstd-1.5.7.2-pp310-pypy310_pp73-manylinux_2_14_x86_64.whl", hash = "sha256:1b301b2f9dbb0e848093127fb10cbe6334a697dc3aea6740f0bb726450ee9a34"},
|
||||
{file = "zstd-1.5.7.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5414c9ae27069ab3ec8420fe8d005cb1b227806cbc874a7b4c73a96b4697a633"},
|
||||
{file = "zstd-1.5.7.2-pp311-pypy311_pp73-manylinux_2_14_x86_64.whl", hash = "sha256:5fb2ff5718fe89181223c23ce7308bd0b4a427239379e2566294da805d8df68a"},
|
||||
{file = "zstd-1.5.7.2-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:9714d5642867fceb22e4ab74aebf81a2e62dc9206184d603cb39277b752d5885"},
|
||||
{file = "zstd-1.5.7.2-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:6584fd081a6e7d92dffa8e7373d1fced6b3cbf473154b82c17a99438c5e1de51"},
|
||||
{file = "zstd-1.5.7.2-pp36-pypy36_pp73-manylinux_2_14_x86_64.whl", hash = "sha256:52f27a198e2a72632bae12ec63ebaa31b10e3d5f3dd3df2e01376979b168e2e6"},
|
||||
{file = "zstd-1.5.7.2-pp36-pypy36_pp73-win32.whl", hash = "sha256:3b14793d2a2cb3a7ddd1cf083321b662dd20bc11143abc719456e9bfd22a32aa"},
|
||||
{file = "zstd-1.5.7.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:faf3fd38ba26167c5a085c04b8c931a216f1baf072709db7a38e61dea52e316e"},
|
||||
{file = "zstd-1.5.7.2-pp37-pypy37_pp73-manylinux_2_14_x86_64.whl", hash = "sha256:d17ac6d2584168247796174e599d4adbee00153246287e68881efaf8d48a6970"},
|
||||
{file = "zstd-1.5.7.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9a24d492c63555b55e6bc73a9e82a38bf7c3e8f7cde600f079210ed19cb061f2"},
|
||||
{file = "zstd-1.5.7.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c6abf4ab9a9d1feb14bc3cbcc32d723d340ce43b79b1812805916f3ac069b073"},
|
||||
{file = "zstd-1.5.7.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:d7131bb4e55d075cb7847555a1e17fca5b816a550c9b9ac260c01799b6f8e8d9"},
|
||||
{file = "zstd-1.5.7.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a03608499794148f39c932c508d4eb3622e79ca2411b1d0438a2ee8cafdc0111"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:86e64c71b4d00bf28be50e4941586e7874bdfa74858274d9f7571dd5dda92086"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0f79492bf86aef6e594b11e29c5589ddd13253db3ada0c7a14fb176b132fb65e"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-manylinux_2_14_x86_64.whl", hash = "sha256:8c3f4bb8508bc54c00532931da4a5261f08493363da14a5526c986765973e35d"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:787bcf55cefc08d27aca34c6dcaae1a24940963d1a73d4cec894ee458c541ac4"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f97f872cb78a4fd60b6c1024a65a4c52a971e9d991f33c7acd833ee73050f85"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:5e530b75452fdcff4ea67268d9e7cb37a38e7abbac84fa845205f0b36da81aaf"},
|
||||
{file = "zstd-1.5.7.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7c1cc65fc2789dd97a98202df840537de186ed04fd1804a17fcb15d1232442c4"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:05604a693fa53b60ca083992324b08dafd15a4ac37ac4cffe4b43b9eb93d4440"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:baf4e8b46d8934d4e85373f303eb048c63897fc4191d8ab301a1bbdf30b7a3cc"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-manylinux_2_14_x86_64.whl", hash = "sha256:8cc35cc25e2d4a0f68020f05cba96912a2881ebaca890d990abe37aa3aa27045"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:ceae57e369e1b821b8f2b4c59bc08acd27d8e4bf9687bfa5211bc4cdb080fe7b"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5189fb44c44ab9b6c45f734bd7093a67686193110dc90dcfaf0e3a31b2385f38"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:f51a965871b25911e06d421212f9be7f7bcd3cedc43ea441a8a73fad9952baa0"},
|
||||
{file = "zstd-1.5.7.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:624022851c51dd6d6b31dbfd793347c4bd6339095e8383e2f74faf4f990b04c6"},
|
||||
{file = "zstd-1.5.7.2.tar.gz", hash = "sha256:6d8684c69009be49e1b18ec251a5eb0d7e24f93624990a8a124a1da66a92fc8a"},
|
||||
]
|
||||
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.11,<3.13"
|
||||
content-hash = "c3f69105de7e604d4978c53877203d69c59d22276e8d7c751f4960764a5f926c"
|
||||
content-hash = "77ef098291cb8631565a1ab5027ce33e7fcb5a04883dc7160bf373eac9e1fb49"
|
||||
|
||||
@@ -24,7 +24,7 @@ dependencies = [
|
||||
"drf-spectacular-jsonapi==0.5.1",
|
||||
"gunicorn==23.0.0",
|
||||
"lxml==5.3.2",
|
||||
"prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.16",
|
||||
"prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
|
||||
"psycopg2-binary==2.9.9",
|
||||
"pytest-celery[redis] (>=1.0.1,<2.0.0)",
|
||||
"sentry-sdk[django] (>=2.20.0,<3.0.0)",
|
||||
@@ -44,7 +44,7 @@ name = "prowler-api"
|
||||
package-mode = false
|
||||
# Needed for the SDK compatibility
|
||||
requires-python = ">=3.11,<3.13"
|
||||
version = "1.17.0"
|
||||
version = "1.16.0"
|
||||
|
||||
[project.scripts]
|
||||
celery = "src.backend.config.settings.celery"
|
||||
|
||||
@@ -26,11 +26,8 @@ class Migration(migrations.Migration):
|
||||
),
|
||||
),
|
||||
(
|
||||
"tenant",
|
||||
models.ForeignKey(
|
||||
on_delete=django.db.models.deletion.CASCADE,
|
||||
to="api.tenant",
|
||||
),
|
||||
"tenant_id",
|
||||
models.UUIDField(db_index=True, editable=False),
|
||||
),
|
||||
(
|
||||
"inserted_at",
|
||||
@@ -59,6 +56,7 @@ class Migration(migrations.Migration):
|
||||
("low", "Low"),
|
||||
("informational", "Informational"),
|
||||
],
|
||||
max_length=50,
|
||||
),
|
||||
),
|
||||
(
|
||||
@@ -84,7 +82,6 @@ class Migration(migrations.Migration):
|
||||
],
|
||||
options={
|
||||
"db_table": "scan_category_summaries",
|
||||
"abstract": False,
|
||||
},
|
||||
),
|
||||
migrations.AddIndex(
|
||||
|
||||
@@ -16,7 +16,6 @@ class Migration(migrations.Migration):
|
||||
blank=True,
|
||||
null=True,
|
||||
size=None,
|
||||
help_text="Categories from check metadata for efficient filtering",
|
||||
),
|
||||
),
|
||||
]
|
||||
|
||||
@@ -716,19 +716,14 @@ class Resource(RowLevelSecurityProtectedModel):
|
||||
self.clear_tags()
|
||||
return
|
||||
|
||||
# Add new relationships with the tenant_id field; avoid touching the
|
||||
# Resource row unless a mapping is actually created to prevent noisy
|
||||
# updates during scans.
|
||||
mapping_created = False
|
||||
# Add new relationships with the tenant_id field
|
||||
for tag in tags:
|
||||
_, created = ResourceTagMapping.objects.update_or_create(
|
||||
ResourceTagMapping.objects.update_or_create(
|
||||
tag=tag, resource=self, tenant_id=self.tenant_id
|
||||
)
|
||||
mapping_created = mapping_created or created
|
||||
|
||||
if mapping_created:
|
||||
# Only bump updated_at when the tag set truly changed
|
||||
self.save(update_fields=["updated_at"])
|
||||
# Save the instance
|
||||
self.save()
|
||||
|
||||
class Meta(RowLevelSecurityProtectedModel.Meta):
|
||||
db_table = "resources"
|
||||
|
||||
@@ -243,28 +243,15 @@ def _safe_getattr(obj, attr: str, default: str = "N/A") -> str:
|
||||
|
||||
|
||||
def _create_info_table_style() -> TableStyle:
|
||||
"""Create a reusable table style for information/metadata tables.
|
||||
|
||||
ReportLab TableStyle coordinate system:
|
||||
- Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
|
||||
- Coordinates use (column, row) format, starting at (0, 0) for top-left cell
|
||||
- Negative indices work like Python slicing: -1 means "last row/column"
|
||||
- (0, 0) to (0, -1) = entire first column (all rows)
|
||||
- (0, 0) to (-1, 0) = entire first row (all columns)
|
||||
- (0, 0) to (-1, -1) = entire table
|
||||
- Styles are applied in order; later rules override earlier ones
|
||||
"""
|
||||
"""Create a reusable table style for information/metadata tables."""
|
||||
return TableStyle(
|
||||
[
|
||||
# Column 0 (labels): blue background with white text
|
||||
("BACKGROUND", (0, 0), (0, -1), COLOR_BLUE),
|
||||
("TEXTCOLOR", (0, 0), (0, -1), COLOR_WHITE),
|
||||
("FONTNAME", (0, 0), (0, -1), "FiraCode"),
|
||||
# Column 1 (values): light blue background with gray text
|
||||
("BACKGROUND", (1, 0), (1, -1), COLOR_BG_BLUE),
|
||||
("TEXTCOLOR", (1, 0), (1, -1), COLOR_GRAY),
|
||||
("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
|
||||
# Apply to entire table
|
||||
("ALIGN", (0, 0), (-1, -1), "LEFT"),
|
||||
("VALIGN", (0, 0), (-1, -1), "TOP"),
|
||||
("FONTSIZE", (0, 0), (-1, -1), 11),
|
||||
@@ -278,30 +265,19 @@ def _create_info_table_style() -> TableStyle:
|
||||
|
||||
|
||||
def _create_header_table_style(header_color: colors.Color = None) -> TableStyle:
|
||||
"""Create a reusable table style for tables with headers.
|
||||
|
||||
ReportLab TableStyle coordinate system:
|
||||
- Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
|
||||
- (0, 0) to (-1, 0) = entire first row (header row)
|
||||
- (1, 1) to (-1, -1) = all data cells (excludes header row and first column)
|
||||
- See _create_info_table_style() for full coordinate system documentation
|
||||
"""
|
||||
"""Create a reusable table style for tables with headers."""
|
||||
if header_color is None:
|
||||
header_color = COLOR_BLUE
|
||||
|
||||
return TableStyle(
|
||||
[
|
||||
# Header row (row 0): colored background with white text
|
||||
("BACKGROUND", (0, 0), (-1, 0), header_color),
|
||||
("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
|
||||
("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
|
||||
("FONTSIZE", (0, 0), (-1, 0), 10),
|
||||
# Apply to entire table
|
||||
("ALIGN", (0, 0), (-1, -1), "CENTER"),
|
||||
("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
|
||||
# Data cells (excluding header): smaller font
|
||||
("FONTSIZE", (1, 1), (-1, -1), 9),
|
||||
# Apply to entire table
|
||||
("GRID", (0, 0), (-1, -1), 1, COLOR_GRID_GRAY),
|
||||
("LEFTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
|
||||
("RIGHTPADDING", (0, 0), (-1, -1), PADDING_MEDIUM),
|
||||
@@ -312,30 +288,18 @@ def _create_header_table_style(header_color: colors.Color = None) -> TableStyle:
|
||||
|
||||
|
||||
def _create_findings_table_style() -> TableStyle:
|
||||
"""Create a reusable table style for findings tables.
|
||||
|
||||
ReportLab TableStyle coordinate system:
|
||||
- Format: (COMMAND, (start_col, start_row), (end_col, end_row), value)
|
||||
- (0, 0) to (-1, 0) = entire first row (header row)
|
||||
- (0, 0) to (0, 0) = only the top-left cell
|
||||
- See _create_info_table_style() for full coordinate system documentation
|
||||
"""
|
||||
"""Create a reusable table style for findings tables."""
|
||||
return TableStyle(
|
||||
[
|
||||
# Header row (row 0): colored background with white text
|
||||
("BACKGROUND", (0, 0), (-1, 0), COLOR_BLUE),
|
||||
("TEXTCOLOR", (0, 0), (-1, 0), COLOR_WHITE),
|
||||
("FONTNAME", (0, 0), (-1, 0), "FiraCode"),
|
||||
# Only top-left cell centered (for index/number column)
|
||||
("ALIGN", (0, 0), (0, 0), "CENTER"),
|
||||
# Apply to entire table
|
||||
("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
|
||||
("FONTSIZE", (0, 0), (-1, -1), 9),
|
||||
("GRID", (0, 0), (-1, -1), 0.1, COLOR_BORDER_GRAY),
|
||||
# Remove padding only from top-left cell
|
||||
("LEFTPADDING", (0, 0), (0, 0), 0),
|
||||
("RIGHTPADDING", (0, 0), (0, 0), 0),
|
||||
# Apply to entire table
|
||||
("TOPPADDING", (0, 0), (-1, -1), PADDING_SMALL),
|
||||
("BOTTOMPADDING", (0, 0), (-1, -1), PADDING_SMALL),
|
||||
]
|
||||
@@ -1139,15 +1103,11 @@ def generate_threatscore_report(
|
||||
elements.append(Spacer(1, 0.5 * inch))
|
||||
|
||||
# Add compliance information table
|
||||
provider_alias = provider_obj.alias or "N/A"
|
||||
info_data = [
|
||||
["Framework:", compliance_framework],
|
||||
["ID:", compliance_id],
|
||||
["Name:", Paragraph(compliance_name, normal_center)],
|
||||
["Version:", compliance_version],
|
||||
["Provider:", provider_type.upper()],
|
||||
["Account ID:", provider_obj.uid],
|
||||
["Alias:", provider_alias],
|
||||
["Scan ID:", scan_id],
|
||||
["Description:", Paragraph(compliance_description, normal_center)],
|
||||
]
|
||||
@@ -2099,15 +2059,12 @@ def generate_ens_report(
|
||||
elements.append(Spacer(1, 0.5 * inch))
|
||||
|
||||
# Add compliance information table
|
||||
provider_alias = provider_obj.alias or "N/A"
|
||||
info_data = [
|
||||
["Framework:", compliance_framework],
|
||||
["ID:", compliance_id],
|
||||
["Nombre:", Paragraph(compliance_name, normal_center)],
|
||||
["Versión:", compliance_version],
|
||||
["Proveedor:", provider_type.upper()],
|
||||
["Account ID:", provider_obj.uid],
|
||||
["Alias:", provider_alias],
|
||||
["Scan ID:", scan_id],
|
||||
["Descripción:", Paragraph(compliance_description, normal_center)],
|
||||
]
|
||||
@@ -2115,12 +2072,12 @@ def generate_ens_report(
|
||||
info_table.setStyle(
|
||||
TableStyle(
|
||||
[
|
||||
("BACKGROUND", (0, 0), (0, -1), colors.Color(0.2, 0.4, 0.6)),
|
||||
("TEXTCOLOR", (0, 0), (0, -1), colors.white),
|
||||
("FONTNAME", (0, 0), (0, -1), "FiraCode"),
|
||||
("BACKGROUND", (1, 0), (1, -1), colors.Color(0.95, 0.97, 1.0)),
|
||||
("TEXTCOLOR", (1, 0), (1, -1), colors.Color(0.2, 0.2, 0.2)),
|
||||
("FONTNAME", (1, 0), (1, -1), "PlusJakartaSans"),
|
||||
("BACKGROUND", (0, 0), (0, 6), colors.Color(0.2, 0.4, 0.6)),
|
||||
("TEXTCOLOR", (0, 0), (0, 6), colors.white),
|
||||
("FONTNAME", (0, 0), (0, 6), "FiraCode"),
|
||||
("BACKGROUND", (1, 0), (1, 6), colors.Color(0.95, 0.97, 1.0)),
|
||||
("TEXTCOLOR", (1, 0), (1, 6), colors.Color(0.2, 0.2, 0.2)),
|
||||
("FONTNAME", (1, 0), (1, 6), "PlusJakartaSans"),
|
||||
("ALIGN", (0, 0), (-1, -1), "LEFT"),
|
||||
("VALIGN", (0, 0), (-1, -1), "TOP"),
|
||||
("FONTSIZE", (0, 0), (-1, -1), 11),
|
||||
@@ -3040,14 +2997,11 @@ def generate_nis2_report(
|
||||
elements.append(Spacer(1, 0.3 * inch))
|
||||
|
||||
# Compliance metadata table
|
||||
provider_alias = provider_obj.alias or "N/A"
|
||||
metadata_data = [
|
||||
["Framework:", compliance_framework],
|
||||
["Name:", Paragraph(compliance_name, normal_center)],
|
||||
["Version:", compliance_version or "N/A"],
|
||||
["Provider:", provider_type.upper()],
|
||||
["Account ID:", provider_obj.uid],
|
||||
["Alias:", provider_alias],
|
||||
["Scan ID:", scan_id],
|
||||
["Description:", Paragraph(compliance_description, normal_center)],
|
||||
]
|
||||
|
||||
@@ -41,9 +41,6 @@ services:
|
||||
volumes:
|
||||
- "./ui:/app"
|
||||
- "/app/node_modules"
|
||||
depends_on:
|
||||
mcp-server:
|
||||
condition: service_healthy
|
||||
|
||||
postgres:
|
||||
image: postgres:16.3-alpine3.20
|
||||
@@ -60,11 +57,7 @@ services:
|
||||
ports:
|
||||
- "${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}"
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"sh -c 'pg_isready -U ${POSTGRES_ADMIN_USER} -d ${POSTGRES_DB}'",
|
||||
]
|
||||
test: ["CMD-SHELL", "sh -c 'pg_isready -U ${POSTGRES_ADMIN_USER} -d ${POSTGRES_DB}'"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
@@ -125,32 +118,6 @@ services:
|
||||
- "../docker-entrypoint.sh"
|
||||
- "beat"
|
||||
|
||||
mcp-server:
|
||||
build:
|
||||
context: ./mcp_server
|
||||
dockerfile: Dockerfile
|
||||
environment:
|
||||
- PROWLER_MCP_TRANSPORT_MODE=http
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- ./mcp_server/prowler_mcp_server:/app/prowler_mcp_server
|
||||
- ./mcp_server/pyproject.toml:/app/pyproject.toml
|
||||
- ./mcp_server/entrypoint.sh:/app/entrypoint.sh
|
||||
command: ["uvicorn", "--host", "0.0.0.0", "--port", "8000"]
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"wget -q -O /dev/null http://127.0.0.1:8000/health || exit 1",
|
||||
]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
volumes:
|
||||
outputs:
|
||||
driver: local
|
||||
|
||||
@@ -1,9 +1,3 @@
|
||||
# Production Docker Compose configuration
|
||||
# Uses pre-built images from Docker Hub (prowlercloud/*)
|
||||
#
|
||||
# For development with local builds and hot-reload, use docker-compose-dev.yml instead:
|
||||
# docker compose -f docker-compose-dev.yml up
|
||||
#
|
||||
services:
|
||||
api:
|
||||
hostname: "prowler-api"
|
||||
@@ -32,9 +26,6 @@ services:
|
||||
required: false
|
||||
ports:
|
||||
- ${UI_PORT:-3000}:${UI_PORT:-3000}
|
||||
depends_on:
|
||||
mcp-server:
|
||||
condition: service_healthy
|
||||
|
||||
postgres:
|
||||
image: postgres:16.3-alpine3.20
|
||||
@@ -102,22 +93,6 @@ services:
|
||||
- "../docker-entrypoint.sh"
|
||||
- "beat"
|
||||
|
||||
mcp-server:
|
||||
image: prowlercloud/prowler-mcp:${PROWLER_MCP_VERSION:-stable}
|
||||
environment:
|
||||
- PROWLER_MCP_TRANSPORT_MODE=http
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
ports:
|
||||
- "8000:8000"
|
||||
command: ["uvicorn", "--host", "0.0.0.0", "--port", "8000"]
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -q -O /dev/null http://127.0.0.1:8000/health || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
volumes:
|
||||
output:
|
||||
driver: local
|
||||
|
||||
@@ -10,7 +10,7 @@ Complete reference guide for all tools available in the Prowler MCP Server. Tool
|
||||
|----------|------------|------------------------|
|
||||
| Prowler Hub | 10 tools | No |
|
||||
| Prowler Documentation | 2 tools | No |
|
||||
| Prowler Cloud/App | 24 tools | Yes |
|
||||
| Prowler Cloud/App | 22 tools | Yes |
|
||||
|
||||
## Tool Naming Convention
|
||||
|
||||
@@ -80,24 +80,16 @@ Tools for managing finding muting, including pattern-based bulk muting (mutelist
|
||||
- **`prowler_app_update_mute_rule`** - Update a mute rule's name, reason, or enabled status
|
||||
- **`prowler_app_delete_mute_rule`** - Delete a mute rule from the system
|
||||
|
||||
### Compliance Management
|
||||
|
||||
Tools for viewing compliance status and framework details across all cloud providers.
|
||||
|
||||
- **`prowler_app_get_compliance_overview`** - Get high-level compliance status across all frameworks for a specific scan or provider, including pass/fail statistics per framework
|
||||
- **`prowler_app_get_compliance_framework_state_details`** - Get detailed requirement-level breakdown for a specific compliance framework, including failed requirements and associated finding IDs
|
||||
|
||||
## Prowler Hub Tools
|
||||
|
||||
Access Prowler's security check catalog and compliance frameworks. **No authentication required.**
|
||||
|
||||
Tools follow a **two-tier pattern**: lightweight listing for browsing + detailed retrieval for complete information.
|
||||
### Check Discovery
|
||||
|
||||
### Check Discovery and Details
|
||||
|
||||
- **`prowler_hub_list_checks`** - List security checks with lightweight data (id, title, severity, provider) and advanced filtering options
|
||||
- **`prowler_hub_semantic_search_checks`** - Full-text search across check metadata with lightweight results
|
||||
- **`prowler_hub_get_check_details`** - Get comprehensive details for a specific check including risk, remediation guidance, and compliance mappings
|
||||
- **`prowler_hub_get_checks`** - List security checks with advanced filtering options
|
||||
- **`prowler_hub_get_check_filters`** - Return available filter values for checks (providers, services, severities, categories, compliances)
|
||||
- **`prowler_hub_search_checks`** - Full-text search across check metadata
|
||||
- **`prowler_hub_get_check_raw_metadata`** - Fetch raw check metadata in JSON format
|
||||
|
||||
### Check Code
|
||||
|
||||
@@ -106,21 +98,20 @@ Tools follow a **two-tier pattern**: lightweight listing for browsing + detailed
|
||||
|
||||
### Compliance Frameworks
|
||||
|
||||
- **`prowler_hub_list_compliances`** - List compliance frameworks with lightweight data (id, name, provider) and filtering options
|
||||
- **`prowler_hub_semantic_search_compliances`** - Full-text search across compliance frameworks with lightweight results
|
||||
- **`prowler_hub_get_compliance_details`** - Get comprehensive compliance details including requirements and mapped checks
|
||||
- **`prowler_hub_get_compliance_frameworks`** - List and filter compliance frameworks
|
||||
- **`prowler_hub_search_compliance_frameworks`** - Full-text search across compliance frameworks
|
||||
|
||||
### Providers Information
|
||||
### Provider Information
|
||||
|
||||
- **`prowler_hub_list_providers`** - List Prowler official providers
|
||||
- **`prowler_hub_get_provider_services`** - Get available services for a specific provider
|
||||
- **`prowler_hub_list_providers`** - List Prowler official providers and their services
|
||||
- **`prowler_hub_get_artifacts_count`** - Get total count of checks and frameworks in Prowler Hub
|
||||
|
||||
## Prowler Documentation Tools
|
||||
|
||||
Search and access official Prowler documentation. **No authentication required.**
|
||||
|
||||
- **`prowler_docs_search`** - Search the official Prowler documentation using full-text search with the `term` parameter
|
||||
- **`prowler_docs_get_document`** - Retrieve the full markdown content of a specific documentation file using the path from search results
|
||||
- **`prowler_docs_search`** - Search the official Prowler documentation using full-text search
|
||||
- **`prowler_docs_get_document`** - Retrieve the full markdown content of a specific documentation file
|
||||
|
||||
## Usage Tips
|
||||
|
||||
|
||||
@@ -115,15 +115,10 @@ To update the environment file:
|
||||
Edit the `.env` file and change version values:
|
||||
|
||||
```env
|
||||
PROWLER_UI_VERSION="5.15.0"
|
||||
PROWLER_API_VERSION="5.15.0"
|
||||
PROWLER_UI_VERSION="5.9.0"
|
||||
PROWLER_API_VERSION="5.9.0"
|
||||
```
|
||||
|
||||
<Note>
|
||||
You can find the latest versions of Prowler App in the [Releases Github section](https://github.com/prowler-cloud/prowler/releases) or in the [Container Versions](#container-versions) section of this documentation.
|
||||
</Note>
|
||||
|
||||
|
||||
#### Option 2: Using Docker Compose Pull
|
||||
|
||||
```bash
|
||||
|
||||
@@ -6,7 +6,7 @@ title: "Overview"
|
||||
|
||||
**Why this matters**: Every engineer has asked, “What does this check actually do?” Prowler Hub answers that question in one place, lets you pin to a specific version, and pulls definitions into your own tools or dashboards.
|
||||
|
||||

|
||||

|
||||
|
||||
<Card title="Go to Prowler Hub" href="https://hub.prowler.com" />
|
||||
|
||||
@@ -14,4 +14,4 @@ Prowler Hub also provides a fully documented public API that you can integrate i
|
||||
|
||||
📚 Explore the API docs at: https://hub.prowler.com/api/docs
|
||||
|
||||
Whether you’re customizing policies, managing compliance, or enhancing visibility, Prowler Hub is built to support your security operations.
|
||||
Whether you’re customizing policies, managing compliance, or enhancing visibility, Prowler Hub is built to support your security operations.
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 256 KiB |
BIN
docs/images/products/prowler-hub.webp
Normal file
BIN
docs/images/products/prowler-hub.webp
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 210 KiB |
@@ -2,16 +2,11 @@
|
||||
|
||||
All notable changes to the **Prowler MCP Server** are documented in this file.
|
||||
|
||||
## [0.3.0] (Prowler v5.16.0)
|
||||
|
||||
### Added
|
||||
|
||||
- Add new MCP Server tools for Prowler Compliance Framework Management [(#9568)](https://github.com/prowler-cloud/prowler/pull/9568)
|
||||
## [0.2.1] (UNRELEASED)
|
||||
|
||||
### Changed
|
||||
|
||||
- Update API base URL environment variable to include complete path [(#9542)](https://github.com/prowler-cloud/prowler/pull/9542)
|
||||
- Standardize Prowler Hub and Docs tools format for AI optimization [(#9578)](https://github.com/prowler-cloud/prowler/pull/9578)
|
||||
- Update API base URL environment variable to include complete path [(#9542)](https://github.com/prowler-cloud/prowler/pull/9300)
|
||||
|
||||
## [0.2.0] (Prowler v5.15.0)
|
||||
|
||||
|
||||
@@ -14,7 +14,6 @@ Full access to Prowler Cloud platform and self-managed Prowler App for:
|
||||
- **Scan Orchestration**: Trigger on-demand scans and schedule recurring security assessments
|
||||
- **Resource Inventory**: Search and view detailed information about your audited resources
|
||||
- **Muting Management**: Create and manage muting rules to suppress non-critical findings
|
||||
- **Compliance Reporting**: View compliance status across frameworks and drill into requirement-level details
|
||||
|
||||
### Prowler Hub
|
||||
|
||||
@@ -23,7 +22,7 @@ Access to Prowler's comprehensive security knowledge base:
|
||||
- **Check Implementation**: View the Python code that powers each security check
|
||||
- **Automated Fixers**: Access remediation scripts for common security issues
|
||||
- **Compliance Frameworks**: Explore mappings to **over 70 compliance standards and frameworks**
|
||||
- **Provider Services**: View available services and checks for all supported Prowler providers
|
||||
- **Provider Services**: View available services and checks for each cloud provider
|
||||
|
||||
### Prowler Documentation
|
||||
|
||||
|
||||
@@ -1,240 +0,0 @@
|
||||
"""Pydantic models for simplified compliance responses."""
|
||||
|
||||
from typing import Any, Literal
|
||||
|
||||
from prowler_mcp_server.prowler_app.models.base import MinimalSerializerMixin
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
SerializerFunctionWrapHandler,
|
||||
model_serializer,
|
||||
)
|
||||
|
||||
|
||||
class ComplianceRequirementAttribute(MinimalSerializerMixin, BaseModel):
|
||||
"""Requirement attributes including associated check IDs.
|
||||
|
||||
Used to map requirements to the checks that validate them.
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(frozen=True)
|
||||
|
||||
id: str = Field(
|
||||
description="Requirement identifier within the framework (e.g., '1.1', '2.1.1')"
|
||||
)
|
||||
name: str = Field(default="", description="Human-readable name of the requirement")
|
||||
description: str = Field(
|
||||
default="", description="Detailed description of the requirement"
|
||||
)
|
||||
check_ids: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="List of Prowler check IDs that validate this requirement",
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, data: dict) -> "ComplianceRequirementAttribute":
|
||||
"""Transform JSON:API compliance requirement attributes response to simplified format."""
|
||||
attributes = data.get("attributes", {})
|
||||
|
||||
# Extract check_ids from the nested attributes structure
|
||||
nested_attributes = attributes.get("attributes", {})
|
||||
check_ids = nested_attributes.get("check_ids", [])
|
||||
|
||||
return cls(
|
||||
id=attributes.get("id", data.get("id", "")),
|
||||
name=attributes.get("name", ""),
|
||||
description=attributes.get("description", ""),
|
||||
check_ids=check_ids if check_ids else [],
|
||||
)
|
||||
|
||||
|
||||
class ComplianceRequirementAttributesListResponse(BaseModel):
|
||||
"""Response for compliance requirement attributes list with check_ids mappings."""
|
||||
|
||||
model_config = ConfigDict(frozen=True)
|
||||
|
||||
requirements: list[ComplianceRequirementAttribute] = Field(
|
||||
description="List of requirements with their associated check IDs"
|
||||
)
|
||||
total_count: int = Field(description="Total number of requirements")
|
||||
|
||||
@classmethod
|
||||
def from_api_response(
|
||||
cls, response: dict
|
||||
) -> "ComplianceRequirementAttributesListResponse":
|
||||
"""Transform JSON:API response to simplified format."""
|
||||
data = response.get("data", [])
|
||||
|
||||
requirements = [
|
||||
ComplianceRequirementAttribute.from_api_response(item) for item in data
|
||||
]
|
||||
|
||||
return cls(
|
||||
requirements=requirements,
|
||||
total_count=len(requirements),
|
||||
)
|
||||
|
||||
|
||||
class ComplianceFrameworkSummary(MinimalSerializerMixin, BaseModel):
|
||||
"""Simplified compliance framework overview for list operations.
|
||||
|
||||
Used by get_compliance_overview() to show high-level compliance status
|
||||
per framework.
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(frozen=True)
|
||||
|
||||
id: str = Field(description="Unique identifier for this compliance overview entry")
|
||||
compliance_id: str = Field(
|
||||
description="Compliance framework identifier (e.g., 'cis_1.5_aws', 'pci_dss_v4.0_aws')"
|
||||
)
|
||||
framework: str = Field(
|
||||
description="Human-readable framework name (e.g., 'CIS', 'PCI-DSS', 'HIPAA')"
|
||||
)
|
||||
version: str = Field(description="Framework version (e.g., '1.5', '4.0')")
|
||||
total_requirements: int = Field(
|
||||
default=0, description="Total number of requirements in this framework"
|
||||
)
|
||||
requirements_passed: int = Field(
|
||||
default=0, description="Number of requirements that passed"
|
||||
)
|
||||
requirements_failed: int = Field(
|
||||
default=0, description="Number of requirements that failed"
|
||||
)
|
||||
requirements_manual: int = Field(
|
||||
default=0, description="Number of requirements requiring manual verification"
|
||||
)
|
||||
|
||||
@property
|
||||
def pass_percentage(self) -> float:
|
||||
"""Calculate pass percentage based on passed requirements."""
|
||||
if self.total_requirements == 0:
|
||||
return 0.0
|
||||
return round((self.requirements_passed / self.total_requirements) * 100, 1)
|
||||
|
||||
@property
|
||||
def fail_percentage(self) -> float:
|
||||
"""Calculate fail percentage based on failed requirements."""
|
||||
if self.total_requirements == 0:
|
||||
return 0.0
|
||||
return round((self.requirements_failed / self.total_requirements) * 100, 1)
|
||||
|
||||
@model_serializer(mode="wrap")
|
||||
def _serialize(self, handler: SerializerFunctionWrapHandler) -> dict[str, Any]:
|
||||
"""Serialize with calculated percentages included."""
|
||||
data = handler(self)
|
||||
# Filter out None/empty values
|
||||
data = {k: v for k, v in data.items() if v is not None and v != "" and v != []}
|
||||
# Add calculated percentages
|
||||
data["pass_percentage"] = self.pass_percentage
|
||||
data["fail_percentage"] = self.fail_percentage
|
||||
return data
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, data: dict) -> "ComplianceFrameworkSummary":
|
||||
"""Transform JSON:API compliance overview response to simplified format."""
|
||||
attributes = data.get("attributes", {})
|
||||
|
||||
# The compliance_id field may be in attributes or use the "id" field from attributes
|
||||
compliance_id = attributes.get("id", data.get("id", ""))
|
||||
|
||||
return cls(
|
||||
id=data["id"],
|
||||
compliance_id=compliance_id,
|
||||
framework=attributes.get("framework", ""),
|
||||
version=attributes.get("version", ""),
|
||||
total_requirements=attributes.get("total_requirements", 0),
|
||||
requirements_passed=attributes.get("requirements_passed", 0),
|
||||
requirements_failed=attributes.get("requirements_failed", 0),
|
||||
requirements_manual=attributes.get("requirements_manual", 0),
|
||||
)
|
||||
|
||||
|
||||
class ComplianceRequirement(MinimalSerializerMixin, BaseModel):
|
||||
"""Individual compliance requirement with its status.
|
||||
|
||||
Used by get_compliance_framework_state_details() to show requirement-level breakdown.
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(frozen=True)
|
||||
|
||||
id: str = Field(
|
||||
description="Requirement identifier within the framework (e.g., '1.1', '2.1.1')"
|
||||
)
|
||||
description: str = Field(
|
||||
description="Human-readable description of the requirement"
|
||||
)
|
||||
status: Literal["FAIL", "PASS", "MANUAL"] = Field(
|
||||
description="Requirement status: FAIL (not compliant), PASS (compliant), MANUAL (requires manual verification)"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, data: dict) -> "ComplianceRequirement":
|
||||
"""Transform JSON:API compliance requirement response to simplified format."""
|
||||
attributes = data.get("attributes", {})
|
||||
|
||||
return cls(
|
||||
id=attributes.get("id", data.get("id", "")),
|
||||
description=attributes.get("description", ""),
|
||||
status=attributes.get("status", "MANUAL"),
|
||||
)
|
||||
|
||||
|
||||
class ComplianceFrameworksListResponse(BaseModel):
|
||||
"""Response for compliance frameworks list with aggregated statistics."""
|
||||
|
||||
model_config = ConfigDict(frozen=True)
|
||||
|
||||
frameworks: list[ComplianceFrameworkSummary] = Field(
|
||||
description="List of compliance frameworks with their status"
|
||||
)
|
||||
total_count: int = Field(description="Total number of frameworks returned")
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, response: dict) -> "ComplianceFrameworksListResponse":
|
||||
"""Transform JSON:API response to simplified format."""
|
||||
data = response.get("data", [])
|
||||
|
||||
frameworks = [
|
||||
ComplianceFrameworkSummary.from_api_response(item) for item in data
|
||||
]
|
||||
|
||||
return cls(
|
||||
frameworks=frameworks,
|
||||
total_count=len(frameworks),
|
||||
)
|
||||
|
||||
|
||||
class ComplianceRequirementsListResponse(BaseModel):
|
||||
"""Response for compliance requirements list queries."""
|
||||
|
||||
model_config = ConfigDict(frozen=True)
|
||||
|
||||
requirements: list[ComplianceRequirement] = Field(
|
||||
description="List of requirements with their status"
|
||||
)
|
||||
total_count: int = Field(description="Total number of requirements")
|
||||
passed_count: int = Field(description="Number of requirements with PASS status")
|
||||
failed_count: int = Field(description="Number of requirements with FAIL status")
|
||||
manual_count: int = Field(description="Number of requirements with MANUAL status")
|
||||
|
||||
@classmethod
|
||||
def from_api_response(cls, response: dict) -> "ComplianceRequirementsListResponse":
|
||||
"""Transform JSON:API response to simplified format."""
|
||||
data = response.get("data", [])
|
||||
|
||||
requirements = [ComplianceRequirement.from_api_response(item) for item in data]
|
||||
|
||||
# Calculate counts
|
||||
passed = sum(1 for r in requirements if r.status == "PASS")
|
||||
failed = sum(1 for r in requirements if r.status == "FAIL")
|
||||
manual = sum(1 for r in requirements if r.status == "MANUAL")
|
||||
|
||||
return cls(
|
||||
requirements=requirements,
|
||||
total_count=len(requirements),
|
||||
passed_count=passed,
|
||||
failed_count=failed,
|
||||
manual_count=manual,
|
||||
)
|
||||
@@ -1,409 +0,0 @@
|
||||
"""Compliance framework tools for Prowler App MCP Server.
|
||||
|
||||
This module provides tools for viewing compliance status and requirement details
|
||||
across all cloud providers.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from prowler_mcp_server.prowler_app.models.compliance import (
|
||||
ComplianceFrameworksListResponse,
|
||||
ComplianceRequirementAttributesListResponse,
|
||||
ComplianceRequirementsListResponse,
|
||||
)
|
||||
from prowler_mcp_server.prowler_app.tools.base import BaseTool
|
||||
from pydantic import Field
|
||||
|
||||
|
||||
class ComplianceTools(BaseTool):
|
||||
"""Tools for compliance framework operations.
|
||||
|
||||
Provides tools for:
|
||||
- get_compliance_overview: Get high-level compliance status across all frameworks
|
||||
- get_compliance_framework_state_details: Get detailed requirement-level breakdown for a specific framework
|
||||
"""
|
||||
|
||||
async def _get_latest_scan_id_for_provider(self, provider_id: str) -> str:
|
||||
"""Get the latest completed scan_id for a given provider.
|
||||
|
||||
Args:
|
||||
provider_id: Prowler's internal UUID for the provider
|
||||
|
||||
Returns:
|
||||
The scan_id of the latest completed scan for the provider.
|
||||
|
||||
Raises:
|
||||
ValueError: If no completed scans are found for the provider.
|
||||
"""
|
||||
scan_params = {
|
||||
"filter[provider]": provider_id,
|
||||
"filter[state]": "completed",
|
||||
"sort": "-inserted_at",
|
||||
"page[size]": 1,
|
||||
"page[number]": 1,
|
||||
}
|
||||
clean_scan_params = self.api_client.build_filter_params(scan_params)
|
||||
scans_response = await self.api_client.get("/scans", params=clean_scan_params)
|
||||
|
||||
scans_data = scans_response.get("data", [])
|
||||
if not scans_data:
|
||||
raise ValueError(
|
||||
f"No completed scans found for provider {provider_id}. "
|
||||
"Run a scan first using prowler_app_trigger_scan."
|
||||
)
|
||||
|
||||
scan_id = scans_data[0]["id"]
|
||||
return scan_id
|
||||
|
||||
async def get_compliance_overview(
|
||||
self,
|
||||
scan_id: str | None = Field(
|
||||
default=None,
|
||||
description="UUID of a specific scan to get compliance data for. Required if provider_id is not specified. Use `prowler_app_list_scans` to find scan IDs.",
|
||||
),
|
||||
provider_id: str | None = Field(
|
||||
default=None,
|
||||
description="Prowler's internal UUID (v4) for a specific provider. If provided without scan_id, the tool will automatically find the latest completed scan for this provider. Use `prowler_app_search_providers` tool to find provider IDs.",
|
||||
),
|
||||
) -> dict[str, Any]:
|
||||
"""Get high-level compliance overview across all frameworks for a specific scan.
|
||||
|
||||
This tool provides a HIGH-LEVEL OVERVIEW of compliance status across all frameworks.
|
||||
Use this when you need to understand overall compliance posture before drilling into
|
||||
specific framework details.
|
||||
|
||||
You have two options to specify the scan context:
|
||||
1. Provide a specific scan_id to get compliance data for that scan.
|
||||
2. Provide a provider_id to get compliance data from the latest completed scan for that provider.
|
||||
|
||||
The markdown report includes:
|
||||
|
||||
1. Summary Statistics:
|
||||
- Total number of compliance frameworks evaluated
|
||||
- Overall compliance metrics across all frameworks
|
||||
|
||||
2. Per-Framework Breakdown:
|
||||
- Framework name, version, and compliance ID
|
||||
- Requirements passed/failed/manual counts
|
||||
- Pass percentage for quick assessment
|
||||
|
||||
Workflow:
|
||||
1. Use this tool to get an overview of all compliance frameworks
|
||||
2. Use prowler_app_get_compliance_framework_state_details with a specific compliance_id to see which requirements failed
|
||||
"""
|
||||
if not scan_id and not provider_id:
|
||||
return {
|
||||
"error": "Either scan_id or provider_id must be provided. Use prowler_app_search_providers to find provider IDs or prowler_app_list_scans to find scan IDs."
|
||||
}
|
||||
elif scan_id and provider_id:
|
||||
return {
|
||||
"error": "Provide either scan_id or provider_id, not both. To get compliance data for a specific scan, use scan_id. To get data for the latest scan of a provider, use provider_id."
|
||||
}
|
||||
elif not scan_id and provider_id:
|
||||
try:
|
||||
scan_id = await self._get_latest_scan_id_for_provider(provider_id)
|
||||
except ValueError as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
params: dict[str, Any] = {"filter[scan_id]": scan_id}
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
|
||||
# Get API response
|
||||
api_response = await self.api_client.get(
|
||||
"/compliance-overviews", params=clean_params
|
||||
)
|
||||
frameworks_response = ComplianceFrameworksListResponse.from_api_response(
|
||||
api_response
|
||||
)
|
||||
|
||||
# Build markdown report
|
||||
frameworks = frameworks_response.frameworks
|
||||
total_frameworks = frameworks_response.total_count
|
||||
|
||||
if total_frameworks == 0:
|
||||
return {"report": "# Compliance Overview\n\nNo compliance frameworks found"}
|
||||
|
||||
# Calculate aggregate statistics
|
||||
total_requirements = sum(f.total_requirements for f in frameworks)
|
||||
total_passed = sum(f.requirements_passed for f in frameworks)
|
||||
total_failed = sum(f.requirements_failed for f in frameworks)
|
||||
total_manual = sum(f.requirements_manual for f in frameworks)
|
||||
overall_pass_pct = (
|
||||
round((total_passed / total_requirements) * 100, 1)
|
||||
if total_requirements > 0
|
||||
else 0
|
||||
)
|
||||
|
||||
# Build report
|
||||
report_lines = [
|
||||
"# Compliance Overview",
|
||||
"",
|
||||
"## Summary Statistics",
|
||||
f"- **Frameworks Evaluated**: {total_frameworks}",
|
||||
f"- **Total Requirements**: {total_requirements:,}",
|
||||
f"- **Passed**: {total_passed:,} ({overall_pass_pct}%)",
|
||||
f"- **Failed**: {total_failed:,}",
|
||||
f"- **Manual Review**: {total_manual:,}",
|
||||
"",
|
||||
"## Framework Breakdown",
|
||||
"",
|
||||
]
|
||||
|
||||
# Sort frameworks by fail count (most failures first)
|
||||
sorted_frameworks = sorted(
|
||||
frameworks, key=lambda f: f.requirements_failed, reverse=True
|
||||
)
|
||||
|
||||
for fw in sorted_frameworks:
|
||||
status_indicator = "PASS" if fw.requirements_failed == 0 else "FAIL"
|
||||
|
||||
report_lines.append(f"### {fw.framework} {fw.version}")
|
||||
report_lines.append(f"- **Compliance ID**: `{fw.compliance_id}`")
|
||||
report_lines.append(f"- **Status**: {status_indicator}")
|
||||
report_lines.append(
|
||||
f"- **Requirements**: {fw.requirements_passed}/{fw.total_requirements} passed ({fw.pass_percentage}%)"
|
||||
)
|
||||
if fw.requirements_failed > 0:
|
||||
report_lines.append(f"- **Failed**: {fw.requirements_failed}")
|
||||
if fw.requirements_manual > 0:
|
||||
report_lines.append(f"- **Manual Review**: {fw.requirements_manual}")
|
||||
report_lines.append("")
|
||||
|
||||
return {"report": "\n".join(report_lines)}
|
||||
|
||||
async def _get_requirement_check_ids_mapping(
|
||||
self, compliance_id: str
|
||||
) -> dict[str, list[str]]:
|
||||
"""Get mapping of requirement IDs to their associated check IDs.
|
||||
|
||||
Args:
|
||||
compliance_id: The compliance framework ID.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping requirement ID to list of check IDs.
|
||||
"""
|
||||
params: dict[str, Any] = {
|
||||
"filter[compliance_id]": compliance_id,
|
||||
"fields[compliance-requirements-attributes]": "id,attributes",
|
||||
}
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
|
||||
api_response = await self.api_client.get(
|
||||
"/compliance-overviews/attributes", params=clean_params
|
||||
)
|
||||
attributes_response = (
|
||||
ComplianceRequirementAttributesListResponse.from_api_response(api_response)
|
||||
)
|
||||
|
||||
# Build mapping: requirement_id -> [check_ids]
|
||||
return {req.id: req.check_ids for req in attributes_response.requirements}
|
||||
|
||||
async def _get_failed_finding_ids_for_checks(
|
||||
self,
|
||||
check_ids: list[str],
|
||||
scan_id: str,
|
||||
) -> list[str]:
|
||||
"""Get all failed finding IDs for a list of check IDs.
|
||||
|
||||
Args:
|
||||
check_ids: List of Prowler check IDs.
|
||||
scan_id: The scan ID to filter findings.
|
||||
|
||||
Returns:
|
||||
List of all finding IDs with FAIL status.
|
||||
"""
|
||||
if not check_ids:
|
||||
return []
|
||||
|
||||
all_finding_ids: list[str] = []
|
||||
page_number = 1
|
||||
page_size = 100
|
||||
|
||||
while True:
|
||||
# Query findings endpoint with check_id filter and FAIL status
|
||||
params: dict[str, Any] = {
|
||||
"filter[scan]": scan_id,
|
||||
"filter[check_id__in]": ",".join(check_ids),
|
||||
"filter[status]": "FAIL",
|
||||
"fields[findings]": "uid",
|
||||
"page[size]": page_size,
|
||||
"page[number]": page_number,
|
||||
}
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
|
||||
api_response = await self.api_client.get("/findings", params=clean_params)
|
||||
|
||||
findings = api_response.get("data", [])
|
||||
if not findings:
|
||||
break
|
||||
|
||||
all_finding_ids.extend([f["id"] for f in findings])
|
||||
|
||||
# Check if we've reached the last page
|
||||
if len(findings) < page_size:
|
||||
break
|
||||
|
||||
page_number += 1
|
||||
|
||||
return all_finding_ids
|
||||
|
||||
async def get_compliance_framework_state_details(
|
||||
self,
|
||||
compliance_id: str = Field(
|
||||
description="Compliance framework ID to get details for (e.g., 'cis_1.5_aws', 'pci_dss_v4.0_aws'). You can get compliance IDs from prowler_app_get_compliance_overview or consulting Prowler Hub/Prowler Documentation that you can also find in form of tools in this MCP Server",
|
||||
),
|
||||
scan_id: str | None = Field(
|
||||
default=None,
|
||||
description="UUID of a specific scan to get compliance data for. Required if provider_id is not specified.",
|
||||
),
|
||||
provider_id: str | None = Field(
|
||||
default=None,
|
||||
description="Prowler's internal UUID (v4) for a specific provider. If provided without scan_id, the tool will automatically find the latest completed scan for this provider. Use `prowler_app_search_providers` tool to find provider IDs.",
|
||||
),
|
||||
) -> dict[str, Any]:
|
||||
"""Get detailed requirement-level breakdown for a specific compliance framework.
|
||||
|
||||
IMPORTANT: This tool returns DETAILED requirement information for a single compliance framework,
|
||||
focusing on FAILED requirements and their associated FAILED finding IDs.
|
||||
Use this after prowler_app_get_compliance_overview to drill down into specific frameworks.
|
||||
|
||||
The markdown report includes:
|
||||
|
||||
1. Framework Summary:
|
||||
- Compliance ID and scan ID used
|
||||
- Overall pass/fail/manual counts
|
||||
|
||||
2. Failed Requirements Breakdown:
|
||||
- Each failed requirement's ID and description
|
||||
- Associated failed finding IDs for each failed requirement
|
||||
- Use prowler_app_get_finding_details with these finding IDs for more details and remediation guidance
|
||||
|
||||
Default behavior:
|
||||
- Requires either scan_id OR provider_id
|
||||
- With provider_id (no scan_id): Automatically finds the latest completed scan for that provider
|
||||
- With scan_id: Uses that specific scan's compliance data
|
||||
- Only shows failed requirements with their associated failed finding IDs
|
||||
|
||||
Workflow:
|
||||
1. Use prowler_app_get_compliance_overview to identify frameworks with failures
|
||||
2. Use this tool with the compliance_id to see failed requirements and their finding IDs
|
||||
3. Use prowler_app_get_finding_details with the finding IDs to get remediation guidance
|
||||
"""
|
||||
# Validate that either scan_id or provider_id is provided
|
||||
if not scan_id and not provider_id:
|
||||
return {
|
||||
"error": "Either scan_id or provider_id must be provided. Use prowler_app_search_providers to find provider IDs or prowler_app_list_scans to find scan IDs."
|
||||
}
|
||||
|
||||
# Resolve provider_id to latest scan_id if needed
|
||||
resolved_scan_id = scan_id
|
||||
if not scan_id and provider_id:
|
||||
try:
|
||||
resolved_scan_id = await self._get_latest_scan_id_for_provider(
|
||||
provider_id
|
||||
)
|
||||
except ValueError as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
# Build params for requirements endpoint
|
||||
params: dict[str, Any] = {
|
||||
"filter[scan_id]": resolved_scan_id,
|
||||
"filter[compliance_id]": compliance_id,
|
||||
}
|
||||
|
||||
params["fields[compliance-requirements-details]"] = "id,description,status"
|
||||
|
||||
clean_params = self.api_client.build_filter_params(params)
|
||||
|
||||
# Get API response
|
||||
api_response = await self.api_client.get(
|
||||
"/compliance-overviews/requirements", params=clean_params
|
||||
)
|
||||
requirements_response = ComplianceRequirementsListResponse.from_api_response(
|
||||
api_response
|
||||
)
|
||||
|
||||
requirements = requirements_response.requirements
|
||||
|
||||
if not requirements:
|
||||
return {
|
||||
"report": f"# Compliance Framework Details\n\n**Compliance ID**: `{compliance_id}`\n\nNo requirements found for this compliance framework and scan combination."
|
||||
}
|
||||
|
||||
# Get failed requirements
|
||||
failed_reqs = [r for r in requirements if r.status == "FAIL"]
|
||||
|
||||
# Get requirement -> check_ids mapping from attributes endpoint
|
||||
requirement_check_mapping: dict[str, list[str]] = {}
|
||||
if failed_reqs:
|
||||
requirement_check_mapping = await self._get_requirement_check_ids_mapping(
|
||||
compliance_id
|
||||
)
|
||||
|
||||
# For each failed requirement, get the failed finding IDs
|
||||
failed_req_findings: dict[str, list[str]] = {}
|
||||
for req in failed_reqs:
|
||||
check_ids = requirement_check_mapping.get(req.id, [])
|
||||
if check_ids:
|
||||
finding_ids = await self._get_failed_finding_ids_for_checks(
|
||||
check_ids, resolved_scan_id
|
||||
)
|
||||
failed_req_findings[req.id] = finding_ids
|
||||
|
||||
# Calculate counts
|
||||
total_count = len(requirements)
|
||||
passed_count = sum(1 for r in requirements if r.status == "PASS")
|
||||
failed_count = len(failed_reqs)
|
||||
manual_count = sum(1 for r in requirements if r.status == "MANUAL")
|
||||
|
||||
# Build markdown report
|
||||
pass_pct = (
|
||||
round((passed_count / total_count) * 100, 1) if total_count > 0 else 0
|
||||
)
|
||||
|
||||
report_lines = [
|
||||
"# Compliance Framework Details",
|
||||
"",
|
||||
f"**Compliance ID**: `{compliance_id}`",
|
||||
f"**Scan ID**: `{resolved_scan_id}`",
|
||||
"",
|
||||
"## Summary",
|
||||
f"- **Total Requirements**: {total_count}",
|
||||
f"- **Passed**: {passed_count} ({pass_pct}%)",
|
||||
f"- **Failed**: {failed_count}",
|
||||
f"- **Manual Review**: {manual_count}",
|
||||
"",
|
||||
]
|
||||
|
||||
# Show failed requirements with their finding IDs (most actionable)
|
||||
if failed_reqs:
|
||||
report_lines.append("## Failed Requirements")
|
||||
report_lines.append("")
|
||||
for req in failed_reqs:
|
||||
report_lines.append(f"### {req.id}")
|
||||
report_lines.append(f"**Description**: {req.description}")
|
||||
finding_ids = failed_req_findings.get(req.id, [])
|
||||
if finding_ids:
|
||||
report_lines.append(f"**Failed Finding IDs** ({len(finding_ids)}):")
|
||||
for fid in finding_ids:
|
||||
report_lines.append(f" - `{fid}`")
|
||||
else:
|
||||
report_lines.append("**Failed Finding IDs**: None found")
|
||||
report_lines.append("")
|
||||
report_lines.append(
|
||||
"*Use `prowler_app_get_finding_details` with these finding IDs to get remediation guidance.*"
|
||||
)
|
||||
report_lines.append("")
|
||||
|
||||
if manual_count > 0:
|
||||
manual_reqs = [r for r in requirements if r.status == "MANUAL"]
|
||||
report_lines.append("## Requirements Requiring Manual Review")
|
||||
report_lines.append("")
|
||||
for req in manual_reqs:
|
||||
report_lines.append(f"- **{req.id}**: {req.description}")
|
||||
report_lines.append("")
|
||||
|
||||
return {"report": "\n".join(report_lines)}
|
||||
@@ -1,3 +1,5 @@
|
||||
from typing import List, Optional
|
||||
|
||||
import httpx
|
||||
from prowler_mcp_server import __version__
|
||||
from pydantic import BaseModel, Field
|
||||
@@ -9,7 +11,7 @@ class SearchResult(BaseModel):
|
||||
path: str = Field(description="Document path")
|
||||
title: str = Field(description="Document title")
|
||||
url: str = Field(description="Documentation URL")
|
||||
highlights: list[str] = Field(
|
||||
highlights: List[str] = Field(
|
||||
description="Highlighted content snippets showing query matches with <mark><b> tags",
|
||||
default_factory=list,
|
||||
)
|
||||
@@ -52,7 +54,7 @@ class ProwlerDocsSearchEngine:
|
||||
},
|
||||
)
|
||||
|
||||
def search(self, query: str, page_size: int = 5) -> list[SearchResult]:
|
||||
def search(self, query: str, page_size: int = 5) -> List[SearchResult]:
|
||||
"""
|
||||
Search documentation using Mintlify API.
|
||||
|
||||
@@ -61,7 +63,7 @@ class ProwlerDocsSearchEngine:
|
||||
page_size: Maximum number of results to return
|
||||
|
||||
Returns:
|
||||
list of search results
|
||||
List of search results
|
||||
"""
|
||||
try:
|
||||
# Construct request body
|
||||
@@ -137,7 +139,7 @@ class ProwlerDocsSearchEngine:
|
||||
print(f"Search error: {e}")
|
||||
return []
|
||||
|
||||
def get_document(self, doc_path: str) -> str | None:
|
||||
def get_document(self, doc_path: str) -> Optional[str]:
|
||||
"""
|
||||
Get full document content from Mintlify documentation.
|
||||
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
from typing import Any
|
||||
from typing import Any, List
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from pydantic import Field
|
||||
|
||||
from prowler_mcp_server.prowler_documentation.search_engine import (
|
||||
ProwlerDocsSearchEngine,
|
||||
)
|
||||
@@ -14,44 +12,46 @@ prowler_docs_search_engine = ProwlerDocsSearchEngine()
|
||||
|
||||
@docs_mcp_server.tool()
|
||||
def search(
|
||||
term: str = Field(description="The term to search for in the documentation"),
|
||||
page_size: int = Field(
|
||||
5,
|
||||
description="Number of top results to return to return. It must be between 1 and 20.",
|
||||
gt=1,
|
||||
lt=20,
|
||||
),
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Search in Prowler documentation.
|
||||
query: str,
|
||||
page_size: int = 5,
|
||||
) -> List[dict[str, Any]]:
|
||||
"""
|
||||
Search in Prowler documentation.
|
||||
|
||||
This tool searches through the official Prowler documentation
|
||||
to find relevant information about everything related to Prowler.
|
||||
to find relevant information about security checks, cloud providers,
|
||||
compliance frameworks, and usage instructions.
|
||||
|
||||
Uses fulltext search to find the most relevant documentation pages
|
||||
based on your query.
|
||||
|
||||
Args:
|
||||
query: The search query
|
||||
page_size: Number of top results to return (default: 5)
|
||||
|
||||
Returns:
|
||||
List of search results with highlights showing matched terms (in <mark><b> tags)
|
||||
"""
|
||||
return prowler_docs_search_engine.search(term, page_size) # type: ignore In the hint we cannot put SearchResult type because JSON API MCP Generator cannot handle Pydantic models yet
|
||||
return prowler_docs_search_engine.search(query, page_size)
|
||||
|
||||
|
||||
@docs_mcp_server.tool()
|
||||
def get_document(
|
||||
doc_path: str = Field(
|
||||
description="Path to the documentation file to retrieve. It is the same as the 'path' field of the search results. Use `prowler_docs_search` to find the path first."
|
||||
),
|
||||
) -> dict[str, str]:
|
||||
"""Retrieve the full content of a Prowler documentation file.
|
||||
doc_path: str,
|
||||
) -> str:
|
||||
"""
|
||||
Retrieve the full content of a Prowler documentation file.
|
||||
|
||||
Use this after searching to get the complete content of a specific
|
||||
documentation file.
|
||||
|
||||
Args:
|
||||
doc_path: Path to the documentation file. It is the same as the "path" field of the search results.
|
||||
|
||||
Returns:
|
||||
Full content of the documentation file in markdown format.
|
||||
Full content of the documentation file
|
||||
"""
|
||||
content: str | None = prowler_docs_search_engine.get_document(doc_path)
|
||||
content = prowler_docs_search_engine.get_document(doc_path)
|
||||
if content is None:
|
||||
return {"error": f"Document '{doc_path}' not found."}
|
||||
else:
|
||||
return {"content": content}
|
||||
raise ValueError(f"Document not found: {doc_path}")
|
||||
return content
|
||||
|
||||
@@ -4,10 +4,10 @@ Prowler Hub MCP module
|
||||
Provides access to Prowler Hub API for security checks and compliance frameworks.
|
||||
"""
|
||||
|
||||
from typing import Any, Optional
|
||||
|
||||
import httpx
|
||||
from fastmcp import FastMCP
|
||||
from pydantic import Field
|
||||
|
||||
from prowler_mcp_server import __version__
|
||||
|
||||
# Initialize FastMCP for Prowler Hub
|
||||
@@ -55,90 +55,109 @@ def github_check_path(provider_id: str, check_id: str, suffix: str) -> str:
|
||||
return f"{GITHUB_RAW_BASE}/{provider_id}/services/{service_id}/{check_id}/{check_id}{suffix}"
|
||||
|
||||
|
||||
# Security Check Tools
|
||||
@hub_mcp_server.tool()
|
||||
async def list_checks(
|
||||
providers: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by Prowler provider IDs. Example: ['aws', 'azure']. Use `prowler_hub_list_providers` to get available provider IDs.",
|
||||
),
|
||||
services: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by provider services. Example: ['s3', 'ec2', 'keyvault']. Use `prowler_hub_get_provider_services` to get available services for a provider.",
|
||||
),
|
||||
severities: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by severity levels. Example: ['high', 'critical']. Available: 'low', 'medium', 'high', 'critical'.",
|
||||
),
|
||||
categories: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by security categories. Example: ['encryption', 'internet-exposed'].",
|
||||
),
|
||||
compliances: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by compliance framework IDs. Example: ['cis_4.0_aws', 'ens_rd2022_azure']. Use `prowler_hub_list_compliances` to get available compliance IDs.",
|
||||
),
|
||||
) -> dict:
|
||||
"""List security Prowler Checks with filtering capabilities.
|
||||
|
||||
IMPORTANT: This tool returns LIGHTWEIGHT check data. Use this for fast browsing and filtering.
|
||||
For complete details including risk, remediation guidance, and categories use `prowler_hub_get_check_details`.
|
||||
|
||||
IMPORTANT: An unfiltered request returns 1000+ checks. Use filters to narrow results.
|
||||
async def get_check_filters() -> dict[str, Any]:
|
||||
"""
|
||||
Get available values for filtering for tool `get_checks`. Recommended to use before calling `get_checks` to get the available values for the filters.
|
||||
|
||||
Returns:
|
||||
Available filter options including providers, types, services, severities,
|
||||
categories, and compliance frameworks with their respective counts
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get("/check/filters")
|
||||
response.raise_for_status()
|
||||
filters = response.json()
|
||||
|
||||
return {"filters": filters}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
# Security Check Tools
|
||||
@hub_mcp_server.tool()
|
||||
async def get_checks(
|
||||
providers: Optional[str] = None,
|
||||
types: Optional[str] = None,
|
||||
services: Optional[str] = None,
|
||||
severities: Optional[str] = None,
|
||||
categories: Optional[str] = None,
|
||||
compliances: Optional[str] = None,
|
||||
ids: Optional[str] = None,
|
||||
fields: Optional[str] = "id,service,severity,title,description,risk",
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
List security Prowler Checks. The list can be filtered by the parameters defined for the tool.
|
||||
It is recommended to use the tool `get_check_filters` to get the available values for the filters.
|
||||
A not filtered request will return more than 1000 checks, so it is recommended to use the filters.
|
||||
|
||||
Args:
|
||||
providers: Filter by Prowler provider IDs. Example: "aws,azure". Use the tool `list_providers` to get the available providers IDs.
|
||||
types: Filter by check types.
|
||||
services: Filter by provider services IDs. Example: "s3,keyvault". Use the tool `list_providers` to get the available services IDs in a provider.
|
||||
severities: Filter by severity levels. Example: "medium,high". Available values are "low", "medium", "high", "critical".
|
||||
categories: Filter by categories. Example: "cluster-security,encryption".
|
||||
compliances: Filter by compliance framework IDs. Example: "cis_4.0_aws,ens_rd2022_azure".
|
||||
ids: Filter by specific check IDs. Example: "s3_bucket_level_public_access_block".
|
||||
fields: Specify which fields from checks metadata to return (id is always included). Example: "id,title,description,risk".
|
||||
Available values are "id", "title", "description", "provider", "type", "service", "subservice", "severity", "risk", "reference", "remediation", "services_required", "aws_arn_template", "notes", "categories", "default_value", "resource_type", "related_url", "depends_on", "related_to", "fixer".
|
||||
The default parameters are "id,title,description".
|
||||
If null, all fields will be returned.
|
||||
|
||||
Returns:
|
||||
List of security checks matching the filters. The structure is as follows:
|
||||
{
|
||||
"count": N,
|
||||
"checks": [
|
||||
{
|
||||
"id": "check_id",
|
||||
"provider": "provider_id",
|
||||
"title": "Human-readable check title",
|
||||
"severity": "critical|high|medium|low",
|
||||
},
|
||||
{"id": "check_id_1", "title": "check_title_1", "description": "check_description_1", ...},
|
||||
{"id": "check_id_2", "title": "check_title_2", "description": "check_description_2", ...},
|
||||
{"id": "check_id_3", "title": "check_title_3", "description": "check_description_3", ...},
|
||||
...
|
||||
]
|
||||
}
|
||||
|
||||
Useful Example Workflow:
|
||||
1. Use `prowler_hub_list_providers` to see available Prowler providers
|
||||
2. Use `prowler_hub_get_provider_services` to see services for a provider
|
||||
3. Use this tool with filters to find relevant checks
|
||||
4. Use `prowler_hub_get_check_details` to get complete information for a specific check
|
||||
"""
|
||||
# Lightweight fields for listing
|
||||
lightweight_fields = "id,title,severity,provider"
|
||||
|
||||
params: dict[str, str] = {"fields": lightweight_fields}
|
||||
params: dict[str, str] = {}
|
||||
|
||||
if providers:
|
||||
params["providers"] = ",".join(providers)
|
||||
params["providers"] = providers
|
||||
if types:
|
||||
params["types"] = types
|
||||
if services:
|
||||
params["services"] = ",".join(services)
|
||||
params["services"] = services
|
||||
if severities:
|
||||
params["severities"] = ",".join(severities)
|
||||
params["severities"] = severities
|
||||
if categories:
|
||||
params["categories"] = ",".join(categories)
|
||||
params["categories"] = categories
|
||||
if compliances:
|
||||
params["compliances"] = ",".join(compliances)
|
||||
params["compliances"] = compliances
|
||||
if ids:
|
||||
params["ids"] = ids
|
||||
if fields:
|
||||
params["fields"] = fields
|
||||
|
||||
try:
|
||||
response = prowler_hub_client.get("/check", params=params)
|
||||
response.raise_for_status()
|
||||
checks = response.json()
|
||||
|
||||
# Return checks as a lightweight list
|
||||
checks_list = []
|
||||
checks_dict = {}
|
||||
for check in checks:
|
||||
check_data = {
|
||||
"id": check["id"],
|
||||
"provider": check["provider"],
|
||||
"title": check["title"],
|
||||
"severity": check["severity"],
|
||||
}
|
||||
checks_list.append(check_data)
|
||||
check_data = {}
|
||||
# Always include the id field as it's mandatory for the response structure
|
||||
if "id" in check:
|
||||
check_data["id"] = check["id"]
|
||||
|
||||
return {"count": len(checks), "checks": checks_list}
|
||||
# Include other requested fields
|
||||
for field in fields.split(","):
|
||||
if field != "id" and field in check: # Skip id since it's already added
|
||||
check_data[field] = check[field]
|
||||
checks_dict[check["id"]] = check_data
|
||||
|
||||
return {"count": len(checks), "checks": checks_dict}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
@@ -148,220 +167,60 @@ async def list_checks(
|
||||
|
||||
|
||||
@hub_mcp_server.tool()
|
||||
async def semantic_search_checks(
|
||||
term: str = Field(
|
||||
description="Search term. Examples: 'public access', 'encryption', 'MFA', 'logging'.",
|
||||
),
|
||||
) -> dict:
|
||||
"""Search for security checks using free-text search across all metadata.
|
||||
async def get_check_raw_metadata(
|
||||
provider_id: str,
|
||||
check_id: str,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Fetch the raw check metadata JSON, this is a low level version of the tool `get_checks`.
|
||||
It is recommended to use the tool `get_checks` filtering about the `ids` parameter instead of using this tool.
|
||||
|
||||
IMPORTANT: This tool returns LIGHTWEIGHT check data. Use this for discovering checks by topic.
|
||||
For complete details including risk, remediation guidance, and categories use `prowler_hub_get_check_details`.
|
||||
|
||||
Searches across check titles, descriptions, risk statements, remediation guidance,
|
||||
and other text fields. Use this when you don't know the exact check ID or want to
|
||||
explore checks related to a topic.
|
||||
Args:
|
||||
provider_id: Prowler provider ID (e.g., "aws", "azure").
|
||||
check_id: Prowler check ID (folder and base filename).
|
||||
|
||||
Returns:
|
||||
{
|
||||
"count": N,
|
||||
"checks": [
|
||||
{
|
||||
"id": "check_id",
|
||||
"provider": "provider_id",
|
||||
"title": "Human-readable check title",
|
||||
"severity": "critical|high|medium|low",
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
|
||||
Useful Example Workflow:
|
||||
1. Use this tool to search for checks by keyword or topic
|
||||
2. Use `prowler_hub_list_checks` with filters for more targeted browsing
|
||||
3. Use `prowler_hub_get_check_details` to get complete information for a specific check
|
||||
Raw metadata JSON as stored in Prowler.
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get("/check/search", params={"term": term})
|
||||
response.raise_for_status()
|
||||
checks = response.json()
|
||||
|
||||
# Return checks as a lightweight list
|
||||
checks_list = []
|
||||
for check in checks:
|
||||
check_data = {
|
||||
"id": check["id"],
|
||||
"provider": check["provider"],
|
||||
"title": check["title"],
|
||||
"severity": check["severity"],
|
||||
if provider_id and check_id:
|
||||
url = github_check_path(provider_id, check_id, ".metadata.json")
|
||||
try:
|
||||
resp = github_raw_client.get(url)
|
||||
resp.raise_for_status()
|
||||
return resp.json()
|
||||
except httpx.HTTPStatusError as e:
|
||||
if e.response.status_code == 404:
|
||||
return {
|
||||
"error": f"Check {check_id} not found in Prowler",
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"error": f"Error fetching check {check_id} from Prowler: {str(e)}",
|
||||
}
|
||||
checks_list.append(check_data)
|
||||
|
||||
return {"count": len(checks), "checks": checks_list}
|
||||
except httpx.HTTPStatusError as e:
|
||||
else:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
"error": "Provider ID and check ID are required",
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
@hub_mcp_server.tool()
|
||||
async def get_check_details(
|
||||
check_id: str = Field(
|
||||
description="The check ID to retrieve details for. Example: 's3_bucket_level_public_access_block'"
|
||||
),
|
||||
) -> dict:
|
||||
"""Retrieve comprehensive details about a specific security check by its ID.
|
||||
|
||||
IMPORTANT: This tool returns COMPLETE check details.
|
||||
Use this after finding a specific check ID, you can get it via `prowler_hub_list_checks` or `prowler_hub_semantic_search_checks`.
|
||||
|
||||
Returns:
|
||||
{
|
||||
"id": "string",
|
||||
"title": "string",
|
||||
"description": "string",
|
||||
"provider": "string",
|
||||
"service": "string",
|
||||
"severity": "low",
|
||||
"risk": "string",
|
||||
"reference": [
|
||||
"string"
|
||||
],
|
||||
"additional_urls": [
|
||||
"string"
|
||||
],
|
||||
"remediation": {
|
||||
"cli": {
|
||||
"description": "string"
|
||||
},
|
||||
"terraform": {
|
||||
"description": "string"
|
||||
},
|
||||
"nativeiac": {
|
||||
"description": "string"
|
||||
},
|
||||
"other": {
|
||||
"description": "string"
|
||||
},
|
||||
"wui": {
|
||||
"description": "string",
|
||||
"reference": "string"
|
||||
}
|
||||
},
|
||||
"services_required": [
|
||||
"string"
|
||||
],
|
||||
"notes": "string",
|
||||
"compliances": [
|
||||
{
|
||||
"name": "string",
|
||||
"id": "string"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"string"
|
||||
],
|
||||
"resource_type": "string",
|
||||
"related_url": "string",
|
||||
"fixer": bool
|
||||
}
|
||||
|
||||
Useful Example Workflow:
|
||||
1. Use `prowler_hub_list_checks` or `prowler_hub_search_checks` to find check IDs
|
||||
2. Use this tool with the check 'id' to get complete information including remediation guidance
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get(f"/check/{check_id}")
|
||||
response.raise_for_status()
|
||||
check = response.json()
|
||||
|
||||
if not check:
|
||||
return {"error": f"Check '{check_id}' not found"}
|
||||
|
||||
# Build response with only non-empty fields to save tokens
|
||||
result = {}
|
||||
|
||||
# Core fields
|
||||
result["id"] = check["id"]
|
||||
if check.get("title"):
|
||||
result["title"] = check["title"]
|
||||
if check.get("description"):
|
||||
result["description"] = check["description"]
|
||||
if check.get("provider"):
|
||||
result["provider"] = check["provider"]
|
||||
if check.get("service"):
|
||||
result["service"] = check["service"]
|
||||
if check.get("severity"):
|
||||
result["severity"] = check["severity"]
|
||||
if check.get("risk"):
|
||||
result["risk"] = check["risk"]
|
||||
if check.get("resource_type"):
|
||||
result["resource_type"] = check["resource_type"]
|
||||
|
||||
# List fields
|
||||
if check.get("reference"):
|
||||
result["reference"] = check["reference"]
|
||||
if check.get("additional_urls"):
|
||||
result["additional_urls"] = check["additional_urls"]
|
||||
if check.get("services_required"):
|
||||
result["services_required"] = check["services_required"]
|
||||
if check.get("categories"):
|
||||
result["categories"] = check["categories"]
|
||||
if check.get("compliances"):
|
||||
result["compliances"] = check["compliances"]
|
||||
|
||||
# Other fields
|
||||
if check.get("notes"):
|
||||
result["notes"] = check["notes"]
|
||||
if check.get("related_url"):
|
||||
result["related_url"] = check["related_url"]
|
||||
if check.get("fixer") is not None:
|
||||
result["fixer"] = check["fixer"]
|
||||
|
||||
# Remediation - filter out empty nested values
|
||||
remediation = check.get("remediation", {})
|
||||
if remediation:
|
||||
filtered_remediation = {}
|
||||
for key, value in remediation.items():
|
||||
if value and isinstance(value, dict):
|
||||
# Filter out empty values within nested dict
|
||||
filtered_value = {k: v for k, v in value.items() if v}
|
||||
if filtered_value:
|
||||
filtered_remediation[key] = filtered_value
|
||||
elif value:
|
||||
filtered_remediation[key] = value
|
||||
if filtered_remediation:
|
||||
result["remediation"] = filtered_remediation
|
||||
|
||||
return result
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
@hub_mcp_server.tool()
|
||||
async def get_check_code(
|
||||
provider_id: str = Field(
|
||||
description="Prowler Provider ID. Example: 'aws', 'azure', 'gcp', 'kubernetes'. Use `prowler_hub_list_providers` to get available provider IDs.",
|
||||
),
|
||||
check_id: str = Field(
|
||||
description="The check ID. Example: 's3_bucket_public_access'. Get IDs from `prowler_hub_list_checks` or `prowler_hub_search_checks`.",
|
||||
),
|
||||
) -> dict:
|
||||
"""Fetch the Python implementation code of a Prowler security check.
|
||||
provider_id: str,
|
||||
check_id: str,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Fetch the check implementation Python code from Prowler.
|
||||
|
||||
The check code shows exactly how Prowler evaluates resources for security issues.
|
||||
Use this to understand check logic, customize checks, or create new ones.
|
||||
Args:
|
||||
provider_id: Prowler provider ID (e.g., "aws", "azure").
|
||||
check_id: Prowler check ID (e.g., "opensearch_service_domains_not_publicly_accessible").
|
||||
|
||||
Returns:
|
||||
{
|
||||
"content": "Python source code of the check implementation"
|
||||
}
|
||||
Dict with the code content as text.
|
||||
"""
|
||||
if provider_id and check_id:
|
||||
url = github_check_path(provider_id, check_id, ".py")
|
||||
@@ -392,29 +251,18 @@ async def get_check_code(
|
||||
|
||||
@hub_mcp_server.tool()
|
||||
async def get_check_fixer(
|
||||
provider_id: str = Field(
|
||||
description="Prowler Provider ID. Example: 'aws', 'azure', 'gcp', 'kubernetes'. Use `prowler_hub_list_providers` to get available provider IDs.",
|
||||
),
|
||||
check_id: str = Field(
|
||||
description="The check ID. Example: 's3_bucket_public_access'. Get IDs from `prowler_hub_list_checks` or `prowler_hub_search_checks`.",
|
||||
),
|
||||
) -> dict:
|
||||
"""Fetch the auto-remediation (fixer) code for a Prowler security check.
|
||||
provider_id: str,
|
||||
check_id: str,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Fetch the check fixer Python code from Prowler, if it exists.
|
||||
|
||||
IMPORTANT: Not all checks have fixers. A "fixer not found" response means the check
|
||||
doesn't have auto-remediation code - this is normal for many checks.
|
||||
|
||||
Fixer code provides automated remediation that can fix security issues detected by checks.
|
||||
Use this to understand how to programmatically remediate findings.
|
||||
Args:
|
||||
provider_id: Prowler provider ID (e.g., "aws", "azure").
|
||||
check_id: Prowler check ID (e.g., "opensearch_service_domains_not_publicly_accessible").
|
||||
|
||||
Returns:
|
||||
{
|
||||
"content": "Python source code of the auto-remediation implementation"
|
||||
}
|
||||
Or if no fixer exists:
|
||||
{
|
||||
"error": "Fixer not found for check {check_id}"
|
||||
}
|
||||
Dict with fixer content as text if present, existence flag.
|
||||
"""
|
||||
if provider_id and check_id:
|
||||
url = github_check_path(provider_id, check_id, "_fixer.py")
|
||||
@@ -447,66 +295,95 @@ async def get_check_fixer(
|
||||
}
|
||||
|
||||
|
||||
# Compliance Framework Tools
|
||||
@hub_mcp_server.tool()
|
||||
async def list_compliances(
|
||||
provider: list[str] = Field(
|
||||
default=[],
|
||||
description="Filter by cloud provider. Example: ['aws']. Use `prowler_hub_list_providers` to get available provider IDs.",
|
||||
),
|
||||
) -> dict:
|
||||
"""List compliance frameworks supported by Prowler.
|
||||
async def search_checks(term: str) -> dict[str, Any]:
|
||||
"""
|
||||
Search the term across all text properties of check metadata.
|
||||
|
||||
IMPORTANT: This tool returns LIGHTWEIGHT compliance data. Use this for fast browsing and filtering.
|
||||
For complete details including requirements use `prowler_hub_get_compliance_details`.
|
||||
|
||||
Compliance frameworks define sets of security requirements that checks map to.
|
||||
Use this to discover available frameworks for compliance reporting.
|
||||
|
||||
WARNING: An unfiltered request may return a large number of frameworks. Use the provider with not more than 3 different providers to make easier the response handling.
|
||||
Args:
|
||||
term: Search term to find in check titles, descriptions, and other text fields
|
||||
|
||||
Returns:
|
||||
List of checks matching the search term
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get("/check/search", params={"term": term})
|
||||
response.raise_for_status()
|
||||
checks = response.json()
|
||||
|
||||
return {
|
||||
"count": len(checks),
|
||||
"checks": checks,
|
||||
}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
# Compliance Framework Tools
|
||||
@hub_mcp_server.tool()
|
||||
async def get_compliance_frameworks(
|
||||
provider: Optional[str] = None,
|
||||
fields: Optional[
|
||||
str
|
||||
] = "id,framework,provider,description,total_checks,total_requirements",
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
List and filter compliance frameworks. The list can be filtered by the parameters defined for the tool.
|
||||
|
||||
Args:
|
||||
provider: Filter by one Prowler provider ID. Example: "aws". Use the tool `list_providers` to get the available providers IDs.
|
||||
fields: Specify which fields to return (id is always included). Example: "id,provider,description,version".
|
||||
It is recommended to run with the default parameters because the full response is too large.
|
||||
Available values are "id", "framework", "provider", "description", "total_checks", "total_requirements", "created_at", "updated_at".
|
||||
The default parameters are "id,framework,provider,description,total_checks,total_requirements".
|
||||
If null, all fields will be returned.
|
||||
|
||||
Returns:
|
||||
List of compliance frameworks. The structure is as follows:
|
||||
{
|
||||
"count": N,
|
||||
"compliances": [
|
||||
{
|
||||
"id": "cis_4.0_aws",
|
||||
"name": "CIS Amazon Web Services Foundations Benchmark v4.0",
|
||||
"provider": "aws",
|
||||
},
|
||||
...
|
||||
]
|
||||
"frameworks": {
|
||||
"framework_id": {
|
||||
"id": "framework_id",
|
||||
"provider": "provider_id",
|
||||
"description": "framework_description",
|
||||
"version": "framework_version"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Useful Example Workflow:
|
||||
1. Use `prowler_hub_list_providers` to see available cloud providers
|
||||
2. Use this tool to browse compliance frameworks
|
||||
3. Use `prowler_hub_get_compliance_details` with the compliance 'id' to get complete information
|
||||
"""
|
||||
# Lightweight fields for listing
|
||||
lightweight_fields = "id,name,provider"
|
||||
|
||||
params: dict[str, str] = {"fields": lightweight_fields}
|
||||
params = {}
|
||||
|
||||
if provider:
|
||||
params["provider"] = ",".join(provider)
|
||||
params["provider"] = provider
|
||||
if fields:
|
||||
params["fields"] = fields
|
||||
|
||||
try:
|
||||
response = prowler_hub_client.get("/compliance", params=params)
|
||||
response.raise_for_status()
|
||||
compliances = response.json()
|
||||
frameworks = response.json()
|
||||
|
||||
# Return compliances as a lightweight list
|
||||
compliances_list = []
|
||||
for compliance in compliances:
|
||||
compliance_data = {
|
||||
"id": compliance["id"],
|
||||
"name": compliance["name"],
|
||||
"provider": compliance["provider"],
|
||||
}
|
||||
compliances_list.append(compliance_data)
|
||||
frameworks_dict = {}
|
||||
for framework in frameworks:
|
||||
framework_data = {}
|
||||
# Always include the id field as it's mandatory for the response structure
|
||||
if "id" in framework:
|
||||
framework_data["id"] = framework["id"]
|
||||
|
||||
return {"count": len(compliances), "compliances": compliances_list}
|
||||
# Include other requested fields
|
||||
for field in fields.split(","):
|
||||
if (
|
||||
field != "id" and field in framework
|
||||
): # Skip id since it's already added
|
||||
framework_data[field] = framework[field]
|
||||
frameworks_dict[framework["id"]] = framework_data
|
||||
|
||||
return {"count": len(frameworks), "frameworks": frameworks_dict}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
@@ -516,140 +393,27 @@ async def list_compliances(
|
||||
|
||||
|
||||
@hub_mcp_server.tool()
|
||||
async def semantic_search_compliances(
|
||||
term: str = Field(
|
||||
description="Search term. Examples: 'CIS', 'HIPAA', 'PCI', 'GDPR', 'SOC2', 'NIST'.",
|
||||
),
|
||||
) -> dict:
|
||||
"""Search for compliance frameworks using free-text search.
|
||||
async def search_compliance_frameworks(term: str) -> dict[str, Any]:
|
||||
"""
|
||||
Search compliance frameworks by term.
|
||||
|
||||
IMPORTANT: This tool returns LIGHTWEIGHT compliance data. Use this for discovering frameworks by topic.
|
||||
For complete details including requirements use `prowler_hub_get_compliance_details`.
|
||||
|
||||
Searches across framework names, descriptions, and metadata. Use this when you
|
||||
want to find frameworks related to a specific regulation, standard, or topic.
|
||||
Args:
|
||||
term: Search term to find in framework names and descriptions
|
||||
|
||||
Returns:
|
||||
{
|
||||
"count": N,
|
||||
"compliances": [
|
||||
{
|
||||
"id": "cis_4.0_aws",
|
||||
"name": "CIS Amazon Web Services Foundations Benchmark v4.0",
|
||||
"provider": "aws",
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
List of compliance frameworks matching the search term
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get("/compliance/search", params={"term": term})
|
||||
response.raise_for_status()
|
||||
compliances = response.json()
|
||||
frameworks = response.json()
|
||||
|
||||
# Return compliances as a lightweight list
|
||||
compliances_list = []
|
||||
for compliance in compliances:
|
||||
compliance_data = {
|
||||
"id": compliance["id"],
|
||||
"name": compliance["name"],
|
||||
"provider": compliance["provider"],
|
||||
}
|
||||
compliances_list.append(compliance_data)
|
||||
|
||||
return {"count": len(compliances), "compliances": compliances_list}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
"count": len(frameworks),
|
||||
"search_term": term,
|
||||
"frameworks": frameworks,
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
@hub_mcp_server.tool()
|
||||
async def get_compliance_details(
|
||||
compliance_id: str = Field(
|
||||
description="The compliance framework ID to retrieve details for. Example: 'cis_4.0_aws'. Use `prowler_hub_list_compliances` or `prowler_hub_semantic_search_compliances` to find available compliance IDs.",
|
||||
),
|
||||
) -> dict:
|
||||
"""Retrieve comprehensive details about a specific compliance framework by its ID.
|
||||
|
||||
IMPORTANT: This tool returns COMPLETE compliance details.
|
||||
Use this after finding a specific compliance via `prowler_hub_list_compliances` or `prowler_hub_semantic_search_compliances`.
|
||||
|
||||
Returns:
|
||||
{
|
||||
"id": "string",
|
||||
"name": "string",
|
||||
"framework": "string",
|
||||
"provider": "string",
|
||||
"version": "string",
|
||||
"description": "string",
|
||||
"total_checks": int,
|
||||
"total_requirements": int,
|
||||
"requirements": [
|
||||
{
|
||||
"id": "string",
|
||||
"name": "string",
|
||||
"description": "string",
|
||||
"checks": ["check_id_1", "check_id_2"]
|
||||
}
|
||||
]
|
||||
}
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get(f"/compliance/{compliance_id}")
|
||||
response.raise_for_status()
|
||||
compliance = response.json()
|
||||
|
||||
if not compliance:
|
||||
return {"error": f"Compliance '{compliance_id}' not found"}
|
||||
|
||||
# Build response with only non-empty fields to save tokens
|
||||
result = {}
|
||||
|
||||
# Core fields
|
||||
result["id"] = compliance["id"]
|
||||
if compliance.get("name"):
|
||||
result["name"] = compliance["name"]
|
||||
if compliance.get("framework"):
|
||||
result["framework"] = compliance["framework"]
|
||||
if compliance.get("provider"):
|
||||
result["provider"] = compliance["provider"]
|
||||
if compliance.get("version"):
|
||||
result["version"] = compliance["version"]
|
||||
if compliance.get("description"):
|
||||
result["description"] = compliance["description"]
|
||||
|
||||
# Numeric fields
|
||||
if compliance.get("total_checks"):
|
||||
result["total_checks"] = compliance["total_checks"]
|
||||
if compliance.get("total_requirements"):
|
||||
result["total_requirements"] = compliance["total_requirements"]
|
||||
|
||||
# Requirements - filter out empty nested values
|
||||
requirements = compliance.get("requirements", [])
|
||||
if requirements:
|
||||
filtered_requirements = []
|
||||
for req in requirements:
|
||||
filtered_req = {}
|
||||
if req.get("id"):
|
||||
filtered_req["id"] = req["id"]
|
||||
if req.get("name"):
|
||||
filtered_req["name"] = req["name"]
|
||||
if req.get("description"):
|
||||
filtered_req["description"] = req["description"]
|
||||
if req.get("checks"):
|
||||
filtered_req["checks"] = req["checks"]
|
||||
if filtered_req:
|
||||
filtered_requirements.append(filtered_req)
|
||||
if filtered_requirements:
|
||||
result["requirements"] = filtered_requirements
|
||||
|
||||
return result
|
||||
except httpx.HTTPStatusError as e:
|
||||
if e.response.status_code == 404:
|
||||
return {"error": f"Compliance '{compliance_id}' not found"}
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
}
|
||||
@@ -659,28 +423,20 @@ async def get_compliance_details(
|
||||
|
||||
# Provider Tools
|
||||
@hub_mcp_server.tool()
|
||||
async def list_providers() -> dict:
|
||||
"""List all providers supported by Prowler.
|
||||
|
||||
This is a reference tool that shows available providers (aws, azure, gcp, kubernetes, etc.)
|
||||
that can be scanned for finding security issues.
|
||||
|
||||
Use the provider IDs from this tool as filter values in other tools.
|
||||
async def list_providers() -> dict[str, Any]:
|
||||
"""
|
||||
Get all available Prowler providers and their associated services.
|
||||
|
||||
Returns:
|
||||
List of Prowler providers with their associated services. The structure is as follows:
|
||||
{
|
||||
"count": N,
|
||||
"providers": [
|
||||
{
|
||||
"id": "aws",
|
||||
"name": "Amazon Web Services"
|
||||
},
|
||||
{
|
||||
"id": "azure",
|
||||
"name": "Microsoft Azure"
|
||||
},
|
||||
...
|
||||
]
|
||||
"providers": {
|
||||
"provider_id": {
|
||||
"name": "provider_name",
|
||||
"services": ["service_id_1", "service_id_2", "service_id_3", ...]
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
try:
|
||||
@@ -688,16 +444,14 @@ async def list_providers() -> dict:
|
||||
response.raise_for_status()
|
||||
providers = response.json()
|
||||
|
||||
providers_list = []
|
||||
providers_dict = {}
|
||||
for provider in providers:
|
||||
providers_list.append(
|
||||
{
|
||||
"id": provider["id"],
|
||||
"name": provider.get("name", ""),
|
||||
}
|
||||
)
|
||||
providers_dict[provider["id"]] = {
|
||||
"name": provider.get("name", ""),
|
||||
"services": provider.get("services", []),
|
||||
}
|
||||
|
||||
return {"count": len(providers), "providers": providers_list}
|
||||
return {"count": len(providers), "providers": providers_dict}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
@@ -706,42 +460,24 @@ async def list_providers() -> dict:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
# Analytics Tools
|
||||
@hub_mcp_server.tool()
|
||||
async def get_provider_services(
|
||||
provider_id: str = Field(
|
||||
description="The provider ID to get services for. Example: 'aws', 'azure', 'gcp', 'kubernetes'. Use `prowler_hub_list_providers` to get available provider IDs.",
|
||||
),
|
||||
) -> dict:
|
||||
"""Get the list of services IDs available for a specific cloud provider.
|
||||
|
||||
Services represent the different resources and capabilities that Prowler can scan
|
||||
within a provider (e.g., s3, ec2, iam for AWS or keyvault, storage for Azure).
|
||||
|
||||
Use service IDs from this tool as filter values in other tools.
|
||||
async def get_artifacts_count() -> dict[str, Any]:
|
||||
"""
|
||||
Get total count of security artifacts (checks + compliance frameworks).
|
||||
|
||||
Returns:
|
||||
{
|
||||
"provider_id": "aws",
|
||||
"provider_name": "Amazon Web Services",
|
||||
"count": N,
|
||||
"services": ["s3", "ec2", "iam", "rds", "lambda", ...]
|
||||
}
|
||||
Total number of artifacts in the Prowler Hub.
|
||||
"""
|
||||
try:
|
||||
response = prowler_hub_client.get("/providers")
|
||||
response = prowler_hub_client.get("/n_artifacts")
|
||||
response.raise_for_status()
|
||||
providers = response.json()
|
||||
data = response.json()
|
||||
|
||||
for provider in providers:
|
||||
if provider["id"] == provider_id:
|
||||
return {
|
||||
"provider_id": provider["id"],
|
||||
"provider_name": provider.get("name", ""),
|
||||
"count": len(provider.get("services", [])),
|
||||
"services": provider.get("services", []),
|
||||
}
|
||||
|
||||
return {"error": f"Provider '{provider_id}' not found"}
|
||||
return {
|
||||
"total_artifacts": data.get("n", 0),
|
||||
"details": "Total count includes both security checks and compliance frameworks",
|
||||
}
|
||||
except httpx.HTTPStatusError as e:
|
||||
return {
|
||||
"error": f"HTTP error {e.response.status_code}: {e.response.text}",
|
||||
|
||||
@@ -11,7 +11,7 @@ description = "MCP server for Prowler ecosystem"
|
||||
name = "prowler-mcp"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.12"
|
||||
version = "0.3.0"
|
||||
version = "0.1.0"
|
||||
|
||||
[project.scripts]
|
||||
generate-prowler-app-mcp-server = "prowler_mcp_server.prowler_app.utils.server_generator:generate_server_file"
|
||||
|
||||
2
mcp_server/uv.lock
generated
2
mcp_server/uv.lock
generated
@@ -603,7 +603,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "prowler-mcp"
|
||||
version = "0.3.0"
|
||||
version = "0.1.0"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "fastmcp" },
|
||||
|
||||
@@ -2,38 +2,26 @@
|
||||
|
||||
All notable changes to the **Prowler SDK** are documented in this file.
|
||||
|
||||
## [5.16.0] (Prowler v5.16.0)
|
||||
## [5.16.0] (Prowler UNRELEASED)
|
||||
|
||||
### Added
|
||||
|
||||
- `privilege-escalation` and `ec2-imdsv1` categories for AWS checks [(#9537)](https://github.com/prowler-cloud/prowler/pull/9537)
|
||||
- `privilege-escalation` and `ec2-imdsv1` categories for AWS checks [(#9536)](https://github.com/prowler-cloud/prowler/pull/9536)
|
||||
- Supported IaC formats and scanner documentation for the IaC provider [(#9553)](https://github.com/prowler-cloud/prowler/pull/9553)
|
||||
|
||||
### Changed
|
||||
|
||||
- Update AWS Glue service metadata to new format [(#9258)](https://github.com/prowler-cloud/prowler/pull/9258)
|
||||
- Update AWS Kafka service metadata to new format [(#9261)](https://github.com/prowler-cloud/prowler/pull/9261)
|
||||
- Update AWS KMS service metadata to new format [(#9263)](https://github.com/prowler-cloud/prowler/pull/9263)
|
||||
- Update AWS MemoryDB service metadata to new format [(#9266)](https://github.com/prowler-cloud/prowler/pull/9266)
|
||||
- Update AWS Inspector v2 service metadata to new format [(#9260)](https://github.com/prowler-cloud/prowler/pull/9260)
|
||||
- Update AWS Service Catalog service metadata to new format [(#9410)](https://github.com/prowler-cloud/prowler/pull/9410)
|
||||
- Update AWS SNS service metadata to new format [(#9428)](https://github.com/prowler-cloud/prowler/pull/9428)
|
||||
- Update AWS Trusted Advisor service metadata to new format [(#9435)](https://github.com/prowler-cloud/prowler/pull/9435)
|
||||
- Update AWS WAF service metadata to new format [(#9480)](https://github.com/prowler-cloud/prowler/pull/9480)
|
||||
- Update AWS WAF v2 service metadata to new format [(#9481)](https://github.com/prowler-cloud/prowler/pull/9481)
|
||||
|
||||
### Fixed
|
||||
- Fix typo `trustboundaries` category to `trust-boundaries` [(#9536)](https://github.com/prowler-cloud/prowler/pull/9536)
|
||||
- Fix incorrect `bedrock-agent` regional availability, now using official AWS docs instead of copying from `bedrock`
|
||||
- Store MongoDB Atlas provider regions as lowercase [(#9554)](https://github.com/prowler-cloud/prowler/pull/9554)
|
||||
- Store GCP Cloud Storage bucket regions as lowercase [(#9567)](https://github.com/prowler-cloud/prowler/pull/9567)
|
||||
|
||||
---
|
||||
|
||||
## [5.15.1] (Prowler v5.15.1)
|
||||
## [5.15.1] (Prowler UNRELEASED)
|
||||
|
||||
### Fixed
|
||||
- Fix false negative in AWS `apigateway_restapi_logging_enabled` check by refining stage logging evaluation to ensure logging level is not set to "OFF" [(#9304)](https://github.com/prowler-cloud/prowler/pull/9304)
|
||||
- Fix typo `trustboundaries` category to `trust-boundaries` [(#9536)](https://github.com/prowler-cloud/prowler/pull/9536)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1426,23 +1426,42 @@
|
||||
"bedrock-agent": {
|
||||
"regions": {
|
||||
"aws": [
|
||||
"af-south-1",
|
||||
"ap-east-2",
|
||||
"ap-northeast-1",
|
||||
"ap-northeast-2",
|
||||
"ap-northeast-3",
|
||||
"ap-south-1",
|
||||
"ap-south-2",
|
||||
"ap-southeast-1",
|
||||
"ap-southeast-2",
|
||||
"ap-southeast-3",
|
||||
"ap-southeast-4",
|
||||
"ap-southeast-5",
|
||||
"ap-southeast-7",
|
||||
"ca-central-1",
|
||||
"ca-west-1",
|
||||
"eu-central-1",
|
||||
"eu-central-2",
|
||||
"eu-north-1",
|
||||
"eu-south-1",
|
||||
"eu-south-2",
|
||||
"eu-west-1",
|
||||
"eu-west-2",
|
||||
"eu-west-3",
|
||||
"il-central-1",
|
||||
"me-central-1",
|
||||
"me-south-1",
|
||||
"mx-central-1",
|
||||
"sa-east-1",
|
||||
"us-east-1",
|
||||
"us-east-2",
|
||||
"us-west-1",
|
||||
"us-west-2"
|
||||
],
|
||||
"aws-cn": [],
|
||||
"aws-us-gov": [
|
||||
"us-gov-east-1",
|
||||
"us-gov-west-1"
|
||||
]
|
||||
}
|
||||
@@ -12564,4 +12583,4 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,39 +1,29 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "inspector2_active_findings_exist",
|
||||
"CheckTitle": "Inspector2 is enabled with no active findings",
|
||||
"CheckTitle": "Check if Inspector2 active findings exist",
|
||||
"CheckAliases": [
|
||||
"inspector2_findings_exist"
|
||||
],
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/Vulnerabilities/CVE",
|
||||
"Software and Configuration Checks/Patch Management",
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Industry and Regulatory Standards/AWS Foundational Security Best Practices"
|
||||
],
|
||||
"CheckType": [],
|
||||
"ServiceName": "inspector2",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "high",
|
||||
"ResourceIdTemplate": "arn:aws:inspector2:region:account-id/detector-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "Other",
|
||||
"Description": "**Amazon Inspector2** active findings are assessed across eligible resources when the service is `ENABLED`.\n\nIndicates whether any findings remain in the **Active** state versus none.",
|
||||
"Risk": "**Unremediated Inspector2 findings** mean known vulnerabilities or exposures persist on workloads.\n\nThis enables:\n- Unauthorized access and data exfiltration (C)\n- Code tampering and privilege escalation (I)\n- Service disruption via exploitation or malware (A)",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector/amazon-inspector-findings.html",
|
||||
"https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
|
||||
"https://docs.aws.amazon.com/inspector/latest/user/what-is-inspector.html"
|
||||
],
|
||||
"Description": "This check determines if there are any active findings in your AWS account that have been detected by AWS Inspector2. Inspector2 is an automated security assessment service that helps improve the security and compliance of applications deployed on AWS.",
|
||||
"Risk": "Without using AWS Inspector, you may not be aware of all the security vulnerabilities in your AWS resources, which could lead to unauthorized access, data breaches, or other security incidents.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws inspector2 create-filter --name <example_resource_name> --action SUPPRESS --filter-criteria '{\"findingStatus\":[{\"comparison\":\"EQUALS\",\"value\":\"ACTIVE\"}]}'",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Suppress all ACTIVE Inspector findings\nResources:\n <example_resource_name>:\n Type: AWS::InspectorV2::Filter\n Properties:\n Name: <example_resource_name>\n Action: SUPPRESS # critical: converts matching findings to Suppressed, not Active\n FilterCriteria:\n FindingStatus:\n - Comparison: EQUALS\n Value: ACTIVE # critical: targets all active findings\n```",
|
||||
"Other": "1. In the AWS Console, go to Amazon Inspector\n2. Open Suppression rules (or Filters) and click Create suppression rule\n3. Set condition: Finding status = Active\n4. Set action to Suppress and click Create\n5. Verify the Active findings count is 0 on the dashboard",
|
||||
"Terraform": "```hcl\n# Terraform: Suppress all ACTIVE Inspector findings\nresource \"aws_inspector2_filter\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n action = \"SUPPRESS\" # critical: converts matching findings to Suppressed, not Active\n\n filter_criteria {\n finding_status {\n comparison = \"EQUALS\"\n value = \"ACTIVE\" # critical: targets all active findings\n }\n }\n}\n```"
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector/amazon-inspector-findings.html",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Prioritize and remediate **Active findings** quickly: patch hosts and runtimes, update/rebuild images, fix vulnerable code, and close unintended exposure.\n\nApply **least privilege**, use **defense in depth**, and avoid broad suppressions. Integrate findings into CI/CD and vulnerability management for continuous prevention.",
|
||||
"Url": "https://hub.prowler.com/check/inspector2_active_findings_exist"
|
||||
"Text": "Review the active findings from Inspector2",
|
||||
"Url": "https://docs.aws.amazon.com/inspector/latest/user/what-is-inspector.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,37 +1,31 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "inspector2_is_enabled",
|
||||
"CheckTitle": "Inspector2 is enabled for Amazon EC2 instances, ECR container images, Lambda functions, and Lambda code",
|
||||
"CheckTitle": "Check if Inspector2 is enabled for Amazon EC2 instances, ECR container images and Lambda functions.",
|
||||
"CheckAliases": [
|
||||
"inspector2_findings_exist"
|
||||
],
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
|
||||
"Software and Configuration Checks/AWS Security Best Practices"
|
||||
],
|
||||
"ServiceName": "inspector2",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:inspector2:region:account-id/detector-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "Other",
|
||||
"Description": "**Amazon Inspector 2** activation and coverage across regions, verifying that scanning is active for **EC2**, **ECR**, **Lambda functions**, and **Lambda code** where applicable.\n\nIt flags missing account activation or gaps in any scan type.",
|
||||
"Risk": "Absent or partial coverage leaves **unpatched vulnerabilities**, risky **code dependencies**, and **unintended network exposure** undetected.\n\nAttackers can exploit known CVEs for **remote code execution**, **lateral movement**, and **data exfiltration**, degrading **confidentiality**, **integrity**, and **availability**.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector2/enable-amazon-inspector2.html",
|
||||
"https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
|
||||
"https://docs.aws.amazon.com/inspector/latest/user/getting_started_tutorial.html"
|
||||
],
|
||||
"ResourceType": "AwsAccount",
|
||||
"Description": "Ensure that the new version of Amazon Inspector is enabled in order to help you improve the security and compliance of your AWS cloud environment. Amazon Inspector 2 is a vulnerability management solution that continually scans scans your Amazon EC2 instances, ECR container images, and Lambda functions to identify software vulnerabilities and instances of unintended network exposure.",
|
||||
"Risk": "Without using AWS Inspector, you may not be aware of all the security vulnerabilities in your AWS resources, which could lead to unauthorized access, data breaches, or other security incidents.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/inspector/latest/user/findings-understanding.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws inspector2 enable --resource-types EC2 ECR LAMBDA LAMBDA_CODE",
|
||||
"CLI": "aws inspector2 enable --resource-types 'EC2' 'ECR' 'LAMBDA' 'LAMBDA_CODE'",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Sign in to the AWS Console and open Amazon Inspector (v2)\n2. If not yet activated: click Get started > Activate Amazon Inspector\n3. If already activated: go to Settings > Scans and ensure EC2, ECR, Lambda functions, and Lambda code are all enabled, then Save",
|
||||
"Terraform": "```hcl\nresource \"aws_inspector2_enabler\" \"<example_resource_name>\" {\n resource_types = [\"EC2\", \"ECR\", \"LAMBDA\", \"LAMBDA_CODE\"] # Enables Inspector2 scans for all required resource types\n}\n```"
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Inspector2/enable-amazon-inspector2.html",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable **Amazon Inspector 2** across all regions and activate scans for **EC2**, **ECR**, **Lambda**, and **Lambda code**.\n\nApply **defense in depth**: auto-enable coverage for new workloads, integrate findings with patching and CI/CD gates, enforce remediation SLAs, and grant only **least privilege** to process and act on findings.",
|
||||
"Url": "https://hub.prowler.com/check/inspector2_is_enabled"
|
||||
"Text": "Enable Amazon Inspector 2 for your AWS account.",
|
||||
"Url": "https://docs.aws.amazon.com/inspector/latest/user/getting_started_tutorial.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,32 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "servicecatalog_portfolio_shared_within_organization_only",
|
||||
"CheckTitle": "Service Catalog portfolio is shared only within the AWS Organization",
|
||||
"CheckTitle": "Service Catalog portfolios should be shared within an AWS organization only",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"TTPs/Initial Access/Unauthorized Access"
|
||||
"Software and Configuration Checks/AWS Security Best Practices"
|
||||
],
|
||||
"ServiceName": "servicecatalog",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:servicecatalog:{region}:{account-id}:portfolio/{portfolio-id}",
|
||||
"Severity": "high",
|
||||
"ResourceType": "Other",
|
||||
"Description": "**AWS Service Catalog portfolios** are assessed to confirm sharing occurs via **AWS Organizations** integration, not direct `ACCOUNT` shares. It reviews shared portfolios and identifies those targeted to individual accounts instead of organizational scopes.",
|
||||
"Risk": "Sharing with individual accounts enables recipients to import and launch products outside centralized guardrails, inheriting launch roles. This can cause unauthorized provisioning, data exposure, and configuration drift-impacting confidentiality, integrity, and availability through misused privileges and uncontrolled costs.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/servicecatalog/latest/adminguide/catalogs_portfolios_sharing.html"
|
||||
],
|
||||
"ResourceType": "AwsServiceCatalogPortfolio",
|
||||
"Description": "This control checks whether AWS Service Catalog shares portfolios within an organization when the integration with AWS Organizations is enabled. The control fails if portfolios aren't shared within an organization.",
|
||||
"Risk": "Sharing Service Catalog portfolios outside of an organization may result in access granted to unintended AWS accounts, potentially exposing sensitive resources.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/servicecatalog/latest/adminguide/catalogs_portfolios_sharing.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws servicecatalog create-portfolio-share --portfolio-id <portfolio-id> --organization-ids <org-id>",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Share Service Catalog portfolio only within the AWS Organization\nResources:\n <example_resource_name>:\n Type: AWS::ServiceCatalog::PortfolioShare\n Properties:\n PortfolioId: <example_resource_id>\n OrganizationNode: # CRITICAL: share within AWS Organizations\n Type: ORGANIZATION # Shares the portfolio with the entire org\n Value: <example_resource_id> # e.g., o-xxxxxxxxxx\n```",
|
||||
"Other": "1. In the AWS Console, go to Service Catalog > Portfolios and open the target portfolio\n2. Open the Shares/Sharing tab\n3. Remove every share of Type \"Account\" (stop sharing with each account)\n4. Click Share, choose \"AWS Organizations\", set Type to \"Organization\", enter your Org ID (o-xxxxxxxxxx), and share\n5. Verify no remaining shares of Type \"Account\" exist",
|
||||
"Terraform": "```hcl\n# Share Service Catalog portfolio only within the AWS Organization\nresource \"aws_servicecatalog_portfolio_share\" \"<example_resource_name>\" {\n portfolio_id = \"<example_resource_id>\"\n\n organization_node { # CRITICAL: share within AWS Organizations\n type = \"ORGANIZATION\" # Shares the portfolio with the entire org\n value = \"<example_resource_id>\" # e.g., o-xxxxxxxxxx\n }\n}\n```"
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/servicecatalog/latest/adminguide/catalogs_portfolios_sharing.html",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Prefer **organizational sharing** for portfolios and avoid `ACCOUNT` targets. Enforce **least privilege** on portfolio access and launch roles, and review shares regularly. Apply **separation of duties** and **defense in depth** so only governed accounts consume products and blast radius remains constrained.",
|
||||
"Url": "https://hub.prowler.com/check/servicecatalog_portfolio_shared_within_organization_only"
|
||||
"Text": "Configure AWS Service Catalog to share portfolios only within your AWS Organization for more secure access management.",
|
||||
"Url": "https://docs.aws.amazon.com/servicecatalog/latest/adminguide/catalogs_portfolios_sharing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
|
||||
@@ -1,33 +1,26 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "sns_subscription_not_using_http_endpoints",
|
||||
"CheckTitle": "SNS subscription uses an HTTPS endpoint",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
|
||||
"Effects/Data Exposure"
|
||||
],
|
||||
"CheckTitle": "Ensure there are no SNS subscriptions using HTTP endpoints",
|
||||
"CheckType": [],
|
||||
"ServiceName": "sns",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:sns:region:account-id:topic",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AwsSnsTopic",
|
||||
"Description": "Amazon SNS subscriptions are evaluated for endpoint protocol. Subscriptions using `http` are identified, while **HTTPS** endpoints indicate encrypted delivery in transit.",
|
||||
"Risk": "Using **HTTP** leaves SNS deliveries unencrypted, compromising **confidentiality** via eavesdropping. MITM attackers can modify payloads or headers, damaging **integrity**, inject malicious content into downstream systems, or capture subscription data for spoofing and unauthorized actions.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/AWSCloudFormation/latest/TemplateReference/aws-resource-sns-subscription.html",
|
||||
"https://docs.aws.amazon.com/sns/latest/dg/sns-security-best-practices.html#enforce-encryption-data-in-transit"
|
||||
],
|
||||
"Description": "Ensure there are no SNS subscriptions using HTTP endpoints",
|
||||
"Risk": "When you use HTTPS, messages are automatically encrypted during transit, even if the SNS topic itself isn't encrypted. Without HTTPS, a network-based attacker can eavesdrop on network traffic or manipulate it using an attack such as man-in-the-middle.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/sns/latest/dg/sns-security-best-practices.html#enforce-encryption-data-in-transit",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Ensure SNS subscription uses HTTPS\nResources:\n <example_resource_name>:\n Type: AWS::SNS::Subscription\n Properties:\n TopicArn: <example_resource_id>\n Protocol: https # Critical: use HTTPS protocol to remediate HTTP usage\n Endpoint: https://<example_endpoint> # Critical: HTTPS endpoint URL\n```",
|
||||
"Other": "1. Open the Amazon SNS console and go to Subscriptions\n2. Select the subscription with Protocol set to HTTP and click Delete\n3. Click Create subscription\n4. Choose the same Topic ARN, set Protocol to HTTPS, and enter your HTTPS endpoint URL\n5. Create the subscription and confirm it from your endpoint if required",
|
||||
"Terraform": "```hcl\n# Terraform: Ensure SNS subscription uses HTTPS\nresource \"aws_sns_topic_subscription\" \"<example_resource_name>\" {\n topic_arn = \"<example_resource_id>\"\n protocol = \"https\" # Critical: enforce HTTPS protocol\n endpoint = \"https://<example_endpoint>\" # Critical: HTTPS endpoint URL\n}\n```"
|
||||
"NativeIaC": "",
|
||||
"Other": "",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Require **HTTPS** for all SNS subscription endpoints. Prefer domain-based endpoints, verify SNS message signatures, and apply **least privilege**. Enforce TLS using IAM conditions like `aws:SecureTransport`, and use private connectivity (VPC endpoints) where possible for defense in depth.",
|
||||
"Url": "https://hub.prowler.com/check/sns_subscription_not_using_http_endpoints"
|
||||
"Text": "To enforce only encrypted connections over HTTPS, add the aws:SecureTransport condition in the IAM policy that's attached to unencrypted SNS topics. This forces message publishers to use HTTPS instead of HTTP",
|
||||
"Url": "https://docs.aws.amazon.com/sns/latest/dg/sns-security-best-practices.html#enforce-encryption-data-in-transit"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
|
||||
@@ -1,37 +1,26 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "sns_topics_kms_encryption_at_rest_enabled",
|
||||
"CheckTitle": "SNS topic is encrypted at rest with KMS",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls (USA)",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/PCI-DSS",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/ISO 27001 Controls"
|
||||
],
|
||||
"CheckTitle": "Ensure there are no SNS Topics unencrypted",
|
||||
"CheckType": [],
|
||||
"ServiceName": "sns",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:sns:region:account-id:topic",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AwsSnsTopic",
|
||||
"Description": "**Amazon SNS topics** are assessed for **server-side encryption** with **AWS KMS**. Topics lacking a configured KMS key (e.g., missing `kms_master_key_id`) are identified as unencrypted at rest.",
|
||||
"Risk": "Without KMS-backed SSE, SNS stores message bodies unencrypted at rest, undermining **confidentiality**.\n\nPrivileged insiders or compromised service components could access plaintext during persistence windows, causing data exposure. You also lose KMS controls such as key policies, rotation, and detailed audit trails.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topic-encrypted-with-kms-customer-master-keys.html",
|
||||
"https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html"
|
||||
],
|
||||
"Description": "Ensure there are no SNS Topics unencrypted",
|
||||
"Risk": "If not enabled sensitive information at rest is not protected.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws sns set-topic-attributes --topic-arn <TOPIC_ARN> --attribute-name KmsMasterKeyId --attribute-value alias/aws/sns",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Enable SSE for an SNS topic\nResources:\n <example_resource_name>:\n Type: AWS::SNS::Topic\n Properties:\n KmsMasterKeyId: alias/aws/sns # Critical: Enables encryption at rest with AWS managed KMS key\n```",
|
||||
"Other": "1. Open the AWS Console and go to Amazon SNS > Topics\n2. Select the topic and click Edit\n3. Under Encryption, enable encryption and choose the AWS managed key for SNS (alias/aws/sns)\n4. Click Save changes",
|
||||
"Terraform": "```hcl\n# Enable SSE for an SNS topic\nresource \"aws_sns_topic\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n kms_master_key_id = \"alias/aws/sns\" # Critical: Enables encryption at rest\n}\n```"
|
||||
"CLI": "aws sns set-topic-attributes --topic-arn <TOPIC_ARN> --attribute-name 'KmsMasterKeyId' --attribute-value <KEY>",
|
||||
"NativeIaC": "https://docs.prowler.com/checks/aws/general-policies/general_15#cloudformation",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topic-encrypted-with-kms-customer-master-keys.html",
|
||||
"Terraform": "https://docs.prowler.com/checks/aws/general-policies/general_15#terraform"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable **server-side encryption** on all SNS topics with **AWS KMS**; prefer **customer-managed keys** for control.\n\nApply **least privilege** on key use, enforce rotation, and monitor key/access logs. Minimize sensitive data in messages and use end-to-end encryption *where feasible* to add defense in depth.",
|
||||
"Url": "https://hub.prowler.com/check/sns_topics_kms_encryption_at_rest_enabled"
|
||||
"Text": "Use Amazon SNS with AWS KMS.",
|
||||
"Url": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
|
||||
@@ -1,35 +1,26 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "sns_topics_not_publicly_accessible",
|
||||
"CheckTitle": "SNS topic is not publicly accessible",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Effects/Data Exposure",
|
||||
"TTPs/Initial Access"
|
||||
],
|
||||
"CheckTitle": "Check if SNS topics have policy set as Public",
|
||||
"CheckType": [],
|
||||
"ServiceName": "sns",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:sns:region:account-id:topic",
|
||||
"Severity": "high",
|
||||
"ResourceType": "AwsSnsTopic",
|
||||
"Description": "**SNS topic policies** are analyzed for **public principals** (e.g., `*`). Topics that grant access without restrictive conditions such as `aws:SourceArn`, `aws:SourceAccount`, `aws:PrincipalOrgID`, or `sns:Endpoint` scoping are treated as publicly accessible.",
|
||||
"Risk": "**Public SNS topics** allow anyone or unknown accounts to:\n- **Subscribe** and siphon messages (confidentiality)\n- **Publish** spoofed payloads that alter workflows (integrity)\n- **Flood** messages causing outages and costs (availability)\nThey also enable cross-account abuse and bypass expected trust boundaries.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topics-everyone-publish.html",
|
||||
"https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html"
|
||||
],
|
||||
"Description": "Check if SNS topics have policy set as Public",
|
||||
"Risk": "Publicly accessible services could expose sensitive data to bad actors.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws sns set-topic-attributes --topic-arn <TOPIC_ARN> --attribute-name Policy --attribute-value '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::<ACCOUNT_ID>:root\"},\"Action\":\"sns:Publish\",\"Resource\":\"<TOPIC_ARN>\"}]}'",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: restrict SNS topic policy to the account (not public)\nResources:\n <example_resource_name>:\n Type: AWS::SNS::TopicPolicy\n Properties:\n Topics:\n - arn:aws:sns:<region>:<account_id>:<example_resource_name>\n PolicyDocument:\n Version: '2012-10-17'\n Statement:\n - Effect: Allow\n Action: sns:Publish\n Resource: arn:aws:sns:<region>:<account_id>:<example_resource_name>\n Principal:\n AWS: arn:aws:iam::<account_id>:root # Critical: restrict to account root to remove public access\n```",
|
||||
"Other": "1. Open the Amazon SNS console and select Topics\n2. Choose the topic and go to the Access policy tab\n3. Edit the policy and remove any Principal set to \"*\" (Everyone/Public)\n4. Add a statement allowing only your account root: Principal = arn:aws:iam::<ACCOUNT_ID>:root with Action sns:Publish and Resource set to the topic ARN\n5. Save changes",
|
||||
"Terraform": "```hcl\n# Restrict SNS topic policy to the account (not public)\nresource \"aws_sns_topic_policy\" \"<example_resource_name>\" {\n arn = \"<TOPIC_ARN>\"\n policy = jsonencode({\n Version = \"2012-10-17\"\n Statement = [{\n Effect = \"Allow\"\n Action = \"sns:Publish\"\n Resource = \"<TOPIC_ARN>\"\n Principal = { AWS = \"arn:aws:iam::<ACCOUNT_ID>:root\" } # Critical: restrict principal to the account to remove public access\n }]\n })\n}\n```"
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topics-everyone-publish.html",
|
||||
"Terraform": "https://docs.prowler.com/checks/aws/general-policies/ensure-sns-topic-policy-is-not-public-by-only-allowing-specific-services-or-principals-to-access-it#terraform"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Restrict the **topic policy** to specific principals and minimal actions:\n- Avoid `Principal:*`\n- Allow only needed actions (e.g., `sns:Publish`)\n- Add conditions like `aws:SourceArn`, `aws:SourceAccount`, `aws:PrincipalOrgID`, or `sns:Endpoint`\nApply **least privilege**, separate duties, and review policies regularly.",
|
||||
"Url": "https://hub.prowler.com/check/sns_topics_not_publicly_accessible"
|
||||
"Text": "Ensure there is a business requirement for service to be public.",
|
||||
"Url": "https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
|
||||
@@ -1,32 +1,26 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "trustedadvisor_errors_and_warnings",
|
||||
"CheckTitle": "Trusted Advisor check has no errors or warnings",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices"
|
||||
],
|
||||
"CheckTitle": "Check Trusted Advisor for errors and warnings.",
|
||||
"CheckType": [],
|
||||
"ServiceName": "trustedadvisor",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:service:region:account-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "Other",
|
||||
"Description": "**AWS Trusted Advisor** check statuses are assessed to identify items in `warning` or `error`. The finding reflects the state reported by Trusted Advisor across categories such as **Security**, **Fault Tolerance**, **Service Limits**, and **Cost**, indicating where configurations or quotas require attention.",
|
||||
"Risk": "Unaddressed **warnings/errors** can leave misconfigurations that impact CIA:\n- **Confidentiality**: public access or weak auth exposes data\n- **Integrity**: overly permissive settings allow unwanted changes\n- **Availability**: limit exhaustion or poor resilience triggers outages\nThey can also increase unnecessary cost.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/",
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/TrustedAdvisor/checks.html"
|
||||
],
|
||||
"Description": "Check Trusted Advisor for errors and warnings.",
|
||||
"Risk": "Improve the security of your application by closing gaps, enabling various AWS security features and examining your permissions.",
|
||||
"RelatedUrl": "https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Sign in to the AWS Console and open Trusted Advisor\n2. Go to Checks and filter Status to Warning and Error\n3. Open each failing check and click View details/Recommended actions\n4. Apply the listed fix to the affected resources\n5. Click Refresh on the check and repeat until all checks show OK",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/TrustedAdvisor/checks.html",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Adopt a continuous process to remediate Trusted Advisor findings:\n- Prioritize **`error`** then `warning`\n- Assign ownership and SLAs\n- Integrate alerts with workflows\n- Enforce **least privilege**, segmentation, encryption, MFA, and tested backups\n- Reassess regularly to confirm fixes and prevent regression",
|
||||
"Url": "https://hub.prowler.com/check/trustedadvisor_errors_and_warnings"
|
||||
"Text": "Review and act upon its recommendations.",
|
||||
"Url": "https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,37 +1,29 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "trustedadvisor_premium_support_plan_subscribed",
|
||||
"CheckTitle": "AWS account is subscribed to an AWS Premium Support plan",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices"
|
||||
],
|
||||
"CheckTitle": "Check if a Premium support plan is subscribed",
|
||||
"CheckType": [],
|
||||
"ServiceName": "trustedadvisor",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:iam::AWS_ACCOUNT_NUMBER:root",
|
||||
"Severity": "low",
|
||||
"ResourceType": "Other",
|
||||
"Description": "**AWS account** is subscribed to an **AWS Premium Support plan** (e.g., Business or Enterprise)",
|
||||
"Risk": "Without **Premium Support**, critical incidents face slower response, reducing **availability** and delaying containment of security events. Limited Trusted Advisor coverage lets **misconfigurations** persist, risking **data exposure** and **privilege misuse**. Lack of expert guidance increases change risk during production impacts.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/Support/support-plan.html",
|
||||
"https://aws.amazon.com/premiumsupport/plans/"
|
||||
],
|
||||
"Description": "Check if a Premium support plan is subscribed.",
|
||||
"Risk": "Ensure that the appropriate support level is enabled for the necessary AWS accounts. For example, if an AWS account is being used to host production systems and environments, it is highly recommended that the minimum AWS Support Plan should be Business.",
|
||||
"RelatedUrl": "https://aws.amazon.com/premiumsupport/plans/",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. Sign in to the AWS Management Console as the account root user\n2. Open https://console.aws.amazon.com/support/home#/plans\n3. Click \"Change plan\"\n4. Select \"Business Support\" (or higher) and click \"Continue\"\n5. Review and confirm the upgrade",
|
||||
"Other": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/Support/support-plan.html",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Adopt **Business** or higher for production and mission-critical accounts.\n- Integrate Support into IR with defined contacts/severity\n- Enforce **least privilege** for case access\n- Use Trusted Advisor for proactive hardening\n- If opting out, ensure an equivalent 24/7 support and escalation path",
|
||||
"Url": "https://hub.prowler.com/check/trustedadvisor_premium_support_plan_subscribed"
|
||||
"Text": "It is recommended that you subscribe to the AWS Business Support tier or higher for all of your AWS production accounts. If you don't have premium support, you must have an action plan to handle issues which require help from AWS Support. AWS Support provides a mix of tools and technology, people, and programs designed to proactively help you optimize performance, lower costs, and innovate faster.",
|
||||
"Url": "https://www.trendmicro.com/cloudoneconformity-staging/knowledge-base/aws/Support/support-plan.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"resilience"
|
||||
],
|
||||
"Categories": [],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": ""
|
||||
|
||||
@@ -1,40 +1,31 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_global_rule_with_conditions",
|
||||
"CheckTitle": "AWS WAF Classic Global rule has at least one condition",
|
||||
"CheckTitle": "AWS WAF Classic Global Rules Should Have at Least One Condition.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:waf:account-id:rule/rule-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafRule",
|
||||
"Description": "**AWS WAF Classic global rules** contain at least one **condition** that matches HTTP(S) requests the rule evaluates for action (e.g., `allow`, `block`, `count`).",
|
||||
"Risk": "**No-condition rules** never match traffic, providing no filtering. Malicious requests (SQLi/XSS, bots) can reach origins, impacting **confidentiality** (data exfiltration), **integrity** (tampering), and **availability** (service disruption). They may also create a false sense of coverage.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-rules-editing.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-6",
|
||||
"https://docs.aws.amazon.com/config/latest/developerguide/waf-global-rule-not-empty.html"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Global Rule contains at least one condition.",
|
||||
"Risk": "An AWS WAF Classic Global rule without any conditions cannot inspect or filter traffic, potentially allowing malicious requests to pass unchecked.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/waf-global-rule-not-empty.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf update-rule --rule-id <example_resource_id> --change-token <example_change_token> --updates '[{\"Action\":\"INSERT\",\"Predicate\":{\"Negated\":false,\"Type\":\"IPMatch\",\"DataId\":\"<example_resource_id>\"}}]' --region us-east-1",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: ensure the WAF Classic Global rule has at least one condition\nResources:\n <example_resource_name>:\n Type: AWS::WAF::Rule\n Properties:\n Name: <example_resource_name>\n MetricName: <example_metric_name>\n # Critical: add at least one predicate (condition) so the rule is not empty\n Predicates:\n - Negated: false # evaluate as-is\n Type: IPMatch\n DataId: <example_resource_id> # existing IPSet ID\n```",
|
||||
"Other": "1. Open the AWS Console > AWS WAF, then click Switch to AWS WAF Classic\n2. In Global (CloudFront) scope, go to Rules and select the target rule\n3. Click Edit (or Add rule) > Add condition\n4. Choose a condition type (e.g., IP match), select an existing condition, set it to does (not negated)\n5. Click Update/Save to apply\n",
|
||||
"Terraform": "```hcl\n# Ensure the WAF Classic Global rule has at least one condition\nresource \"aws_waf_rule\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n metric_name = \"<example_metric_name>\"\n\n # Critical: add at least one predicate (condition) so the rule is not empty\n predicate {\n data_id = \"<example_resource_id>\" # existing IPSet ID\n negated = false\n type = \"IPMatch\"\n }\n}\n```"
|
||||
"CLI": "aws waf update-rule --rule-id <your-rule-id> --change-token <your-change-token> --updates '[{\"Action\":\"INSERT\",\"Predicate\":{\"Negated\":false,\"Type\":\"IPMatch\",\"DataId\":\"<your-ipset-id>\"}}]' --region <your-region>",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-6",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Attach at least one precise **condition** to every rule, aligned to known threats and application context. Apply **least privilege** for traffic, use managed rule groups for **defense in depth**, and routinely review rules to remove placeholders. *If on Classic*, plan migration to WAFv2.",
|
||||
"Url": "https://hub.prowler.com/check/waf_global_rule_with_conditions"
|
||||
"Text": "Ensure that every AWS WAF Classic Global rule has at least one condition to properly inspect and manage web traffic.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-rules-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"internet-exposed"
|
||||
],
|
||||
"Categories": [],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": ""
|
||||
|
||||
@@ -1,34 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_global_rulegroup_not_empty",
|
||||
"CheckTitle": "AWS WAF Classic global rule group has at least one rule",
|
||||
"CheckTitle": "Check if AWS WAF Classic Global rule group has at least one rule.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices/Network Reachability",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "high",
|
||||
"ResourceIdTemplate": "arn:aws:waf::account-id:rulegroup/rule-group-name/rule-group-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafRuleGroup",
|
||||
"Description": "**AWS WAF Classic global rule groups** are assessed for the presence of **one or more rules**. Empty groups are identified even when referenced by a web ACL, meaning the group adds no match logic.",
|
||||
"Risk": "An empty rule group performs no inspection, so web requests pass without WAF scrutiny. This creates blind spots enabling:\n- **Confidentiality**: data exfiltration via SQLi/XSS\n- **Integrity**: parameter tampering\n- **Availability**: bot abuse and layer-7 DoS\n\nIt also creates a false sense of protection when attached.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-groups.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-7",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/classic-rule-group-editing.html"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Global rule group contains at least one rule.",
|
||||
"Risk": "A WAF Classic Global rule group without any rules allows all incoming traffic to bypass inspection, increasing the risk of unauthorized access and potential attacks on resources.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-groups.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf update-rule-group --rule-group-id <rule-group-id> --updates Action=INSERT,ActivatedRule={Priority=1,RuleId=<rule-id>,Action={Type=BLOCK}} --change-token <change-token> --region us-east-1",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: ensure the WAF Classic global rule group has at least one rule\nResources:\n <example_resource_name>:\n Type: AWS::WAF::RuleGroup\n Properties:\n Name: <example_resource_name>\n MetricName: examplemetric\n ActivatedRules:\n - Priority: 1 # Critical: adds a rule to the group (makes it non-empty)\n RuleId: <example_resource_id> # Critical: ID of the existing rule to add\n Action:\n Type: BLOCK # Critical: required action when activating the rule\n```",
|
||||
"Other": "1. Open the AWS Console and go to AWS WAF, then switch to AWS WAF Classic\n2. At the top, set scope to Global (CloudFront)\n3. Go to Rule groups and select the target rule group\n4. Click Edit rule group\n5. Select an existing rule, choose its action (e.g., BLOCK), and click Add rule to rule group\n6. Click Update to save",
|
||||
"Terraform": "```hcl\n# Terraform: ensure the WAF Classic global rule group has at least one rule\nresource \"aws_waf_rule_group\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n metric_name = \"examplemetric\"\n\n activated_rule {\n priority = 1 # Critical: adds a rule to the group (makes it non-empty)\n rule_id = \"<example_resource_id>\" # Critical: ID of the existing rule to add\n action {\n type = \"BLOCK\" # Critical: required action when activating the rule\n }\n }\n}\n```"
|
||||
"CLI": "aws waf update-rule-group --rule-group-id <rule-group-id> --updates Action=INSERT,ActivatedRule={Priority=1,RuleId=<rule-id>,Action={Type=BLOCK}} --change-token <change-token> --region <region>",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-7",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Populate each rule group with **effective rules** aligned to application threats; choose `block` or `count` actions as appropriate. Prefer **managed rule groups** as a baseline and layer custom rules for **least privilege**. Avoid placeholder groups, test in staging, and monitor metrics to tune.",
|
||||
"Url": "https://hub.prowler.com/check/waf_global_rulegroup_not_empty"
|
||||
"Text": "Ensure that every AWS WAF Classic Global rule group contains at least one rule to enforce traffic inspection and defined actions such as allow, block, or count.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-rule-group-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,39 +1,31 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_global_webacl_logging_enabled",
|
||||
"CheckTitle": "AWS WAF Classic Global Web ACL has logging enabled",
|
||||
"CheckTitle": "Check if AWS WAF Classic Global WebACL has logging enabled.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:waf:account-id:webacl/web-acl-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafWebAcl",
|
||||
"Description": "**AWS WAF Classic global Web ACLs** have **logging** enabled to capture evaluated web requests and rule actions for each ACL",
|
||||
"Risk": "Without **WAF logging**, you lose **visibility** into attacks (SQLi/XSS probes, bots, brute-force) and into allow/block decisions, limiting detection and forensics. This degrades **confidentiality**, **integrity**, and **availability**, and slows incident response and tuning.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/classic-logging.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-1",
|
||||
"https://docs.aws.amazon.com/cli/latest/reference/waf/put-logging-configuration.html"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Global WebACL has logging enabled.",
|
||||
"Risk": "Without logging enabled, there is no visibility into traffic patterns or potential security threats, which limits the ability to troubleshoot and monitor web traffic effectively.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-incident-response.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf put-logging-configuration --logging-configuration ResourceArn=<web_acl_arn>,LogDestinationConfigs=<kinesis_firehose_delivery_stream_arn>",
|
||||
"NativeIaC": "",
|
||||
"Other": "1. In the AWS console, create an Amazon Kinesis Data Firehose delivery stream named starting with \"aws-waf-logs-\" (for CloudFront/global, create it in us-east-1)\n2. Open the AWS WAF console and switch to AWS WAF Classic\n3. Select Filter: Global (CloudFront) and go to Web ACLs\n4. Open the target Web ACL and go to the Logging tab\n5. Click Enable logging and select the Firehose delivery stream created in step 1\n6. Click Enable/Save",
|
||||
"CLI": "aws waf put-logging-configuration --logging-configuration ResourceArn=<web-acl-arn>,LogDestinationConfigs=<log-destination-arn>",
|
||||
"NativeIaC": "https://docs.prowler.com/checks/aws/logging-policies/bc_aws_logging_31/",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-1",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable **logging** on all global Web ACLs and send records to a centralized logging platform. Apply **least privilege** to log destinations and redact sensitive fields. Monitor and alert on anomalies, and integrate logs with incident response for **defense in depth** and faster containment.",
|
||||
"Url": "https://hub.prowler.com/check/waf_global_webacl_logging_enabled"
|
||||
"Text": "Ensure logging is enabled for AWS WAF Classic Global Web ACLs to capture traffic details and maintain compliance.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-logging.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"logging"
|
||||
],
|
||||
"Categories": [],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": ""
|
||||
|
||||
@@ -1,35 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_global_webacl_with_rules",
|
||||
"CheckTitle": "AWS WAF Classic global Web ACL has at least one rule or rule group",
|
||||
"CheckTitle": "Check if AWS WAF Classic Global WebACL has at least one rule or rule group.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:waf:account-id:webacl/web-acl-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafWebAcl",
|
||||
"Description": "**AWS WAF Classic global web ACLs** are evaluated for the presence of at least one **rule** or **rule group** that inspects HTTP(S) requests",
|
||||
"Risk": "With no rules, the web ACL relies solely on its default action. If `allow`, hostile traffic reaches origins uninspected; if `block`, legitimate traffic can be denied.\n- SQLi/XSS can expose data (confidentiality)\n- Malicious requests can alter state (integrity)\n- Bots and scraping can drain resources (availability)",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-8",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-editing.html",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/waf-rules.html"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Global WebACL contains at least one rule or rule group.",
|
||||
"Risk": "An empty AWS WAF Classic Global web ACL allows all web traffic to bypass inspection, potentially exposing resources to unauthorized access and attacks.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/waf-rules.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf update-web-acl --web-acl-id <WEB_ACL_ID> --change-token <CHANGE_TOKEN> --updates '[{\"Action\":\"INSERT\",\"ActivatedRule\":{\"Priority\":1,\"RuleId\":\"<RULE_ID>\",\"Action\":{\"Type\":\"BLOCK\"}}}]'",
|
||||
"NativeIaC": "```yaml\nResources:\n <example_resource_name>:\n Type: AWS::WAF::WebACL\n Properties:\n Name: <example_resource_name>\n MetricName: <example_metric_name>\n DefaultAction:\n Type: ALLOW\n Rules:\n - Action:\n Type: BLOCK\n Priority: 1\n RuleId: <example_rule_id> # Critical: Adds a rule so the Web ACL is not empty\n # This ensures the Web ACL has at least one rule, changing FAIL to PASS\n```",
|
||||
"Other": "1. Open the AWS console and go to WAF\n2. In the left menu, click Switch to AWS WAF Classic\n3. At the top, set Filter to Global (CloudFront)\n4. Click Web ACLs and select your web ACL\n5. On the Rules tab, click Edit web ACL\n6. In Rules, select an existing rule or rule group and click Add rule to web ACL\n7. Click Save changes",
|
||||
"Terraform": "```hcl\nresource \"aws_waf_web_acl\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n metric_name = \"<example_metric_name>\"\n\n default_action {\n type = \"ALLOW\"\n }\n\n rules { # Critical: Adds at least one rule so the Web ACL is not empty\n priority = 1\n rule_id = \"<example_rule_id>\"\n type = \"REGULAR\"\n action {\n type = \"BLOCK\"\n }\n }\n}\n```"
|
||||
"CLI": "aws waf update-web-acl --web-acl-id <your-web-acl-id> --change-token <your-change-token> --updates '[{\"Action\":\"INSERT\",\"ActivatedRule\":{\"Priority\":1,\"RuleId\":\"<your-rule-id>\",\"Action\":{\"Type\":\"BLOCK\"}}}]' --default-action Type=ALLOW --region <your-region>",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-8",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Populate each global web ACL with effective protections:\n- Use rule groups and targeted rules (managed, rate-based, IP sets)\n- Apply least privilege: default `block` where feasible; explicitly `allow` required traffic\n- Layer defenses and enable logging to tune policies\n- *Consider migrating to WAFv2*",
|
||||
"Url": "https://hub.prowler.com/check/waf_global_webacl_with_rules"
|
||||
"Text": "Ensure that every AWS WAF Classic Global web ACL includes at least one rule or rule group to monitor and control web traffic effectively.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,34 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_regional_rule_with_conditions",
|
||||
"CheckTitle": "AWS WAF Classic Regional rule has at least one condition",
|
||||
"CheckTitle": "AWS WAF Classic Regional Rules Should Have at Least One Condition.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:waf-regional:region:account-id:rule/rule-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafRegionalRule",
|
||||
"Description": "**AWS WAF Classic Regional rules** have one or more **conditions (predicates)** attached (IP, byte/regex, geo, size, SQLi/XSS) to define which requests the rule evaluates",
|
||||
"Risk": "An empty rule never matches, letting traffic bypass that control. This weakens defense-in-depth and can impact **confidentiality** (data exfiltration), **integrity** (SQLi/XSS), and **availability** (missing rate/size limits), depending on Web ACL order and default action.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-rules-editing.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-2",
|
||||
"https://docs.aws.amazon.com/config/latest/developerguide/waf-regional-rule-not-empty.html"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Regional Rule contains at least one condition.",
|
||||
"Risk": "An AWS WAF Classic Regional rule without any conditions cannot inspect or filter traffic, potentially allowing malicious requests to pass unchecked.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/waf-regional-rule-not-empty.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf-regional update-rule --rule-id <example_rule_id> --change-token $(aws waf-regional get-change-token --query ChangeToken --output text) --updates '[{\"Action\":\"INSERT\",\"Predicate\":{\"Negated\":false,\"Type\":\"IPMatch\",\"DataId\":\"<example_ipset_id>\"}}]'",
|
||||
"NativeIaC": "```yaml\n# Add at least one condition to a WAF Classic Regional Rule\nResources:\n <example_resource_name>:\n Type: AWS::WAFRegional::Rule\n Properties:\n Name: <example_resource_name>\n MetricName: <example_metric_name>\n Predicates:\n - Negated: false # CRITICAL: ensures the predicate is applied as-is\n Type: IPMatch # CRITICAL: predicate type\n DataId: <example_ipset_id> # CRITICAL: attaches an existing IP set as a condition\n```",
|
||||
"Other": "1. Open the AWS Console and go to AWS WAF, then select Switch to AWS WAF Classic\n2. In the left pane, choose Regional and click Rules\n3. Select the target rule and choose Add rule\n4. Click Add condition, set When a request to does, choose IP match (or another type), and select an existing condition (e.g., an IP set)\n5. Click Update to save the rule with the condition",
|
||||
"Terraform": "```hcl\n# WAF Classic Regional rule with at least one condition\nresource \"aws_wafregional_rule\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n metric_name = \"<example_metric_name>\"\n\n predicate { \n data_id = \"<example_ipset_id>\" # CRITICAL: attaches existing IP set as the condition\n type = \"IPMatch\" # CRITICAL: predicate type\n negated = false # CRITICAL: apply condition directly\n }\n}\n```"
|
||||
"CLI": "aws waf-regional update-rule --rule-id <your-rule-id> --change-token <your-change-token> --updates '[{\"Action\":\"INSERT\",\"Predicate\":{\"Negated\":false,\"Type\":\"IPMatch\",\"DataId\":\"<your-ipset-id>\"}}]' --region <your-region>",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-2",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Define precise **conditions** for each rule (e.g., IP, pattern, geo, size) and avoid placeholder rules. Apply **least privilege** filtering, review rule order, and use layered controls for **defense in depth**. Regularly validate and monitor rule effectiveness.",
|
||||
"Url": "https://hub.prowler.com/check/waf_regional_rule_with_conditions"
|
||||
"Text": "Ensure that every AWS WAF Classic Regional rule has at least one condition to properly inspect and manage web traffic.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-rules-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,34 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_regional_rulegroup_not_empty",
|
||||
"CheckTitle": "AWS WAF Classic Regional rule group has at least one rule",
|
||||
"CheckTitle": "Check if AWS WAF Classic Regional rule group has at least one rule.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:waf::account-id:rulegroup/rule-group-name/rule-group-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafRegionalRuleGroup",
|
||||
"Description": "**AWS WAF Classic Regional rule groups** are evaluated to confirm they contain at least one **rule**. Groups with no rule entries are considered empty.",
|
||||
"Risk": "An empty rule group contributes no filtering in a web ACL, letting requests bypass inspection within that group. This erodes **defense in depth** and can enable injection, brute-force, or bot traffic to reach applications, threatening **confidentiality**, **integrity**, and **availability**.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/cli/latest/reference/waf-regional/update-rule-group.html",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-groups.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-3"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Regional rule group contains at least one rule.",
|
||||
"Risk": "A WAF Classic Regional rule group without any rules allows all incoming traffic to bypass inspection, increasing the risk of unauthorized access and potential attacks on resources.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-groups.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf-regional update-rule-group --rule-group-id <rule-group-id> --updates Action=INSERT,ActivatedRule={Priority=1,RuleId=<rule-id>,Action={Type=BLOCK}} --change-token <change-token>",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Ensure WAF Classic Regional Rule Group has at least one rule\nResources:\n <example_resource_name>:\n Type: AWS::WAFRegional::RuleGroup\n Properties:\n Name: <example_resource_name>\n MetricName: <example_resource_name>\n ActivatedRules:\n - Priority: 1 # Critical: adds a rule so the rule group is not empty\n RuleId: <example_resource_id> # Critical: references an existing rule to include in the group\n Action:\n Type: BLOCK\n```",
|
||||
"Other": "1. In the AWS Console, go to AWS WAF & Shield and switch to AWS WAF Classic\n2. Select the correct Region, then choose Rule groups\n3. Open the target rule group and click Edit rule group\n4. Click Add rule to rule group, select an existing rule, choose an action (e.g., BLOCK), and click Update\n5. Save changes to ensure the rule group contains at least one rule",
|
||||
"Terraform": "```hcl\n# Ensure WAF Classic Regional Rule Group has at least one rule\nresource \"aws_wafregional_rule_group\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n metric_name = \"<example_resource_name>\"\n\n # Critical: adds a rule so the rule group is not empty\n activated_rule {\n priority = 1\n rule_id = \"<example_resource_id>\" # existing rule ID\n action {\n type = \"BLOCK\"\n }\n }\n}\n```"
|
||||
"CLI": "aws waf-regional update-rule-group --rule-group-id <rule-group-id> --updates Action=INSERT,ActivatedRule={Priority=1,RuleId=<rule-id>,Action={Type=BLOCK}} --change-token <change-token> --region <region>",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-3",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Apply **least privilege**: populate each rule group with vetted rules aligned to your threat model, using `ALLOW`, `BLOCK`, or `COUNT` actions as appropriate. Remove or disable unused groups to avoid false assurance. Validate behavior in staging and monitor metrics to maintain **defense in depth**.",
|
||||
"Url": "https://hub.prowler.com/check/waf_regional_rulegroup_not_empty"
|
||||
"Text": "Ensure that every AWS WAF Classic Regional rule group contains at least one rule to enforce traffic inspection and defined actions such as allow, block, or count.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-rule-group-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,35 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "waf_regional_webacl_with_rules",
|
||||
"CheckTitle": "AWS WAF Classic Regional Web ACL has at least one rule or rule group",
|
||||
"CheckTitle": "Check if AWS WAF Classic Regional WebACL has at least one rule or rule group.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "waf",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:aws:waf-regional:region:account-id:webacl/web-acl-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafRegionalWebAcl",
|
||||
"Description": "**AWS WAF Classic Regional web ACL** contains at least one **rule** or **rule group** to inspect and act on HTTP(S) requests. An ACL with no entries is considered empty.",
|
||||
"Risk": "With no rules, the web ACL performs no inspection, letting malicious traffic through.\n- **Confidentiality**: data exposure via SQLi/XSS\n- **Integrity**: unauthorized actions or tampering\n- **Availability**: abuse/bot traffic causing degradation or denial",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-4",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-editing.html",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/waf-rules.html"
|
||||
],
|
||||
"Description": "Ensure that every AWS WAF Classic Regional WebACL contains at least one rule or rule group.",
|
||||
"Risk": "An empty AWS WAF Classic Regional web ACL allows all web traffic to bypass inspection, potentially exposing resources to unauthorized access and attacks.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/waf-rules.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws waf-regional update-web-acl --web-acl-id <your-web-acl-id> --change-token $(aws waf-regional get-change-token --query 'ChangeToken' --output text) --updates '[{\"Action\":\"INSERT\",\"ActivatedRule\":{\"Priority\":1,\"RuleId\":\"<your-rule-id>\",\"Action\":{\"Type\":\"BLOCK\"}}}]'",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Ensure the Web ACL has at least one rule\nResources:\n <example_resource_name>:\n Type: AWS::WAFRegional::WebACL\n Properties:\n Name: \"<example_resource_name>\"\n MetricName: \"<example_resource_name>\"\n DefaultAction:\n Type: ALLOW\n # Critical: adding any rule to the Web ACL makes it non-empty and passes the check\n Rules:\n - Action:\n Type: BLOCK\n Priority: 1\n RuleId: \"<example_resource_id>\" # Rule to insert into the Web ACL\n```",
|
||||
"Other": "1. Open the AWS Console and go to AWS WAF\n2. In the left pane, click Web ACLs and switch to AWS WAF Classic if prompted\n3. Select the Regional Web ACL and open the Rules tab\n4. Click Edit web ACL\n5. In Rules, select an existing rule or rule group and choose Add rule to web ACL\n6. Click Save changes",
|
||||
"Terraform": "```hcl\n# Terraform: Ensure the Web ACL has at least one rule\nresource \"aws_wafregional_web_acl\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n metric_name = \"<example_resource_name>\"\n\n default_action {\n type = \"ALLOW\"\n }\n\n # Critical: add at least one rule so the Web ACL is not empty\n rules {\n priority = 1\n rule_id = \"<example_resource_id>\"\n action {\n type = \"BLOCK\"\n }\n }\n}\n```"
|
||||
"CLI": "aws waf-regional update-web-acl --web-acl-id <your-web-acl-id> --change-token <your-change-token> --updates '[{\"Action\":\"INSERT\",\"ActivatedRule\":{\"Priority\":1,\"RuleId\":\"<your-rule-id>\",\"Action\":{\"Type\":\"BLOCK\"}}}]' --default-action Type=ALLOW --region <your-region>",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-4",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Populate each web ACL with at least one **rule** or **rule group** that inspects requests and enforces **least privilege**. Apply defense in depth by combining managed and custom rules, include rate controls where appropriate, and review regularly. *Default to blocking undesired traffic; only permit required patterns*.",
|
||||
"Url": "https://hub.prowler.com/check/waf_regional_webacl_with_rules"
|
||||
"Text": "Ensure that every AWS WAF Classic Regional web ACL includes at least one rule or rule group to monitor and control web traffic effectively.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/classic-web-acl-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [],
|
||||
|
||||
@@ -1,35 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "wafv2_webacl_logging_enabled",
|
||||
"CheckTitle": "AWS WAFv2 Web ACL has logging enabled",
|
||||
"CheckTitle": "Check if AWS WAFv2 WebACL logging is enabled",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices"
|
||||
"Logging and Monitoring"
|
||||
],
|
||||
"ServiceName": "wafv2",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:partition:wafv2:region:account-id:webacl/webacl-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafv2WebAcl",
|
||||
"Description": "**AWS WAFv2 Web ACLs** with **logging** capture details of inspected requests and rule evaluations. The assessment determines for each Web ACL whether logging is configured to record traffic analyzed by that ACL.",
|
||||
"Risk": "Without **WAF logging**, visibility into allowed/blocked requests is lost, degrading detection and response. **SQLi**, **credential stuffing**, and **bot/DDoS probes** can go unnoticed, risking data exposure (C), undetected rule misuse (I), and service instability from unseen abuse (A).",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/WAF/enable-web-acls-logging.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-11",
|
||||
"https://docs.aws.amazon.com/cli/latest/reference/wafv2/put-logging-configuration.html",
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/logging.html"
|
||||
],
|
||||
"Description": "Check if AWS WAFv2 logging is enabled",
|
||||
"Risk": "Enabling AWS WAFv2 logging helps monitor and analyze traffic patterns for enhanced security.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/developerguide/logging.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "aws wafv2 put-logging-configuration --logging-configuration ResourceArn=<WEB_ACL_ARN>,LogDestinationConfigs=<DESTINATION_ARN>",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Enable logging for a WAFv2 Web ACL\nResources:\n <example_resource_name>:\n Type: AWS::WAFv2::LoggingConfiguration\n Properties:\n ResourceArn: arn:aws:wafv2:<region>:<account-id>:regional/webacl/<example_resource_name>/<example_resource_id> # CRITICAL: target Web ACL to log\n LogDestinationConfigs: # CRITICAL: where logs are sent\n - arn:aws:logs:<region>:<account-id>:log-group:aws-waf-logs-<example_resource_name>\n```",
|
||||
"Other": "1. In the AWS Console, go to AWS WAF & Shield > Web ACLs\n2. Select the target Web ACL\n3. Open the Logging and metrics (or Logging) section and click Enable logging\n4. Choose a log destination (CloudWatch Logs log group, S3 bucket, or Kinesis Data Firehose)\n5. Click Save to enable logging",
|
||||
"Terraform": "```hcl\n# Enable logging for a WAFv2 Web ACL\nresource \"aws_wafv2_web_acl_logging_configuration\" \"<example_resource_name>\" {\n resource_arn = \"<example_resource_arn>\" # CRITICAL: target Web ACL ARN\n log_destination_configs = [\"<example_destination_arn>\"] # CRITICAL: log destination ARN\n}\n```"
|
||||
"CLI": "aws wafv2 update-web-acl-logging-configuration --scope REGIONAL --web-acl-arn arn:partition:wafv2:region:account-id:webacl/webacl-id --logging-configuration '{\"LogDestinationConfigs\": [\"arn:partition:logs:region:account-id:log-group:log-group-name\"]}'",
|
||||
"NativeIaC": "https://docs.prowler.com/checks/aws/logging-policies/bc_aws_logging_33#terraform",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-11",
|
||||
"Terraform": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/WAF/enable-web-acls-logging.html"
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable **logging** on all WAFv2 Web ACLs to a centralized destination. Apply **least privilege** for log delivery, **redact sensitive fields**, and filter to retain high-value events. Integrate with monitoring/SIEM for **alerting and correlation**, and review routinely as part of **defense in depth**.",
|
||||
"Url": "https://hub.prowler.com/check/wafv2_webacl_logging_enabled"
|
||||
"Text": "Enable AWS WAFv2 logging for your Web ACLs to monitor and analyze traffic patterns effectively.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/logging.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
|
||||
@@ -1,35 +1,28 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "wafv2_webacl_rule_logging_enabled",
|
||||
"CheckTitle": "AWS WAFv2 Web ACL has Amazon CloudWatch metrics enabled for all rules and rule groups",
|
||||
"CheckTitle": "Check if AWS WAFv2 WebACL rule or rule group has Amazon CloudWatch metrics enabled.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices/Runtime Behavior Analysis",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "wafv2",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"ResourceIdTemplate": "arn:partition:wafv2:region:account-id:webacl/webacl-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafv2WebAcl",
|
||||
"Description": "**AWS WAFv2 Web ACLs** are assessed to confirm that every associated **rule** and **rule group** has **CloudWatch metrics** enabled for visibility into rule evaluations and traffic",
|
||||
"Risk": "Absent **CloudWatch metrics**, WAF telemetry is lost, masking spikes, rule bypasses, and misconfigurations. This delays detection of SQLi/XSS probes and bot floods, risking data confidentiality, request integrity, and application availability.",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://support.icompaas.com/support/solutions/articles/62000233644-ensure-aws-wafv2-webacl-rule-or-rule-group-has-amazon-cloudwatch-metrics-enabled",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-12"
|
||||
],
|
||||
"ResourceType": "AwsWafv2RuleGroup",
|
||||
"Description": "This control checks whether an AWS WAF rule or rule group has Amazon CloudWatch metrics enabled. The control fails if the rule or rule group doesn't have CloudWatch metrics enabled.",
|
||||
"Risk": "Without CloudWatch Metrics enabled on AWS WAF rules or rule groups, it's challenging to monitor traffic flow effectively. This reduces visibility into potential security threats, such as malicious activities or unusual traffic patterns.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/APIReference/API_UpdateRuleGroup.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Enable CloudWatch metrics on WAFv2 Web ACL rules\nResources:\n <example_resource_name>:\n Type: AWS::WAFv2::WebACL\n Properties:\n Name: <example_resource_name>\n Scope: REGIONAL\n DefaultAction:\n Allow: {}\n VisibilityConfig:\n SampledRequestsEnabled: true\n CloudWatchMetricsEnabled: true\n MetricName: <metric_name>\n Rules:\n - Name: <example_rule_name>\n Priority: 1\n Statement:\n ManagedRuleGroupStatement:\n VendorName: AWS\n Name: AWSManagedRulesCommonRuleSet\n OverrideAction:\n None: {}\n VisibilityConfig:\n SampledRequestsEnabled: true\n CloudWatchMetricsEnabled: true # Critical: enables CloudWatch metrics for this rule\n MetricName: <rule_metric_name> # Required with CloudWatch metrics\n```",
|
||||
"Other": "1. In AWS Console, go to AWS WAF & Shield > Web ACLs, select the Web ACL\n2. Open the Rules tab, edit each rule, and enable CloudWatch metrics (Visibility configuration > CloudWatch metrics enabled), then Save\n3. For rule groups: go to AWS WAF & Shield > Rule groups, select the rule group, edit Visibility configuration, enable CloudWatch metrics, then Save",
|
||||
"Terraform": "```hcl\n# Terraform: Enable CloudWatch metrics on WAFv2 Web ACL rules\nresource \"aws_wafv2_web_acl\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n scope = \"REGIONAL\"\n\n default_action { allow {} }\n\n visibility_config {\n cloudwatch_metrics_enabled = true\n metric_name = \"<metric_name>\"\n sampled_requests_enabled = true\n }\n\n rule {\n name = \"<example_rule_name>\"\n priority = 1\n\n statement {\n managed_rule_group_statement {\n vendor_name = \"AWS\"\n name = \"AWSManagedRulesCommonRuleSet\"\n }\n }\n\n override_action { none {} }\n\n visibility_config {\n cloudwatch_metrics_enabled = true # Critical: enables CloudWatch metrics for this rule\n metric_name = \"<rule_metric_name>\" # Required with CloudWatch metrics\n sampled_requests_enabled = true\n }\n }\n}\n```"
|
||||
"CLI": "aws wafv2 update-rule-group --id <rule-group-id> --scope <scope> --name <rule-group-name> --cloudwatch-metrics-enabled true",
|
||||
"NativeIaC": "",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-12",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Enable **CloudWatch metrics** for all WAF rules and rule groups (*including managed rule groups*). Use consistent metric names, centralize dashboards and alerts, and review trends to validate rule efficacy. Integrate with a SIEM for **defense in depth** and tune rules based on telemetry.",
|
||||
"Url": "https://hub.prowler.com/check/wafv2_webacl_rule_logging_enabled"
|
||||
"Text": "Ensure that CloudWatch Metrics are enabled for AWS WAF rules and rule groups. This provides detailed insights into traffic, enabling timely identification of security risks.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/APIReference/API_UpdateWebACL.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
|
||||
@@ -1,40 +1,31 @@
|
||||
{
|
||||
"Provider": "aws",
|
||||
"CheckID": "wafv2_webacl_with_rules",
|
||||
"CheckTitle": "AWS WAFv2 Web ACL has at least one rule or rule group attached",
|
||||
"CheckTitle": "Check if AWS WAFv2 WebACL has at least one rule or rule group.",
|
||||
"CheckType": [
|
||||
"Software and Configuration Checks/AWS Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/AWS Foundational Security Best Practices",
|
||||
"Software and Configuration Checks/Industry and Regulatory Standards/NIST 800-53 Controls"
|
||||
],
|
||||
"ServiceName": "wafv2",
|
||||
"SubServiceName": "",
|
||||
"ResourceIdTemplate": "",
|
||||
"Severity": "high",
|
||||
"ResourceIdTemplate": "arn:partition:wafv2:region:account-id:webacl/webacl-id",
|
||||
"Severity": "medium",
|
||||
"ResourceType": "AwsWafv2WebAcl",
|
||||
"Description": "**AWS WAFv2 web ACLs** are evaluated for the presence of at least one configured **rule** or **rule group** that defines how HTTP(S) requests are inspected and acted upon.",
|
||||
"Risk": "Without rules, traffic is governed only by the web ACL `DefaultAction`, often allowing requests without inspection. This increases risks to **confidentiality** (data exfiltration via injection), **integrity** (XSS/parameter tampering), and **availability** (layer-7 DDoS, bot abuse).",
|
||||
"RelatedUrl": "",
|
||||
"AdditionalURLs": [
|
||||
"https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-editing.html",
|
||||
"https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-10",
|
||||
"https://support.icompaas.com/support/solutions/articles/62000233642-ensure-aws-wafv2-webacl-has-at-least-one-rule-or-rule-group"
|
||||
],
|
||||
"Description": "Check if AWS WAFv2 WebACL has at least one rule or rule group associated with it.",
|
||||
"Risk": "An empty AWS WAF web ACL allows all web traffic to pass without inspection or control, exposing resources to potential security threats and attacks.",
|
||||
"RelatedUrl": "https://docs.aws.amazon.com/waf/latest/APIReference/API_Rule.html",
|
||||
"Remediation": {
|
||||
"Code": {
|
||||
"CLI": "",
|
||||
"NativeIaC": "```yaml\n# CloudFormation: Add at least one rule to the WAFv2 WebACL\nResources:\n <example_resource_name>:\n Type: AWS::WAFv2::WebACL\n Properties:\n Scope: REGIONAL\n DefaultAction:\n Allow: {}\n VisibilityConfig:\n SampledRequestsEnabled: true\n CloudWatchMetricsEnabled: true\n MetricName: <example_resource_name>\n Rules: # CRITICAL: Adding any rule/rule group here fixes the finding by making the Web ACL non-empty\n - Name: <example_rule_name>\n Priority: 0\n Statement:\n ManagedRuleGroupStatement:\n VendorName: AWS\n Name: AWSManagedRulesCommonRuleSet # Uses an AWS managed rule group\n OverrideAction:\n Count: {} # Non-blocking to minimize impact\n VisibilityConfig:\n SampledRequestsEnabled: true\n CloudWatchMetricsEnabled: true\n MetricName: <example_rule_name>\n```",
|
||||
"Other": "1. In the AWS Console, go to AWS WAF\n2. Open Web ACLs and select the failing Web ACL\n3. Go to the Rules tab and click Add rules\n4. Choose Add managed rule group, select AWS > AWSManagedRulesCommonRuleSet\n5. Set action to Count (to avoid blocking), then Add rule and Save\n6. Verify the Web ACL now shows at least one rule",
|
||||
"Terraform": "```hcl\n# Terraform: Ensure the WAFv2 Web ACL has at least one rule\nresource \"aws_wafv2_web_acl\" \"<example_resource_name>\" {\n name = \"<example_resource_name>\"\n scope = \"REGIONAL\"\n\n default_action {\n allow {}\n }\n\n visibility_config {\n cloudwatch_metrics_enabled = true\n metric_name = \"<example_resource_name>\"\n sampled_requests_enabled = true\n }\n\n rule { # CRITICAL: Presence of this rule makes the Web ACL non-empty and passes the check\n name = \"<example_rule_name>\"\n priority = 0\n statement {\n managed_rule_group_statement {\n name = \"AWSManagedRulesCommonRuleSet\"\n vendor_name = \"AWS\" # Minimal managed rule group\n }\n }\n override_action { count {} } # Non-blocking\n visibility_config {\n cloudwatch_metrics_enabled = true\n metric_name = \"<example_rule_name>\"\n sampled_requests_enabled = true\n }\n }\n}\n```"
|
||||
"CLI": "aws wafv2 update-web-acl --id <web-acl-id> --scope <scope> --default-action <default-action> --rules <rules>",
|
||||
"NativeIaC": "https://docs.prowler.com/checks/aws/networking-policies/bc_aws_networking_64/",
|
||||
"Other": "https://docs.aws.amazon.com/securityhub/latest/userguide/waf-controls.html#waf-10",
|
||||
"Terraform": ""
|
||||
},
|
||||
"Recommendation": {
|
||||
"Text": "Populate each web ACL with targeted rules or managed rule groups to enforce least-privilege web access: cover common exploits (SQLi/XSS), IP reputation, and rate limits, scoped to your apps. Use a conservative `DefaultAction`, monitor metrics/logs, and continually tune-supporting **defense in depth** and **zero trust**.",
|
||||
"Url": "https://hub.prowler.com/check/wafv2_webacl_with_rules"
|
||||
"Text": "Ensure that each AWS WAF web ACL contains at least one rule or rule group to effectively manage and inspect incoming HTTP(S) web requests.",
|
||||
"Url": "https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-editing.html"
|
||||
}
|
||||
},
|
||||
"Categories": [
|
||||
"internet-exposed"
|
||||
],
|
||||
"Categories": [],
|
||||
"DependsOn": [],
|
||||
"RelatedTo": [],
|
||||
"Notes": ""
|
||||
|
||||
@@ -77,7 +77,7 @@ class CloudStorage(GCPService):
|
||||
Bucket(
|
||||
name=bucket["name"],
|
||||
id=bucket["id"],
|
||||
region=bucket["location"].lower(),
|
||||
region=bucket["location"],
|
||||
uniform_bucket_level_access=bucket["iamConfiguration"][
|
||||
"uniformBucketLevelAccess"
|
||||
]["enabled"],
|
||||
|
||||
@@ -17,28 +17,6 @@ class Clusters(MongoDBAtlasService):
|
||||
super().__init__(__class__.__name__, provider)
|
||||
self.clusters = self._list_clusters()
|
||||
|
||||
def _extract_location(self, cluster_data: dict) -> str:
|
||||
"""
|
||||
Extract location from cluster data and convert to lowercase
|
||||
|
||||
Args:
|
||||
cluster_data: Cluster data from API
|
||||
|
||||
Returns:
|
||||
str: Location in lowercase, empty string if not found
|
||||
"""
|
||||
try:
|
||||
replication_specs = cluster_data.get("replicationSpecs", [])
|
||||
if replication_specs and len(replication_specs) > 0:
|
||||
region_configs = replication_specs[0].get("regionConfigs", [])
|
||||
if region_configs and len(region_configs) > 0:
|
||||
region_name = region_configs[0].get("regionName", "")
|
||||
if region_name:
|
||||
return region_name.lower()
|
||||
except (KeyError, IndexError, AttributeError):
|
||||
pass
|
||||
return ""
|
||||
|
||||
def _list_clusters(self):
|
||||
"""
|
||||
List all MongoDB Atlas clusters across all projects
|
||||
@@ -111,7 +89,9 @@ class Clusters(MongoDBAtlasService):
|
||||
"connectionStrings", {}
|
||||
),
|
||||
tags=cluster_data.get("tags", []),
|
||||
location=self._extract_location(cluster_data),
|
||||
location=cluster_data.get("replicationSpecs", {})[0]
|
||||
.get("regionConfigs", {})[0]
|
||||
.get("regionName", ""),
|
||||
)
|
||||
|
||||
# Use a unique key combining project_id and cluster_name
|
||||
|
||||
@@ -35,7 +35,7 @@ class TestCloudStorageService:
|
||||
assert len(cloudstorage_client.buckets) == 2
|
||||
assert cloudstorage_client.buckets[0].name == "bucket1"
|
||||
assert cloudstorage_client.buckets[0].id.__class__.__name__ == "str"
|
||||
assert cloudstorage_client.buckets[0].region == "us"
|
||||
assert cloudstorage_client.buckets[0].region == "US"
|
||||
assert cloudstorage_client.buckets[0].uniform_bucket_level_access
|
||||
assert cloudstorage_client.buckets[0].public
|
||||
|
||||
@@ -53,7 +53,7 @@ class TestCloudStorageService:
|
||||
|
||||
assert cloudstorage_client.buckets[1].name == "bucket2"
|
||||
assert cloudstorage_client.buckets[1].id.__class__.__name__ == "str"
|
||||
assert cloudstorage_client.buckets[1].region == "eu"
|
||||
assert cloudstorage_client.buckets[1].region == "EU"
|
||||
assert not cloudstorage_client.buckets[1].uniform_bucket_level_access
|
||||
assert not cloudstorage_client.buckets[1].public
|
||||
assert cloudstorage_client.buckets[1].retention_policy is None
|
||||
|
||||
@@ -157,7 +157,7 @@ class TestMongoDBAtlasMutelist:
|
||||
"*": {
|
||||
"Checks": {
|
||||
"clusters_backup_enabled": {
|
||||
"Regions": ["western_europe"],
|
||||
"Regions": ["WESTERN_EUROPE"],
|
||||
"Resources": ["*"],
|
||||
}
|
||||
}
|
||||
@@ -172,7 +172,7 @@ class TestMongoDBAtlasMutelist:
|
||||
finding.check_metadata.CheckID = "clusters_backup_enabled"
|
||||
finding.status = "FAIL"
|
||||
finding.resource_name = "any-cluster"
|
||||
finding.location = "western_europe"
|
||||
finding.location = "WESTERN_EUROPE"
|
||||
finding.resource_tags = []
|
||||
|
||||
assert mutelist.is_finding_muted(finding, "any-org-id")
|
||||
|
||||
@@ -64,7 +64,7 @@ def mock_clusters_list_clusters(_):
|
||||
pit_enabled=True,
|
||||
connection_strings={"standard": "mongodb://cluster.mongodb.net"},
|
||||
tags=[{"key": "environment", "value": "test"}],
|
||||
location="us_east_1",
|
||||
location="US_EAST_1",
|
||||
)
|
||||
}
|
||||
|
||||
@@ -109,4 +109,4 @@ class Test_Clusters_Service:
|
||||
assert cluster.connection_strings["standard"] == "mongodb://cluster.mongodb.net"
|
||||
assert cluster.tags[0]["key"] == "environment"
|
||||
assert cluster.tags[0]["value"] == "test"
|
||||
assert cluster.location == "us_east_1"
|
||||
assert cluster.location == "US_EAST_1"
|
||||
|
||||
26
ui/AGENTS.md
26
ui/AGENTS.md
@@ -13,32 +13,6 @@
|
||||
- ALWAYS: `const X = { A: "a", B: "b" } as const; type T = typeof X[keyof typeof X]`
|
||||
- NEVER: `type T = "a" | "b"`
|
||||
|
||||
### Interfaces
|
||||
|
||||
- ALWAYS: One level depth only; object property → dedicated interface (recursive)
|
||||
- ALWAYS: Reuse via `extends`
|
||||
- NEVER: Inline nested objects
|
||||
|
||||
```typescript
|
||||
// ✅ CORRECT
|
||||
interface UserAddress {
|
||||
street: string;
|
||||
city: string;
|
||||
}
|
||||
interface User {
|
||||
id: string;
|
||||
address: UserAddress;
|
||||
}
|
||||
interface Admin extends User {
|
||||
permissions: string[];
|
||||
}
|
||||
|
||||
// ❌ WRONG
|
||||
interface User {
|
||||
address: { street: string; city: string };
|
||||
}
|
||||
```
|
||||
|
||||
### Styling
|
||||
|
||||
- Single class: `className="bg-slate-800 text-white"`
|
||||
|
||||
@@ -2,30 +2,20 @@
|
||||
|
||||
All notable changes to the **Prowler UI** are documented in this file.
|
||||
|
||||
## [1.16.0] (Prowler v5.16.0)
|
||||
## [1.16.0] (Prowler Unreleased)
|
||||
|
||||
### 🚀 Added
|
||||
|
||||
- SSO and API Key link cards to Integrations page for better discoverability [(#9570)](https://github.com/prowler-cloud/prowler/pull/9570)
|
||||
- Risk Radar component with category-based severity breakdown to Overview page [(#9532)](https://github.com/prowler-cloud/prowler/pull/9532)
|
||||
- More extensive resource details (partition, details and metadata) within Findings detail and Resources detail view [(#9515)](https://github.com/prowler-cloud/prowler/pull/9515)
|
||||
- Integrated Prowler MCP server with Lighthouse AI for dynamic tool execution [(#9255)](https://github.com/prowler-cloud/prowler/pull/9255)
|
||||
- Implement "MuteList Simple" feature allowing users to mute findings directly from the findings table with checkbox selection, and a new dedicated /mutelist route with Simple (mute rules list) and Advanced (YAML config) tabs. [(#9577)](https://github.com/prowler-cloud/prowler/pull/9577)
|
||||
|
||||
### 🔄 Changed
|
||||
|
||||
- Lighthouse AI markdown rendering with strict markdownlint compliance and nested list styling [(#9586)](https://github.com/prowler-cloud/prowler/pull/9586)
|
||||
- Lighthouse AI default model updated from gpt-4o to gpt-5.2 [(#9586)](https://github.com/prowler-cloud/prowler/pull/9586)
|
||||
- Lighthouse AI destructive MCP tools blocked from LLM access (delete, trigger scan, etc.) [(#9586)](https://github.com/prowler-cloud/prowler/pull/9586)
|
||||
|
||||
### 🐞 Fixed
|
||||
|
||||
- Lighthouse AI angle-bracket placeholders now render correctly in chat messages [(#9586)](https://github.com/prowler-cloud/prowler/pull/9586)
|
||||
- Lighthouse AI recommended model badge contrast improved [(#9586)](https://github.com/prowler-cloud/prowler/pull/9586)
|
||||
|
||||
---
|
||||
|
||||
## [1.15.1] (Prowler v5.15.1)
|
||||
## [1.15.1] (Prowler Unreleased)
|
||||
|
||||
### 🔐 Security
|
||||
|
||||
|
||||
45
ui/actions/lighthouse/checks.ts
Normal file
45
ui/actions/lighthouse/checks.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
export const getLighthouseProviderChecks = async ({
|
||||
providerType,
|
||||
service,
|
||||
severity,
|
||||
compliances,
|
||||
}: {
|
||||
providerType: string;
|
||||
service: string[];
|
||||
severity: string[];
|
||||
compliances: string[];
|
||||
}) => {
|
||||
const url = new URL(
|
||||
`https://hub.prowler.com/api/check?fields=id&providers=${providerType}`,
|
||||
);
|
||||
if (service) {
|
||||
url.searchParams.append("services", service.join(","));
|
||||
}
|
||||
if (severity) {
|
||||
url.searchParams.append("severities", severity.join(","));
|
||||
}
|
||||
if (compliances) {
|
||||
url.searchParams.append("compliances", compliances.join(","));
|
||||
}
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "GET",
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
const ids = data.map((item: { id: string }) => item.id);
|
||||
return ids;
|
||||
};
|
||||
|
||||
export const getLighthouseCheckDetails = async ({
|
||||
checkId,
|
||||
}: {
|
||||
checkId: string;
|
||||
}) => {
|
||||
const url = new URL(`https://hub.prowler.com/api/check/${checkId}`);
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "GET",
|
||||
});
|
||||
const data = await response.json();
|
||||
return data;
|
||||
};
|
||||
14
ui/actions/lighthouse/complianceframeworks.ts
Normal file
14
ui/actions/lighthouse/complianceframeworks.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
export const getLighthouseComplianceFrameworks = async (
|
||||
provider_type: string,
|
||||
) => {
|
||||
const url = new URL(
|
||||
`https://hub.prowler.com/api/compliance?fields=id&provider=${provider_type}`,
|
||||
);
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "GET",
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
const frameworks = data.map((item: { id: string }) => item.id);
|
||||
return frameworks;
|
||||
};
|
||||
87
ui/actions/lighthouse/compliances.ts
Normal file
87
ui/actions/lighthouse/compliances.ts
Normal file
@@ -0,0 +1,87 @@
|
||||
import { apiBaseUrl, getAuthHeaders, parseStringify } from "@/lib/helper";
|
||||
|
||||
export const getLighthouseCompliancesOverview = async ({
|
||||
scanId, // required
|
||||
fields,
|
||||
filters,
|
||||
page,
|
||||
pageSize,
|
||||
sort,
|
||||
}: {
|
||||
scanId: string;
|
||||
fields?: string[];
|
||||
filters?: Record<string, string | number | boolean | undefined>;
|
||||
page?: number;
|
||||
pageSize?: number;
|
||||
sort?: string;
|
||||
}) => {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
const url = new URL(`${apiBaseUrl}/compliance-overviews`);
|
||||
|
||||
// Required filter
|
||||
url.searchParams.append("filter[scan_id]", scanId);
|
||||
|
||||
// Handle optional fields
|
||||
if (fields && fields.length > 0) {
|
||||
url.searchParams.append("fields[compliance-overviews]", fields.join(","));
|
||||
}
|
||||
|
||||
// Handle filters
|
||||
if (filters) {
|
||||
Object.entries(filters).forEach(([key, value]) => {
|
||||
if (value !== "" && value !== null) {
|
||||
url.searchParams.append(key, String(value));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Handle pagination
|
||||
if (page) {
|
||||
url.searchParams.append("page[number]", page.toString());
|
||||
}
|
||||
if (pageSize) {
|
||||
url.searchParams.append("page[size]", pageSize.toString());
|
||||
}
|
||||
|
||||
// Handle sorting
|
||||
if (sort) {
|
||||
url.searchParams.append("sort", sort);
|
||||
}
|
||||
|
||||
try {
|
||||
const compliances = await fetch(url.toString(), {
|
||||
headers,
|
||||
});
|
||||
const data = await compliances.json();
|
||||
const parsedData = parseStringify(data);
|
||||
|
||||
return parsedData;
|
||||
} catch (error) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Error fetching providers:", error);
|
||||
return undefined;
|
||||
}
|
||||
};
|
||||
|
||||
export const getLighthouseComplianceOverview = async ({
|
||||
complianceId,
|
||||
fields,
|
||||
}: {
|
||||
complianceId: string;
|
||||
fields?: string[];
|
||||
}) => {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
const url = new URL(`${apiBaseUrl}/compliance-overviews/${complianceId}`);
|
||||
|
||||
if (fields) {
|
||||
url.searchParams.append("fields[compliance-overviews]", fields.join(","));
|
||||
}
|
||||
const response = await fetch(url.toString(), {
|
||||
headers,
|
||||
});
|
||||
|
||||
const data = await response.json();
|
||||
const parsedData = parseStringify(data);
|
||||
|
||||
return parsedData;
|
||||
};
|
||||
@@ -1 +1,5 @@
|
||||
export * from "./checks";
|
||||
export * from "./complianceframeworks";
|
||||
export * from "./compliances";
|
||||
export * from "./lighthouse";
|
||||
export * from "./resources";
|
||||
|
||||
138
ui/actions/lighthouse/resources.ts
Normal file
138
ui/actions/lighthouse/resources.ts
Normal file
@@ -0,0 +1,138 @@
|
||||
import { apiBaseUrl, getAuthHeaders, parseStringify } from "@/lib/helper";
|
||||
|
||||
export async function getLighthouseResources({
|
||||
page = 1,
|
||||
query = "",
|
||||
sort = "",
|
||||
filters = {},
|
||||
fields = [],
|
||||
}: {
|
||||
page?: number;
|
||||
query?: string;
|
||||
sort?: string;
|
||||
filters?: Record<string, string | number | boolean>;
|
||||
fields?: string[];
|
||||
}) {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
|
||||
const url = new URL(`${apiBaseUrl}/resources`);
|
||||
|
||||
if (page) {
|
||||
url.searchParams.append("page[number]", page.toString());
|
||||
}
|
||||
|
||||
if (sort) {
|
||||
url.searchParams.append("sort", sort);
|
||||
}
|
||||
|
||||
if (query) {
|
||||
url.searchParams.append("filter[search]", query);
|
||||
}
|
||||
|
||||
if (fields.length > 0) {
|
||||
url.searchParams.append("fields[resources]", fields.join(","));
|
||||
}
|
||||
|
||||
if (filters) {
|
||||
for (const [key, value] of Object.entries(filters)) {
|
||||
url.searchParams.append(`${key}`, value as string);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(url.toString(), {
|
||||
headers,
|
||||
});
|
||||
const data = await response.json();
|
||||
const parsedData = parseStringify(data);
|
||||
return parsedData;
|
||||
} catch (error) {
|
||||
console.error("Error fetching resources:", error);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export async function getLighthouseLatestResources({
|
||||
page = 1,
|
||||
query = "",
|
||||
sort = "",
|
||||
filters = {},
|
||||
fields = [],
|
||||
}: {
|
||||
page?: number;
|
||||
query?: string;
|
||||
sort?: string;
|
||||
filters?: Record<string, string | number | boolean>;
|
||||
fields?: string[];
|
||||
}) {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
|
||||
const url = new URL(`${apiBaseUrl}/resources/latest`);
|
||||
|
||||
if (page) {
|
||||
url.searchParams.append("page[number]", page.toString());
|
||||
}
|
||||
|
||||
if (sort) {
|
||||
url.searchParams.append("sort", sort);
|
||||
}
|
||||
|
||||
if (query) {
|
||||
url.searchParams.append("filter[search]", query);
|
||||
}
|
||||
|
||||
if (fields.length > 0) {
|
||||
url.searchParams.append("fields[resources]", fields.join(","));
|
||||
}
|
||||
|
||||
if (filters) {
|
||||
for (const [key, value] of Object.entries(filters)) {
|
||||
url.searchParams.append(`${key}`, value as string);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(url.toString(), {
|
||||
headers,
|
||||
});
|
||||
const data = await response.json();
|
||||
const parsedData = parseStringify(data);
|
||||
return parsedData;
|
||||
} catch (error) {
|
||||
console.error("Error fetching resources:", error);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export async function getLighthouseResourceById({
|
||||
id,
|
||||
fields = [],
|
||||
include = [],
|
||||
}: {
|
||||
id: string;
|
||||
fields?: string[];
|
||||
include?: string[];
|
||||
}) {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
const url = new URL(`${apiBaseUrl}/resources/${id}`);
|
||||
|
||||
if (fields.length > 0) {
|
||||
url.searchParams.append("fields", fields.join(","));
|
||||
}
|
||||
|
||||
if (include.length > 0) {
|
||||
url.searchParams.append("include", include.join(","));
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(url.toString(), {
|
||||
headers,
|
||||
});
|
||||
const data = await response.json();
|
||||
const parsedData = parseStringify(data);
|
||||
return parsedData;
|
||||
} catch (error) {
|
||||
console.error("Error fetching resource:", error);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
export {
|
||||
createMuteRule,
|
||||
deleteMuteRule,
|
||||
getMuteRule,
|
||||
getMuteRules,
|
||||
toggleMuteRule,
|
||||
updateMuteRule,
|
||||
} from "./mute-rules";
|
||||
export * from "./types";
|
||||
@@ -1,383 +0,0 @@
|
||||
"use server";
|
||||
|
||||
import { revalidatePath } from "next/cache";
|
||||
|
||||
import { apiBaseUrl, getAuthHeaders } from "@/lib/helper";
|
||||
|
||||
import {
|
||||
DeleteMuteRuleActionState,
|
||||
MuteRuleActionState,
|
||||
MuteRuleData,
|
||||
MuteRulesResponse,
|
||||
} from "./types";
|
||||
|
||||
interface GetMuteRulesParams {
|
||||
page?: number;
|
||||
pageSize?: number;
|
||||
sort?: string;
|
||||
filters?: Record<string, string>;
|
||||
}
|
||||
|
||||
export const getMuteRules = async (
|
||||
params: GetMuteRulesParams = {},
|
||||
): Promise<MuteRulesResponse | undefined> => {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
const url = new URL(`${apiBaseUrl}/mute-rules`);
|
||||
|
||||
if (params.page) {
|
||||
url.searchParams.append("page[number]", params.page.toString());
|
||||
}
|
||||
if (params.pageSize) {
|
||||
url.searchParams.append("page[size]", params.pageSize.toString());
|
||||
}
|
||||
if (params.sort) {
|
||||
url.searchParams.append("sort", params.sort);
|
||||
}
|
||||
if (params.filters) {
|
||||
Object.entries(params.filters).forEach(([key, value]) => {
|
||||
url.searchParams.append(`filter[${key}]`, value);
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "GET",
|
||||
headers,
|
||||
next: { revalidate: 0 },
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
// Don't log authorization errors as they're expected when endpoint is not available
|
||||
if (response.status !== 401 && response.status !== 403) {
|
||||
console.error(`Failed to fetch mute rules: ${response.statusText}`);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data;
|
||||
} catch (error) {
|
||||
console.error("Error fetching mute rules:", error);
|
||||
return undefined;
|
||||
}
|
||||
};
|
||||
|
||||
export const getMuteRule = async (
|
||||
id: string,
|
||||
): Promise<MuteRuleData | undefined> => {
|
||||
const headers = await getAuthHeaders({ contentType: false });
|
||||
const url = new URL(`${apiBaseUrl}/mute-rules/${id}`);
|
||||
|
||||
try {
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "GET",
|
||||
headers,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
// Don't log authorization errors as they're expected when endpoint is not available
|
||||
if (response.status !== 401 && response.status !== 403) {
|
||||
console.error(`Failed to fetch mute rule: ${response.statusText}`);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.data;
|
||||
} catch (error) {
|
||||
console.error("Error fetching mute rule:", error);
|
||||
return undefined;
|
||||
}
|
||||
};
|
||||
|
||||
export const createMuteRule = async (
|
||||
_prevState: MuteRuleActionState,
|
||||
formData: FormData,
|
||||
): Promise<MuteRuleActionState> => {
|
||||
const headers = await getAuthHeaders({ contentType: true });
|
||||
|
||||
const name = formData.get("name") as string;
|
||||
const reason = formData.get("reason") as string;
|
||||
const findingIdsRaw = formData.get("finding_ids") as string;
|
||||
|
||||
// Validate required fields
|
||||
if (!name || name.length < 3) {
|
||||
return {
|
||||
errors: {
|
||||
name: "Name must be at least 3 characters",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
if (!reason || reason.length < 3) {
|
||||
return {
|
||||
errors: {
|
||||
reason: "Reason must be at least 3 characters",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
let findingIds: string[];
|
||||
try {
|
||||
findingIds = JSON.parse(findingIdsRaw);
|
||||
if (!Array.isArray(findingIds) || findingIds.length === 0) {
|
||||
throw new Error("Invalid finding IDs");
|
||||
}
|
||||
} catch {
|
||||
return {
|
||||
errors: {
|
||||
finding_ids: "At least one finding must be selected",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const url = new URL(`${apiBaseUrl}/mute-rules`);
|
||||
|
||||
const bodyData = {
|
||||
data: {
|
||||
type: "mute-rules",
|
||||
attributes: {
|
||||
name,
|
||||
reason,
|
||||
finding_ids: findingIds,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "POST",
|
||||
headers,
|
||||
body: JSON.stringify(bodyData),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
let errorMessage = `Failed to create mute rule: ${response.statusText}`;
|
||||
try {
|
||||
const errorData = await response.json();
|
||||
errorMessage =
|
||||
errorData?.errors?.[0]?.detail || errorData?.message || errorMessage;
|
||||
} catch {
|
||||
// JSON parsing failed, use default error message
|
||||
}
|
||||
throw new Error(errorMessage);
|
||||
}
|
||||
|
||||
revalidatePath("/findings");
|
||||
revalidatePath("/mutelist");
|
||||
|
||||
return {
|
||||
success: "Mute rule created successfully! Findings are now muted.",
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("Error creating mute rule:", error);
|
||||
return {
|
||||
errors: {
|
||||
general:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "Error creating mute rule. Please try again.",
|
||||
},
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
export const updateMuteRule = async (
|
||||
_prevState: MuteRuleActionState,
|
||||
formData: FormData,
|
||||
): Promise<MuteRuleActionState> => {
|
||||
const headers = await getAuthHeaders({ contentType: true });
|
||||
|
||||
const id = formData.get("id") as string;
|
||||
const name = formData.get("name") as string;
|
||||
const reason = formData.get("reason") as string;
|
||||
const enabledRaw = formData.get("enabled") as string;
|
||||
|
||||
if (!id) {
|
||||
return {
|
||||
errors: {
|
||||
general: "Mute rule ID is required for update",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Validate optional fields if provided
|
||||
const validateOptionalField = (
|
||||
value: string | null,
|
||||
fieldName: string,
|
||||
minLength = 3,
|
||||
): MuteRuleActionState | null => {
|
||||
if (value && value.length > 0 && value.length < minLength) {
|
||||
return {
|
||||
errors: {
|
||||
[fieldName]: `${fieldName.charAt(0).toUpperCase() + fieldName.slice(1)} must be at least ${minLength} characters`,
|
||||
},
|
||||
};
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
const nameError = validateOptionalField(name, "name");
|
||||
if (nameError) return nameError;
|
||||
|
||||
const reasonError = validateOptionalField(reason, "reason");
|
||||
if (reasonError) return reasonError;
|
||||
|
||||
try {
|
||||
const url = new URL(`${apiBaseUrl}/mute-rules/${id}`);
|
||||
|
||||
const attributes: Record<string, string | boolean> = {};
|
||||
if (name) attributes.name = name;
|
||||
if (reason) attributes.reason = reason;
|
||||
if (enabledRaw !== null && enabledRaw !== undefined) {
|
||||
attributes.enabled = enabledRaw === "true";
|
||||
}
|
||||
|
||||
const bodyData = {
|
||||
data: {
|
||||
type: "mute-rules",
|
||||
id,
|
||||
attributes,
|
||||
},
|
||||
};
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "PATCH",
|
||||
headers,
|
||||
body: JSON.stringify(bodyData),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
let errorMessage = `Failed to update mute rule: ${response.statusText}`;
|
||||
try {
|
||||
const errorData = await response.json();
|
||||
errorMessage =
|
||||
errorData?.errors?.[0]?.detail || errorData?.message || errorMessage;
|
||||
} catch {
|
||||
// JSON parsing failed, use default error message
|
||||
}
|
||||
throw new Error(errorMessage);
|
||||
}
|
||||
|
||||
revalidatePath("/mutelist");
|
||||
|
||||
return { success: "Mute rule updated successfully!" };
|
||||
} catch (error) {
|
||||
console.error("Error updating mute rule:", error);
|
||||
return {
|
||||
errors: {
|
||||
general:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "Error updating mute rule. Please try again.",
|
||||
},
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
export const toggleMuteRule = async (
|
||||
id: string,
|
||||
enabled: boolean,
|
||||
): Promise<{ success?: string; error?: string }> => {
|
||||
const headers = await getAuthHeaders({ contentType: true });
|
||||
|
||||
try {
|
||||
const url = new URL(`${apiBaseUrl}/mute-rules/${id}`);
|
||||
|
||||
const bodyData = {
|
||||
data: {
|
||||
type: "mute-rules",
|
||||
id,
|
||||
attributes: {
|
||||
enabled,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "PATCH",
|
||||
headers,
|
||||
body: JSON.stringify(bodyData),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
let errorMessage = `Failed to toggle mute rule: ${response.statusText}`;
|
||||
try {
|
||||
const errorData = await response.json();
|
||||
errorMessage =
|
||||
errorData?.errors?.[0]?.detail || errorData?.message || errorMessage;
|
||||
} catch {
|
||||
// JSON parsing failed, use default error message
|
||||
}
|
||||
throw new Error(errorMessage);
|
||||
}
|
||||
|
||||
revalidatePath("/mutelist");
|
||||
|
||||
return {
|
||||
success: `Mute rule ${enabled ? "enabled" : "disabled"} successfully!`,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error("Error toggling mute rule:", error);
|
||||
return {
|
||||
error:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "Error toggling mute rule. Please try again.",
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
export const deleteMuteRule = async (
|
||||
_prevState: DeleteMuteRuleActionState,
|
||||
formData: FormData,
|
||||
): Promise<DeleteMuteRuleActionState> => {
|
||||
const headers = await getAuthHeaders({ contentType: true });
|
||||
const id = formData.get("id") as string;
|
||||
|
||||
if (!id) {
|
||||
return {
|
||||
errors: {
|
||||
general: "Mute rule ID is required for deletion",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const url = new URL(`${apiBaseUrl}/mute-rules/${id}`);
|
||||
const response = await fetch(url.toString(), {
|
||||
method: "DELETE",
|
||||
headers,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.json().catch(() => ({}));
|
||||
throw new Error(
|
||||
errorData.errors?.[0]?.detail ||
|
||||
`Failed to delete mute rule: ${response.statusText}`,
|
||||
);
|
||||
}
|
||||
|
||||
revalidatePath("/mutelist");
|
||||
|
||||
return { success: "Mute rule deleted successfully!" };
|
||||
} catch (error) {
|
||||
console.error("Error deleting mute rule:", error);
|
||||
return {
|
||||
errors: {
|
||||
general:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "Error deleting mute rule. Please try again.",
|
||||
},
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Note: Adding findings to existing mute rules is not supported by the API.
|
||||
// The MuteRuleUpdateSerializer only allows updating name, reason, and enabled fields.
|
||||
// finding_ids can only be specified when creating a new mute rule.
|
||||
|
||||
// Note: Unmute functionality is not currently supported by the API.
|
||||
// The FindingViewSet only allows GET operations, and deleting a mute rule
|
||||
// does not unmute the findings ("Previously muted findings remain muted").
|
||||
@@ -1 +0,0 @@
|
||||
export * from "./mute-rules.types";
|
||||
@@ -1,82 +0,0 @@
|
||||
// Mute Rules Types
// Corresponds to the /mute-rules endpoint

// Base relationship data structure (JSON:API resource identifier for a user).
export interface RelationshipData {
  type: "users";
  id: string;
}

// Relationship wrapper; `data` is null when the creator is unknown/removed.
export interface CreatedByRelationship {
  data: RelationshipData | null;
}

// Relationships exposed on a mute-rule resource.
export interface MuteRuleRelationships {
  created_by?: CreatedByRelationship;
}

// Attributes of a mute rule as returned by the API.
export interface MuteRuleAttributes {
  inserted_at: string; // creation timestamp
  updated_at: string; // last-modification timestamp
  name: string;
  reason: string;
  enabled: boolean;
  finding_uids: string[]; // UIDs of the findings muted by this rule
}

// A single mute-rule resource object (JSON:API shape).
export interface MuteRuleData {
  type: "mute-rules";
  id: string;
  attributes: MuteRuleAttributes;
  relationships?: MuteRuleRelationships;
}

// Response pagination and links
export interface MuteRulesPagination {
  page: number;
  pages: number;
  count: number;
}

export interface MuteRulesMeta {
  pagination: MuteRulesPagination;
}

export interface MuteRulesLinks {
  first: string;
  last: string;
  next: string | null;
  prev: string | null;
}

// List response for GET /mute-rules.
export interface MuteRulesResponse {
  data: MuteRuleData[];
  meta: MuteRulesMeta;
  links: MuteRulesLinks;
}

// Single-resource response for GET/PATCH /mute-rules/{id}.
export interface MuteRuleResponse {
  data: MuteRuleData;
}

// Action state types
// Field-level validation errors returned by the create/update actions;
// `general` carries non-field (network/API) errors.
export interface MuteRuleActionErrors {
  name?: string;
  reason?: string;
  finding_ids?: string;
  general?: string;
}

// State object used with React's useActionState; null before first submit.
export type MuteRuleActionState = {
  errors?: MuteRuleActionErrors;
  success?: string;
} | null;

export interface DeleteMuteRuleActionErrors {
  general?: string;
}

export type DeleteMuteRuleActionState = {
  errors?: DeleteMuteRuleActionErrors;
  success?: string;
} | null;
|
||||
@@ -11,10 +11,11 @@ import { getProviders } from "@/actions/providers";
|
||||
import { getScans } from "@/actions/scans";
|
||||
import { FindingsFilters } from "@/components/findings/findings-filters";
|
||||
import {
|
||||
FindingsTableWithSelection,
|
||||
ColumnFindings,
|
||||
SkeletonTableFindings,
|
||||
} from "@/components/findings/table";
|
||||
import { ContentLayout } from "@/components/ui";
|
||||
import { DataTable } from "@/components/ui/table";
|
||||
import {
|
||||
createDict,
|
||||
createScanDetailsMapping,
|
||||
@@ -165,7 +166,9 @@ const SSRDataTable = async ({
|
||||
<p>{findingsData.errors[0].detail}</p>
|
||||
</div>
|
||||
)}
|
||||
<FindingsTableWithSelection
|
||||
<DataTable
|
||||
key={Date.now()}
|
||||
columns={ColumnFindings}
|
||||
data={expandedResponse?.data || []}
|
||||
metadata={findingsData?.meta}
|
||||
/>
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import React from "react";
|
||||
|
||||
import {
|
||||
ApiKeyLinkCard,
|
||||
JiraIntegrationCard,
|
||||
S3IntegrationCard,
|
||||
SecurityHubIntegrationCard,
|
||||
SsoLinkCard,
|
||||
} from "@/components/integrations";
|
||||
import { ContentLayout } from "@/components/ui";
|
||||
|
||||
@@ -27,12 +27,6 @@ export default async function Integrations() {
|
||||
|
||||
{/* Jira Integration */}
|
||||
<JiraIntegrationCard />
|
||||
|
||||
{/* SSO Configuration - redirects to Profile */}
|
||||
<SsoLinkCard />
|
||||
|
||||
{/* API Keys - redirects to Profile */}
|
||||
<ApiKeyLinkCard />
|
||||
</div>
|
||||
</div>
|
||||
</ContentLayout>
|
||||
|
||||
@@ -27,14 +27,12 @@ export default async function AIChatbot() {
|
||||
|
||||
return (
|
||||
<ContentLayout title="Lighthouse AI" icon={<LighthouseIcon />}>
|
||||
<div className="-mx-6 -my-4 h-[calc(100dvh-4.5rem)] sm:-mx-8">
|
||||
<Chat
|
||||
hasConfig={hasConfig}
|
||||
providers={providersConfig.providers}
|
||||
defaultProviderId={providersConfig.defaultProviderId}
|
||||
defaultModelId={providersConfig.defaultModelId}
|
||||
/>
|
||||
</div>
|
||||
<Chat
|
||||
hasConfig={hasConfig}
|
||||
providers={providersConfig.providers}
|
||||
defaultProviderId={providersConfig.defaultProviderId}
|
||||
defaultModelId={providersConfig.defaultModelId}
|
||||
/>
|
||||
</ContentLayout>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,297 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Textarea } from "@heroui/input";
|
||||
import { Trash2 } from "lucide-react";
|
||||
import { useActionState, useEffect, useState } from "react";
|
||||
|
||||
import {
|
||||
createMutedFindingsConfig,
|
||||
deleteMutedFindingsConfig,
|
||||
getMutedFindingsConfig,
|
||||
updateMutedFindingsConfig,
|
||||
} from "@/actions/processors";
|
||||
import { Button, Card, Skeleton } from "@/components/shadcn";
|
||||
import { useToast } from "@/components/ui";
|
||||
import { CustomAlertModal } from "@/components/ui/custom";
|
||||
import { CustomLink } from "@/components/ui/custom/custom-link";
|
||||
import { fontMono } from "@/config/fonts";
|
||||
import {
|
||||
convertToYaml,
|
||||
defaultMutedFindingsConfig,
|
||||
parseYamlValidation,
|
||||
} from "@/lib/yaml";
|
||||
import {
|
||||
MutedFindingsConfigActionState,
|
||||
ProcessorData,
|
||||
} from "@/types/processors";
|
||||
|
||||
export function AdvancedMutelistForm() {
|
||||
const [config, setConfig] = useState<ProcessorData | null>(null);
|
||||
const [configText, setConfigText] = useState("");
|
||||
const [showDeleteConfirmation, setShowDeleteConfirmation] = useState(false);
|
||||
const [isDeleting, setIsDeleting] = useState(false);
|
||||
const [isLoading, setIsLoading] = useState(true);
|
||||
const [yamlValidation, setYamlValidation] = useState<{
|
||||
isValid: boolean;
|
||||
error?: string;
|
||||
}>({ isValid: true });
|
||||
const [hasUserStartedTyping, setHasUserStartedTyping] = useState(false);
|
||||
|
||||
// Unified action that decides to create or update based on ID presence
|
||||
const saveConfig = async (
|
||||
_prevState: MutedFindingsConfigActionState,
|
||||
formData: FormData,
|
||||
): Promise<MutedFindingsConfigActionState> => {
|
||||
const id = formData.get("id");
|
||||
if (id) {
|
||||
return updateMutedFindingsConfig(_prevState, formData);
|
||||
}
|
||||
return createMutedFindingsConfig(_prevState, formData);
|
||||
};
|
||||
|
||||
const [state, formAction, isPending] = useActionState<
|
||||
MutedFindingsConfigActionState,
|
||||
FormData
|
||||
>(saveConfig, null);
|
||||
|
||||
const { toast } = useToast();
|
||||
|
||||
useEffect(() => {
|
||||
getMutedFindingsConfig().then((result) => {
|
||||
setConfig(result || null);
|
||||
const yamlConfig = convertToYaml(result?.attributes.configuration || "");
|
||||
setConfigText(yamlConfig);
|
||||
setHasUserStartedTyping(false);
|
||||
if (yamlConfig) {
|
||||
setYamlValidation(parseYamlValidation(yamlConfig));
|
||||
}
|
||||
setIsLoading(false);
|
||||
});
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
if (state?.success) {
|
||||
toast({
|
||||
title: "Configuration saved successfully",
|
||||
description: state.success,
|
||||
});
|
||||
// Reload config to get the created/updated data (shows Delete button)
|
||||
getMutedFindingsConfig().then((result) => {
|
||||
setConfig(result || null);
|
||||
});
|
||||
} else if (state?.errors?.general) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Oops! Something went wrong",
|
||||
description: state.errors.general,
|
||||
});
|
||||
} else if (state?.errors?.configuration) {
|
||||
setHasUserStartedTyping(false);
|
||||
}
|
||||
}, [state, toast]);
|
||||
|
||||
const handleConfigChange = (value: string) => {
|
||||
setConfigText(value);
|
||||
setHasUserStartedTyping(true);
|
||||
const validation = parseYamlValidation(value);
|
||||
setYamlValidation(validation);
|
||||
};
|
||||
|
||||
const handleDelete = async () => {
|
||||
if (!config) return;
|
||||
|
||||
setIsDeleting(true);
|
||||
const formData = new FormData();
|
||||
formData.append("id", config.id);
|
||||
|
||||
try {
|
||||
const result = await deleteMutedFindingsConfig(null, formData);
|
||||
if (result?.success) {
|
||||
toast({
|
||||
title: "Configuration deleted successfully",
|
||||
description: result.success,
|
||||
});
|
||||
setConfig(null);
|
||||
setConfigText("");
|
||||
} else if (result?.errors?.general) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Oops! Something went wrong",
|
||||
description: result.errors.general,
|
||||
});
|
||||
}
|
||||
} catch {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Oops! Something went wrong",
|
||||
description: "Error deleting configuration. Please try again.",
|
||||
});
|
||||
} finally {
|
||||
setIsDeleting(false);
|
||||
setShowDeleteConfirmation(false);
|
||||
}
|
||||
};
|
||||
|
||||
if (isLoading) {
|
||||
return (
|
||||
<Card variant="base" className="p-6">
|
||||
<div className="flex flex-col gap-4">
|
||||
<Skeleton className="h-6 w-48" />
|
||||
<Skeleton className="h-4 w-full" />
|
||||
<Skeleton className="h-4 w-3/4" />
|
||||
<Skeleton className="h-[400px] w-full" />
|
||||
<div className="flex w-full justify-end gap-4">
|
||||
<Skeleton className="h-10 w-24" />
|
||||
<Skeleton className="h-10 w-24" />
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* Delete Confirmation Modal */}
|
||||
<CustomAlertModal
|
||||
isOpen={showDeleteConfirmation}
|
||||
onOpenChange={setShowDeleteConfirmation}
|
||||
title="Delete Mutelist Configuration"
|
||||
size="md"
|
||||
>
|
||||
<div className="flex flex-col gap-4">
|
||||
<p className="text-default-600 text-sm">
|
||||
Are you sure you want to delete this configuration? This action
|
||||
cannot be undone.
|
||||
</p>
|
||||
<div className="flex w-full justify-end gap-4">
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="lg"
|
||||
onClick={() => setShowDeleteConfirmation(false)}
|
||||
disabled={isDeleting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
type="button"
|
||||
variant="destructive"
|
||||
size="lg"
|
||||
disabled={isDeleting}
|
||||
onClick={handleDelete}
|
||||
>
|
||||
<Trash2 className="size-4" />
|
||||
{isDeleting ? "Deleting..." : "Delete"}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</CustomAlertModal>
|
||||
|
||||
<Card variant="base" className="p-6">
|
||||
<form action={formAction} className="flex flex-col gap-4">
|
||||
{config && <input type="hidden" name="id" value={config.id} />}
|
||||
|
||||
<div className="flex flex-col gap-4">
|
||||
<div>
|
||||
<h3 className="text-default-700 mb-2 text-lg font-semibold">
|
||||
Advanced Mutelist Configuration
|
||||
</h3>
|
||||
<ul className="text-default-600 mb-4 list-disc pl-5 text-sm">
|
||||
<li>
|
||||
<strong>
|
||||
This Mutelist configuration will take effect on the next
|
||||
scan.
|
||||
</strong>
|
||||
</li>
|
||||
<li>
|
||||
Use this for pattern-based muting with wildcards, regions, and
|
||||
tags.
|
||||
</li>
|
||||
<li>
|
||||
Learn more about configuring the Mutelist{" "}
|
||||
<CustomLink
|
||||
size="sm"
|
||||
href="https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app-mute-findings"
|
||||
>
|
||||
here
|
||||
</CustomLink>
|
||||
.
|
||||
</li>
|
||||
<li>
|
||||
A default Mutelist is used to exclude certain predefined
|
||||
resources if no Mutelist is provided.
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-col gap-2">
|
||||
<label
|
||||
htmlFor="configuration"
|
||||
className="text-default-700 text-sm font-medium"
|
||||
>
|
||||
Mutelist Configuration (YAML)
|
||||
</label>
|
||||
<div>
|
||||
<Textarea
|
||||
id="configuration"
|
||||
name="configuration"
|
||||
placeholder={defaultMutedFindingsConfig}
|
||||
variant="bordered"
|
||||
value={configText}
|
||||
onChange={(e) => handleConfigChange(e.target.value)}
|
||||
minRows={20}
|
||||
maxRows={20}
|
||||
isInvalid={
|
||||
(!hasUserStartedTyping && !!state?.errors?.configuration) ||
|
||||
!yamlValidation.isValid
|
||||
}
|
||||
errorMessage={
|
||||
(!hasUserStartedTyping && state?.errors?.configuration) ||
|
||||
(!yamlValidation.isValid ? yamlValidation.error : "")
|
||||
}
|
||||
classNames={{
|
||||
input: fontMono.className + " text-sm",
|
||||
base: "min-h-[400px]",
|
||||
errorMessage: "whitespace-pre-wrap",
|
||||
}}
|
||||
/>
|
||||
{yamlValidation.isValid &&
|
||||
configText &&
|
||||
hasUserStartedTyping && (
|
||||
<div className="text-tiny text-success my-1 flex items-center px-1">
|
||||
<span>Valid YAML format</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex w-full justify-end gap-4">
|
||||
{config && (
|
||||
<Button
|
||||
type="button"
|
||||
aria-label="Delete Configuration"
|
||||
variant="outline"
|
||||
size="lg"
|
||||
onClick={() => setShowDeleteConfirmation(true)}
|
||||
disabled={isPending || isDeleting}
|
||||
>
|
||||
<Trash2 className="size-4" />
|
||||
Delete
|
||||
</Button>
|
||||
)}
|
||||
<Button
|
||||
type="submit"
|
||||
size="lg"
|
||||
disabled={
|
||||
isPending || !yamlValidation.isValid || !configText.trim()
|
||||
}
|
||||
>
|
||||
{isPending ? "Saving..." : config ? "Update" : "Save"}
|
||||
</Button>
|
||||
</div>
|
||||
</form>
|
||||
</Card>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
// Barrel file for the mute-rules table components.
export { MuteRuleEditForm } from "./mute-rule-edit-form";
export { MuteRuleEnabledToggle } from "./mute-rule-enabled-toggle";
export { MuteRuleRowActions } from "./mute-rule-row-actions";
export { createMuteRulesColumns } from "./mute-rules-columns";
export { MuteRulesTable, MuteRulesTableSkeleton } from "./mute-rules-table";
export { MuteRulesTableClient } from "./mute-rules-table-client";
||||
@@ -1,93 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Input, Textarea } from "@heroui/input";
|
||||
import { useActionState, useEffect } from "react";
|
||||
|
||||
import { updateMuteRule } from "@/actions/mute-rules";
|
||||
import { MuteRuleActionState, MuteRuleData } from "@/actions/mute-rules/types";
|
||||
import { useToast } from "@/components/ui";
|
||||
import { FormButtons } from "@/components/ui/form";
|
||||
|
||||
interface MuteRuleEditFormProps {
|
||||
muteRule: MuteRuleData;
|
||||
onSuccess: () => void;
|
||||
onCancel: () => void;
|
||||
}
|
||||
|
||||
export function MuteRuleEditForm({
|
||||
muteRule,
|
||||
onSuccess,
|
||||
onCancel,
|
||||
}: MuteRuleEditFormProps) {
|
||||
const { toast } = useToast();
|
||||
|
||||
const [state, formAction, isPending] = useActionState<
|
||||
MuteRuleActionState,
|
||||
FormData
|
||||
>(updateMuteRule, null);
|
||||
|
||||
useEffect(() => {
|
||||
if (state?.success) {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: state.success,
|
||||
});
|
||||
onSuccess();
|
||||
} else if (state?.errors?.general) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: state.errors.general,
|
||||
});
|
||||
}
|
||||
}, [state, toast, onSuccess]);
|
||||
|
||||
return (
|
||||
<form action={formAction} className="flex flex-col gap-4">
|
||||
<input type="hidden" name="id" value={muteRule.id} />
|
||||
|
||||
<Input
|
||||
name="name"
|
||||
label="Name"
|
||||
placeholder="Enter rule name"
|
||||
defaultValue={muteRule.attributes.name}
|
||||
isRequired
|
||||
variant="bordered"
|
||||
isInvalid={!!state?.errors?.name}
|
||||
errorMessage={state?.errors?.name}
|
||||
isDisabled={isPending}
|
||||
/>
|
||||
|
||||
<Textarea
|
||||
name="reason"
|
||||
label="Reason"
|
||||
placeholder="Enter the reason for muting these findings"
|
||||
defaultValue={muteRule.attributes.reason}
|
||||
isRequired
|
||||
variant="bordered"
|
||||
minRows={3}
|
||||
maxRows={6}
|
||||
isInvalid={!!state?.errors?.reason}
|
||||
errorMessage={state?.errors?.reason}
|
||||
isDisabled={isPending}
|
||||
/>
|
||||
|
||||
<div className="text-default-500 text-xs">
|
||||
<p>
|
||||
This rule is applied to{" "}
|
||||
{muteRule.attributes.finding_uids?.length || 0} findings.
|
||||
</p>
|
||||
<p className="mt-1">
|
||||
Note: You cannot modify the findings associated with this rule after
|
||||
creation.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<FormButtons
|
||||
onCancel={onCancel}
|
||||
submitText="Update"
|
||||
isDisabled={isPending}
|
||||
/>
|
||||
</form>
|
||||
);
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Switch } from "@heroui/switch";
|
||||
import { useState } from "react";
|
||||
|
||||
import { toggleMuteRule } from "@/actions/mute-rules";
|
||||
import { MuteRuleData } from "@/actions/mute-rules/types";
|
||||
import { useToast } from "@/components/ui";
|
||||
|
||||
interface MuteRuleEnabledToggleProps {
|
||||
muteRule: MuteRuleData;
|
||||
}
|
||||
|
||||
export function MuteRuleEnabledToggle({
|
||||
muteRule,
|
||||
}: MuteRuleEnabledToggleProps) {
|
||||
const [isEnabled, setIsEnabled] = useState(muteRule.attributes.enabled);
|
||||
const [isLoading, setIsLoading] = useState(false);
|
||||
const { toast } = useToast();
|
||||
|
||||
const handleToggle = async (value: boolean) => {
|
||||
setIsLoading(true);
|
||||
setIsEnabled(value);
|
||||
|
||||
const result = await toggleMuteRule(muteRule.id, value);
|
||||
|
||||
if (result.error) {
|
||||
// Revert on error
|
||||
setIsEnabled(!value);
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: result.error,
|
||||
});
|
||||
} else if (result.success) {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: result.success,
|
||||
});
|
||||
}
|
||||
|
||||
setIsLoading(false);
|
||||
};
|
||||
|
||||
return (
|
||||
<Switch
|
||||
isSelected={isEnabled}
|
||||
onValueChange={handleToggle}
|
||||
isDisabled={isLoading}
|
||||
size="sm"
|
||||
aria-label={`Toggle mute rule ${muteRule.attributes.name}`}
|
||||
/>
|
||||
);
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import {
|
||||
Dropdown,
|
||||
DropdownItem,
|
||||
DropdownMenu,
|
||||
DropdownSection,
|
||||
DropdownTrigger,
|
||||
} from "@heroui/dropdown";
|
||||
import { Pencil, Trash2 } from "lucide-react";
|
||||
|
||||
import { MuteRuleData } from "@/actions/mute-rules/types";
|
||||
import { VerticalDotsIcon } from "@/components/icons";
|
||||
import { Button } from "@/components/shadcn";
|
||||
|
||||
interface MuteRuleRowActionsProps {
|
||||
muteRule: MuteRuleData;
|
||||
onEdit: (muteRule: MuteRuleData) => void;
|
||||
onDelete: (muteRule: MuteRuleData) => void;
|
||||
}
|
||||
|
||||
export function MuteRuleRowActions({
|
||||
muteRule,
|
||||
onEdit,
|
||||
onDelete,
|
||||
}: MuteRuleRowActionsProps) {
|
||||
return (
|
||||
<div className="flex items-center justify-center px-2">
|
||||
<Dropdown
|
||||
className="border-border-neutral-secondary bg-bg-neutral-secondary border shadow-xl"
|
||||
placement="bottom"
|
||||
>
|
||||
<DropdownTrigger>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon-sm"
|
||||
className="size-7 rounded-full"
|
||||
>
|
||||
<VerticalDotsIcon
|
||||
size={16}
|
||||
className="text-text-neutral-secondary"
|
||||
/>
|
||||
</Button>
|
||||
</DropdownTrigger>
|
||||
<DropdownMenu
|
||||
closeOnSelect
|
||||
aria-label="Mute rule actions"
|
||||
color="default"
|
||||
variant="flat"
|
||||
>
|
||||
<DropdownSection title="Actions">
|
||||
<DropdownItem
|
||||
key="edit"
|
||||
description="Edit rule name and reason"
|
||||
textValue="Edit"
|
||||
startContent={
|
||||
<Pencil className="text-default-500 pointer-events-none size-4 shrink-0" />
|
||||
}
|
||||
onPress={() => onEdit(muteRule)}
|
||||
>
|
||||
Edit
|
||||
</DropdownItem>
|
||||
<DropdownItem
|
||||
key="delete"
|
||||
description="Delete this mute rule"
|
||||
textValue="Delete"
|
||||
className="text-danger"
|
||||
color="danger"
|
||||
classNames={{
|
||||
description: "text-danger",
|
||||
}}
|
||||
startContent={
|
||||
<Trash2 className="pointer-events-none size-4 shrink-0" />
|
||||
}
|
||||
onPress={() => onDelete(muteRule)}
|
||||
>
|
||||
Delete
|
||||
</DropdownItem>
|
||||
</DropdownSection>
|
||||
</DropdownMenu>
|
||||
</Dropdown>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { ColumnDef } from "@tanstack/react-table";
|
||||
|
||||
import { MuteRuleData } from "@/actions/mute-rules/types";
|
||||
import { DateWithTime } from "@/components/ui/entities";
|
||||
import { DataTableColumnHeader } from "@/components/ui/table";
|
||||
|
||||
import { MuteRuleEnabledToggle } from "./mute-rule-enabled-toggle";
|
||||
import { MuteRuleRowActions } from "./mute-rule-row-actions";
|
||||
|
||||
export const createMuteRulesColumns = (
|
||||
onEdit: (muteRule: MuteRuleData) => void,
|
||||
onDelete: (muteRule: MuteRuleData) => void,
|
||||
): ColumnDef<MuteRuleData>[] => [
|
||||
{
|
||||
accessorKey: "name",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Name" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const name = row.original.attributes.name;
|
||||
return (
|
||||
<div className="max-w-[200px]">
|
||||
<p className="truncate text-sm font-medium">{name}</p>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "reason",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Reason" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const reason = row.original.attributes.reason;
|
||||
return (
|
||||
<div className="max-w-[300px]">
|
||||
<p className="truncate text-sm text-slate-600 dark:text-slate-400">
|
||||
{reason}
|
||||
</p>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "finding_count",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Findings" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const count = row.original.attributes.finding_uids?.length || 0;
|
||||
return (
|
||||
<div className="w-[80px]">
|
||||
<span className="rounded-full bg-slate-100 px-2 py-1 text-xs font-medium dark:bg-slate-800">
|
||||
{count}
|
||||
</span>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "inserted_at",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title="Created"
|
||||
param="inserted_at"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const insertedAt = row.original.attributes.inserted_at;
|
||||
return (
|
||||
<div className="w-[120px]">
|
||||
<DateWithTime dateTime={insertedAt} />
|
||||
</div>
|
||||
);
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "enabled",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Enabled" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
return <MuteRuleEnabledToggle muteRule={row.original} />;
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
id: "actions",
|
||||
header: () => (
|
||||
<div className="flex items-center justify-center px-2">
|
||||
<span className="text-sm font-semibold">Actions</span>
|
||||
</div>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
return (
|
||||
<MuteRuleRowActions
|
||||
muteRule={row.original}
|
||||
onEdit={onEdit}
|
||||
onDelete={onDelete}
|
||||
/>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
];
|
||||
@@ -1,145 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useDisclosure } from "@heroui/use-disclosure";
|
||||
import { Trash2 } from "lucide-react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useActionState, useEffect, useRef, useState } from "react";
|
||||
|
||||
import { deleteMuteRule } from "@/actions/mute-rules";
|
||||
import { MuteRuleData } from "@/actions/mute-rules/types";
|
||||
import { Button } from "@/components/shadcn";
|
||||
import { useToast } from "@/components/ui";
|
||||
import { CustomAlertModal } from "@/components/ui/custom";
|
||||
import { DataTable } from "@/components/ui/table";
|
||||
import { MetaDataProps } from "@/types";
|
||||
|
||||
import { MuteRuleEditForm } from "./mute-rule-edit-form";
|
||||
import { createMuteRulesColumns } from "./mute-rules-columns";
|
||||
|
||||
interface MuteRulesTableClientProps {
|
||||
muteRules: MuteRuleData[];
|
||||
metadata?: MetaDataProps;
|
||||
}
|
||||
|
||||
export function MuteRulesTableClient({
|
||||
muteRules,
|
||||
metadata,
|
||||
}: MuteRulesTableClientProps) {
|
||||
const router = useRouter();
|
||||
const { toast } = useToast();
|
||||
const [selectedMuteRule, setSelectedMuteRule] = useState<MuteRuleData | null>(
|
||||
null,
|
||||
);
|
||||
|
||||
const editModal = useDisclosure();
|
||||
const deleteModal = useDisclosure();
|
||||
const deleteModalRef = useRef(deleteModal);
|
||||
deleteModalRef.current = deleteModal;
|
||||
|
||||
const [deleteState, deleteAction, isDeleting] = useActionState(
|
||||
deleteMuteRule,
|
||||
null,
|
||||
);
|
||||
|
||||
// Handle delete state changes
|
||||
useEffect(() => {
|
||||
if (deleteState?.success) {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: deleteState.success,
|
||||
});
|
||||
deleteModalRef.current.onClose();
|
||||
router.refresh();
|
||||
} else if (deleteState?.errors?.general) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: deleteState.errors.general,
|
||||
});
|
||||
}
|
||||
}, [deleteState, toast, router]);
|
||||
|
||||
const handleEditClick = (muteRule: MuteRuleData) => {
|
||||
setSelectedMuteRule(muteRule);
|
||||
editModal.onOpen();
|
||||
};
|
||||
|
||||
const handleDeleteClick = (muteRule: MuteRuleData) => {
|
||||
setSelectedMuteRule(muteRule);
|
||||
deleteModal.onOpen();
|
||||
};
|
||||
|
||||
const handleEditSuccess = () => {
|
||||
editModal.onClose();
|
||||
router.refresh();
|
||||
};
|
||||
|
||||
const columns = createMuteRulesColumns(handleEditClick, handleDeleteClick);
|
||||
|
||||
return (
|
||||
<>
|
||||
<DataTable columns={columns} data={muteRules} metadata={metadata} />
|
||||
|
||||
{/* Edit Modal */}
|
||||
{selectedMuteRule && (
|
||||
<CustomAlertModal
|
||||
isOpen={editModal.isOpen}
|
||||
onOpenChange={editModal.onOpenChange}
|
||||
title="Edit Mute Rule"
|
||||
size="lg"
|
||||
>
|
||||
<MuteRuleEditForm
|
||||
muteRule={selectedMuteRule}
|
||||
onSuccess={handleEditSuccess}
|
||||
onCancel={editModal.onClose}
|
||||
/>
|
||||
</CustomAlertModal>
|
||||
)}
|
||||
|
||||
{/* Delete Confirmation Modal */}
|
||||
{selectedMuteRule && (
|
||||
<CustomAlertModal
|
||||
isOpen={deleteModal.isOpen}
|
||||
onOpenChange={deleteModal.onOpenChange}
|
||||
title="Delete Mute Rule"
|
||||
size="md"
|
||||
>
|
||||
<div className="flex flex-col gap-4">
|
||||
<p className="text-default-600 text-sm">
|
||||
Are you sure you want to delete the mute rule "
|
||||
{selectedMuteRule.attributes.name}"? This action cannot be
|
||||
undone.
|
||||
</p>
|
||||
<p className="text-default-500 text-xs">
|
||||
Note: This will not unmute the findings that were muted by this
|
||||
rule.
|
||||
</p>
|
||||
<div className="flex w-full justify-end gap-4">
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="lg"
|
||||
onClick={deleteModal.onClose}
|
||||
disabled={isDeleting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<form action={deleteAction}>
|
||||
<input type="hidden" name="id" value={selectedMuteRule.id} />
|
||||
<Button
|
||||
type="submit"
|
||||
variant="destructive"
|
||||
size="lg"
|
||||
disabled={isDeleting}
|
||||
>
|
||||
<Trash2 className="size-4" />
|
||||
{isDeleting ? "Deleting..." : "Delete"}
|
||||
</Button>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</CustomAlertModal>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,109 +0,0 @@
|
||||
import { Info } from "lucide-react";
|
||||
|
||||
import { getMuteRules } from "@/actions/mute-rules";
|
||||
import { Card, Skeleton } from "@/components/shadcn";
|
||||
import { SearchParamsProps } from "@/types/components";
|
||||
|
||||
import { MuteRulesTableClient } from "./mute-rules-table-client";
|
||||
|
||||
interface MuteRulesTableProps {
|
||||
searchParams: SearchParamsProps;
|
||||
}
|
||||
|
||||
export async function MuteRulesTable({ searchParams }: MuteRulesTableProps) {
|
||||
const page = parseInt(searchParams.page?.toString() || "1", 10);
|
||||
const pageSize = parseInt(searchParams.pageSize?.toString() || "10", 10);
|
||||
const sort = searchParams.sort?.toString() || "-inserted_at";
|
||||
|
||||
const muteRulesData = await getMuteRules({
|
||||
page,
|
||||
pageSize,
|
||||
sort,
|
||||
});
|
||||
|
||||
const muteRules = muteRulesData?.data || [];
|
||||
|
||||
if (muteRules.length === 0) {
|
||||
return (
|
||||
<Card variant="base" className="p-8">
|
||||
<div className="flex flex-col items-center justify-center gap-4 text-center">
|
||||
<div className="rounded-full bg-slate-100 p-4 dark:bg-slate-800">
|
||||
<Info className="size-8 text-slate-500" />
|
||||
</div>
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-slate-900 dark:text-white">
|
||||
No mute rules yet
|
||||
</h3>
|
||||
<p className="mt-1 text-sm text-slate-500 dark:text-slate-400">
|
||||
Mute rules are created when you mute findings from the Findings
|
||||
page. Select findings and click "Mute" to create your
|
||||
first rule.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<Card variant="base" className="p-6">
|
||||
<div className="mb-6">
|
||||
<h3 className="text-default-700 mb-2 text-lg font-semibold">
|
||||
Simple Mutelist Rules
|
||||
</h3>
|
||||
<ul className="text-default-600 list-disc pl-5 text-sm">
|
||||
<li>
|
||||
<strong>
|
||||
These rules take effect immediately on existing findings.
|
||||
</strong>
|
||||
</li>
|
||||
<li>
|
||||
Create rules by selecting findings from the Findings page and
|
||||
clicking "Mute".
|
||||
</li>
|
||||
<li>Toggle rules on/off to enable or disable muting.</li>
|
||||
</ul>
|
||||
</div>
|
||||
<MuteRulesTableClient
|
||||
muteRules={muteRules}
|
||||
metadata={
|
||||
muteRulesData?.meta
|
||||
? { ...muteRulesData.meta, version: "" }
|
||||
: undefined
|
||||
}
|
||||
/>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
export function MuteRulesTableSkeleton() {
|
||||
return (
|
||||
<div className="flex flex-col gap-4">
|
||||
<div className="rounded-lg border border-slate-200 dark:border-slate-800">
|
||||
<div className="border-b border-slate-200 p-4 dark:border-slate-800">
|
||||
<div className="flex gap-8">
|
||||
<Skeleton className="h-4 w-20" />
|
||||
<Skeleton className="h-4 w-32" />
|
||||
<Skeleton className="h-4 w-16" />
|
||||
<Skeleton className="h-4 w-24" />
|
||||
<Skeleton className="h-4 w-16" />
|
||||
<Skeleton className="h-4 w-16" />
|
||||
</div>
|
||||
</div>
|
||||
{[...Array(5)].map((_, i) => (
|
||||
<div
|
||||
key={i}
|
||||
className="flex items-center gap-8 border-b border-slate-200 p-4 last:border-0 dark:border-slate-800"
|
||||
>
|
||||
<Skeleton className="h-4 w-24" />
|
||||
<Skeleton className="h-4 w-40" />
|
||||
<Skeleton className="h-4 w-12" />
|
||||
<Skeleton className="h-4 w-28" />
|
||||
<Skeleton className="h-5 w-10" />
|
||||
<Skeleton className="size-8 rounded-full" />
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { List, Settings } from "lucide-react";
|
||||
import { ReactNode } from "react";
|
||||
|
||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/shadcn";
|
||||
|
||||
import { AdvancedMutelistForm } from "./_components/advanced-mutelist-form";
|
||||
|
||||
interface MutelistTabsProps {
|
||||
simpleContent: ReactNode;
|
||||
}
|
||||
|
||||
export function MutelistTabs({ simpleContent }: MutelistTabsProps) {
|
||||
return (
|
||||
<Tabs defaultValue="simple" className="w-full">
|
||||
<TabsList className="mb-6">
|
||||
<TabsTrigger value="simple" className="gap-2">
|
||||
<List className="size-4" />
|
||||
Simple
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="advanced" className="gap-2">
|
||||
<Settings className="size-4" />
|
||||
Advanced
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
|
||||
<TabsContent value="simple">{simpleContent}</TabsContent>
|
||||
|
||||
<TabsContent value="advanced">
|
||||
<AdvancedMutelistForm />
|
||||
</TabsContent>
|
||||
</Tabs>
|
||||
);
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
import { Suspense } from "react";
|
||||
|
||||
import { ContentLayout } from "@/components/ui";
|
||||
import { SearchParamsProps } from "@/types/components";
|
||||
|
||||
import { MuteRulesTable, MuteRulesTableSkeleton } from "./_components/simple";
|
||||
import { MutelistTabs } from "./mutelist-tabs";
|
||||
|
||||
export default async function MutelistPage({
|
||||
searchParams,
|
||||
}: {
|
||||
searchParams: Promise<SearchParamsProps>;
|
||||
}) {
|
||||
const resolvedSearchParams = await searchParams;
|
||||
const searchParamsKey = JSON.stringify(resolvedSearchParams);
|
||||
|
||||
return (
|
||||
<ContentLayout title="Mutelist" icon="lucide:volume-x">
|
||||
<MutelistTabs
|
||||
simpleContent={
|
||||
<Suspense key={searchParamsKey} fallback={<MuteRulesTableSkeleton />}>
|
||||
<MuteRulesTable searchParams={resolvedSearchParams} />
|
||||
</Suspense>
|
||||
}
|
||||
/>
|
||||
</ContentLayout>
|
||||
);
|
||||
}
|
||||
@@ -1,21 +1,9 @@
|
||||
import { toUIMessageStream } from "@ai-sdk/langchain";
|
||||
import * as Sentry from "@sentry/nextjs";
|
||||
import { createUIMessageStreamResponse, UIMessage } from "ai";
|
||||
|
||||
import { getTenantConfig } from "@/actions/lighthouse/lighthouse";
|
||||
import { auth } from "@/auth.config";
|
||||
import { getErrorMessage } from "@/lib/helper";
|
||||
import {
|
||||
CHAIN_OF_THOUGHT_ACTIONS,
|
||||
createTextDeltaEvent,
|
||||
createTextEndEvent,
|
||||
createTextStartEvent,
|
||||
ERROR_PREFIX,
|
||||
handleChatModelEndEvent,
|
||||
handleChatModelStreamEvent,
|
||||
handleToolEvent,
|
||||
STREAM_MESSAGE_ID,
|
||||
} from "@/lib/lighthouse/analyst-stream";
|
||||
import { authContextStorage } from "@/lib/lighthouse/auth-context";
|
||||
import { getCurrentDataSection } from "@/lib/lighthouse/data";
|
||||
import { convertVercelMessageToLangChainMessage } from "@/lib/lighthouse/utils";
|
||||
import {
|
||||
@@ -40,144 +28,116 @@ export async function POST(req: Request) {
|
||||
return Response.json({ error: "No messages provided" }, { status: 400 });
|
||||
}
|
||||
|
||||
const session = await auth();
|
||||
if (!session?.accessToken) {
|
||||
return Response.json({ error: "Unauthorized" }, { status: 401 });
|
||||
// Create a new array for processed messages
|
||||
const processedMessages = [...messages];
|
||||
|
||||
// Get AI configuration to access business context
|
||||
const tenantConfigResult = await getTenantConfig();
|
||||
const businessContext =
|
||||
tenantConfigResult?.data?.attributes?.business_context;
|
||||
|
||||
// Get current user data
|
||||
const currentData = await getCurrentDataSection();
|
||||
|
||||
// Add context messages at the beginning
|
||||
const contextMessages: UIMessage[] = [];
|
||||
|
||||
// Add business context if available
|
||||
if (businessContext) {
|
||||
contextMessages.push({
|
||||
id: "business-context",
|
||||
role: "assistant",
|
||||
parts: [
|
||||
{
|
||||
type: "text",
|
||||
text: `Business Context Information:\n${businessContext}`,
|
||||
},
|
||||
],
|
||||
});
|
||||
}
|
||||
|
||||
const accessToken = session.accessToken;
|
||||
|
||||
return await authContextStorage.run(accessToken, async () => {
|
||||
// Get AI configuration to access business context
|
||||
const tenantConfigResult = await getTenantConfig();
|
||||
const businessContext =
|
||||
tenantConfigResult?.data?.attributes?.business_context;
|
||||
|
||||
// Get current user data
|
||||
const currentData = await getCurrentDataSection();
|
||||
|
||||
// Pass context to workflow instead of injecting as assistant messages
|
||||
const runtimeConfig: RuntimeConfig = {
|
||||
model,
|
||||
provider,
|
||||
businessContext,
|
||||
currentData,
|
||||
};
|
||||
|
||||
const app = await initLighthouseWorkflow(runtimeConfig);
|
||||
|
||||
// Use streamEvents to get token-by-token streaming + tool events
|
||||
const agentStream = app.streamEvents(
|
||||
{
|
||||
messages: messages
|
||||
.filter(
|
||||
(message: UIMessage) =>
|
||||
message.role === "user" || message.role === "assistant",
|
||||
)
|
||||
.map(convertVercelMessageToLangChainMessage),
|
||||
},
|
||||
{
|
||||
version: "v2",
|
||||
},
|
||||
);
|
||||
|
||||
// Custom stream transformer that handles both text and tool events
|
||||
const stream = new ReadableStream({
|
||||
async start(controller) {
|
||||
let hasStarted = false;
|
||||
|
||||
try {
|
||||
// Emit text-start at the beginning
|
||||
controller.enqueue(createTextStartEvent(STREAM_MESSAGE_ID));
|
||||
|
||||
for await (const streamEvent of agentStream) {
|
||||
const { event, data, tags, name } = streamEvent;
|
||||
|
||||
// Stream model tokens (smooth text streaming)
|
||||
if (event === "on_chat_model_stream") {
|
||||
const wasHandled = handleChatModelStreamEvent(
|
||||
controller,
|
||||
data,
|
||||
tags,
|
||||
);
|
||||
if (wasHandled) {
|
||||
hasStarted = true;
|
||||
}
|
||||
}
|
||||
// Model finished - check for tool calls
|
||||
else if (event === "on_chat_model_end") {
|
||||
handleChatModelEndEvent(controller, data);
|
||||
}
|
||||
// Tool execution started
|
||||
else if (event === "on_tool_start") {
|
||||
handleToolEvent(
|
||||
controller,
|
||||
CHAIN_OF_THOUGHT_ACTIONS.START,
|
||||
name,
|
||||
data?.input,
|
||||
);
|
||||
}
|
||||
// Tool execution completed
|
||||
else if (event === "on_tool_end") {
|
||||
handleToolEvent(
|
||||
controller,
|
||||
CHAIN_OF_THOUGHT_ACTIONS.COMPLETE,
|
||||
name,
|
||||
data?.input,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Emit text-end at the end
|
||||
controller.enqueue(createTextEndEvent(STREAM_MESSAGE_ID));
|
||||
|
||||
controller.close();
|
||||
} catch (error) {
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
|
||||
// Capture stream processing errors
|
||||
Sentry.captureException(error, {
|
||||
tags: {
|
||||
api_route: "lighthouse_analyst",
|
||||
error_type: SentryErrorType.STREAM_PROCESSING,
|
||||
error_source: SentryErrorSource.API_ROUTE,
|
||||
},
|
||||
level: "error",
|
||||
contexts: {
|
||||
lighthouse: {
|
||||
event_type: "stream_error",
|
||||
message_count: messages.length,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
// Emit error as text with consistent prefix
|
||||
// Use consistent ERROR_PREFIX for both scenarios so client can detect errors
|
||||
if (hasStarted) {
|
||||
controller.enqueue(
|
||||
createTextDeltaEvent(
|
||||
STREAM_MESSAGE_ID,
|
||||
`\n\n${ERROR_PREFIX} ${errorMessage}`,
|
||||
),
|
||||
);
|
||||
} else {
|
||||
controller.enqueue(
|
||||
createTextDeltaEvent(
|
||||
STREAM_MESSAGE_ID,
|
||||
`${ERROR_PREFIX} ${errorMessage}`,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
controller.enqueue(createTextEndEvent(STREAM_MESSAGE_ID));
|
||||
|
||||
controller.close();
|
||||
}
|
||||
},
|
||||
// Add current data if available
|
||||
if (currentData) {
|
||||
contextMessages.push({
|
||||
id: "current-data",
|
||||
role: "assistant",
|
||||
parts: [
|
||||
{
|
||||
type: "text",
|
||||
text: currentData,
|
||||
},
|
||||
],
|
||||
});
|
||||
}
|
||||
|
||||
return createUIMessageStreamResponse({ stream });
|
||||
// Insert all context messages at the beginning
|
||||
processedMessages.unshift(...contextMessages);
|
||||
|
||||
// Prepare runtime config with client-provided model
|
||||
const runtimeConfig: RuntimeConfig = {
|
||||
model,
|
||||
provider,
|
||||
};
|
||||
|
||||
const app = await initLighthouseWorkflow(runtimeConfig);
|
||||
|
||||
const agentStream = app.streamEvents(
|
||||
{
|
||||
messages: processedMessages
|
||||
.filter(
|
||||
(message: UIMessage) =>
|
||||
message.role === "user" || message.role === "assistant",
|
||||
)
|
||||
.map(convertVercelMessageToLangChainMessage),
|
||||
},
|
||||
{
|
||||
streamMode: ["values", "messages", "custom"],
|
||||
version: "v2",
|
||||
},
|
||||
);
|
||||
|
||||
const stream = new ReadableStream({
|
||||
async start(controller) {
|
||||
try {
|
||||
for await (const streamEvent of agentStream) {
|
||||
const { event, data, tags } = streamEvent;
|
||||
if (event === "on_chat_model_stream") {
|
||||
if (data.chunk.content && !!tags && tags.includes("supervisor")) {
|
||||
// Pass the raw LangChain stream event - toUIMessageStream will handle conversion
|
||||
controller.enqueue(streamEvent);
|
||||
}
|
||||
}
|
||||
}
|
||||
controller.close();
|
||||
} catch (error) {
|
||||
const errorMessage =
|
||||
error instanceof Error ? error.message : String(error);
|
||||
|
||||
// Capture stream processing errors
|
||||
Sentry.captureException(error, {
|
||||
tags: {
|
||||
api_route: "lighthouse_analyst",
|
||||
error_type: SentryErrorType.STREAM_PROCESSING,
|
||||
error_source: SentryErrorSource.API_ROUTE,
|
||||
},
|
||||
level: "error",
|
||||
contexts: {
|
||||
lighthouse: {
|
||||
event_type: "stream_error",
|
||||
message_count: processedMessages.length,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
controller.enqueue(`[LIGHTHOUSE_ANALYST_ERROR]: ${errorMessage}`);
|
||||
controller.close();
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
// Convert LangChain stream to UI message stream and return as SSE response
|
||||
return createUIMessageStreamResponse({
|
||||
stream: toUIMessageStream(stream),
|
||||
});
|
||||
} catch (error) {
|
||||
console.error("Error in POST request:", error);
|
||||
@@ -200,6 +160,9 @@ export async function POST(req: Request) {
|
||||
},
|
||||
});
|
||||
|
||||
return Response.json({ error: getErrorMessage(error) }, { status: 500 });
|
||||
return Response.json(
|
||||
{ error: await getErrorMessage(error) },
|
||||
{ status: 500 },
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
},
|
||||
"iconLibrary": "lucide",
|
||||
"aliases": {
|
||||
"components": "@/components",
|
||||
"utils": "@/lib/utils",
|
||||
@@ -18,7 +17,5 @@
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
},
|
||||
"registries": {
|
||||
"@ai-elements": "https://registry.ai-sdk.dev/{name}.json"
|
||||
}
|
||||
"iconLibrary": "lucide"
|
||||
}
|
||||
|
||||
@@ -1,232 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useControllableState } from "@radix-ui/react-use-controllable-state";
|
||||
import {
|
||||
BrainIcon,
|
||||
ChevronDownIcon,
|
||||
DotIcon,
|
||||
type LucideIcon,
|
||||
} from "lucide-react";
|
||||
import type { ComponentProps, ReactNode } from "react";
|
||||
import { createContext, memo, useContext, useMemo } from "react";
|
||||
|
||||
import { Badge } from "@/components/shadcn/badge/badge";
|
||||
import {
|
||||
Collapsible,
|
||||
CollapsibleContent,
|
||||
CollapsibleTrigger,
|
||||
} from "@/components/shadcn/collapsible";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
type ChainOfThoughtContextValue = {
|
||||
isOpen: boolean;
|
||||
setIsOpen: (open: boolean) => void;
|
||||
};
|
||||
|
||||
const ChainOfThoughtContext = createContext<ChainOfThoughtContextValue | null>(
|
||||
null,
|
||||
);
|
||||
|
||||
const useChainOfThought = () => {
|
||||
const context = useContext(ChainOfThoughtContext);
|
||||
if (!context) {
|
||||
throw new Error(
|
||||
"ChainOfThought components must be used within ChainOfThought",
|
||||
);
|
||||
}
|
||||
return context;
|
||||
};
|
||||
|
||||
export type ChainOfThoughtProps = ComponentProps<"div"> & {
|
||||
open?: boolean;
|
||||
defaultOpen?: boolean;
|
||||
onOpenChange?: (open: boolean) => void;
|
||||
};
|
||||
|
||||
export const ChainOfThought = memo(
|
||||
({
|
||||
className,
|
||||
open,
|
||||
defaultOpen = false,
|
||||
onOpenChange,
|
||||
children,
|
||||
...props
|
||||
}: ChainOfThoughtProps) => {
|
||||
const [isOpen, setIsOpen] = useControllableState({
|
||||
prop: open,
|
||||
defaultProp: defaultOpen,
|
||||
onChange: onOpenChange,
|
||||
});
|
||||
|
||||
const chainOfThoughtContext = useMemo(
|
||||
() => ({ isOpen, setIsOpen }),
|
||||
[isOpen, setIsOpen],
|
||||
);
|
||||
|
||||
return (
|
||||
<ChainOfThoughtContext.Provider value={chainOfThoughtContext}>
|
||||
<div
|
||||
className={cn("not-prose max-w-prose space-y-4", className)}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
</div>
|
||||
</ChainOfThoughtContext.Provider>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
export type ChainOfThoughtHeaderProps = ComponentProps<
|
||||
typeof CollapsibleTrigger
|
||||
>;
|
||||
|
||||
export const ChainOfThoughtHeader = memo(
|
||||
({ className, children, ...props }: ChainOfThoughtHeaderProps) => {
|
||||
const { isOpen, setIsOpen } = useChainOfThought();
|
||||
|
||||
return (
|
||||
<Collapsible onOpenChange={setIsOpen} open={isOpen}>
|
||||
<CollapsibleTrigger
|
||||
className={cn(
|
||||
"text-muted-foreground hover:text-foreground flex w-full items-center gap-2 text-sm transition-colors",
|
||||
className,
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<BrainIcon className="size-4" />
|
||||
<span className="flex-1 text-left">
|
||||
{children ?? "Chain of Thought"}
|
||||
</span>
|
||||
<ChevronDownIcon
|
||||
className={cn(
|
||||
"size-4 transition-transform",
|
||||
isOpen ? "rotate-180" : "rotate-0",
|
||||
)}
|
||||
/>
|
||||
</CollapsibleTrigger>
|
||||
</Collapsible>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
export type ChainOfThoughtStepProps = ComponentProps<"div"> & {
|
||||
icon?: LucideIcon;
|
||||
label: ReactNode;
|
||||
description?: ReactNode;
|
||||
status?: "complete" | "active" | "pending";
|
||||
};
|
||||
|
||||
export const ChainOfThoughtStep = memo(
|
||||
({
|
||||
className,
|
||||
icon: Icon = DotIcon,
|
||||
label,
|
||||
description,
|
||||
status = "complete",
|
||||
children,
|
||||
...props
|
||||
}: ChainOfThoughtStepProps) => {
|
||||
const statusStyles = {
|
||||
complete: "text-muted-foreground",
|
||||
active: "text-foreground",
|
||||
pending: "text-muted-foreground/50",
|
||||
};
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"flex gap-2 text-sm",
|
||||
statusStyles[status],
|
||||
"fade-in-0 slide-in-from-top-2 animate-in",
|
||||
className,
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<div className="relative mt-0.5">
|
||||
<Icon className="size-4" />
|
||||
<div className="bg-border absolute top-7 bottom-0 left-1/2 -mx-px w-px" />
|
||||
</div>
|
||||
<div className="flex-1 space-y-2 overflow-hidden">
|
||||
<div>{label}</div>
|
||||
{description && (
|
||||
<div className="text-muted-foreground text-xs">{description}</div>
|
||||
)}
|
||||
{children}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
export type ChainOfThoughtSearchResultsProps = ComponentProps<"div">;
|
||||
|
||||
export const ChainOfThoughtSearchResults = memo(
|
||||
({ className, ...props }: ChainOfThoughtSearchResultsProps) => (
|
||||
<div
|
||||
className={cn("flex flex-wrap items-center gap-2", className)}
|
||||
{...props}
|
||||
/>
|
||||
),
|
||||
);
|
||||
|
||||
export type ChainOfThoughtSearchResultProps = ComponentProps<typeof Badge>;
|
||||
|
||||
export const ChainOfThoughtSearchResult = memo(
|
||||
({ className, children, ...props }: ChainOfThoughtSearchResultProps) => (
|
||||
<Badge
|
||||
className={cn("gap-1 px-2 py-0.5 text-xs font-normal", className)}
|
||||
variant="secondary"
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
</Badge>
|
||||
),
|
||||
);
|
||||
|
||||
export type ChainOfThoughtContentProps = ComponentProps<
|
||||
typeof CollapsibleContent
|
||||
>;
|
||||
|
||||
export const ChainOfThoughtContent = memo(
|
||||
({ className, children, ...props }: ChainOfThoughtContentProps) => {
|
||||
const { isOpen } = useChainOfThought();
|
||||
|
||||
return (
|
||||
<Collapsible open={isOpen}>
|
||||
<CollapsibleContent
|
||||
className={cn(
|
||||
"mt-2 space-y-3",
|
||||
"data-[state=closed]:fade-out-0 data-[state=closed]:slide-out-to-top-2 data-[state=open]:slide-in-from-top-2 text-popover-foreground data-[state=closed]:animate-out data-[state=open]:animate-in outline-none",
|
||||
className,
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
</CollapsibleContent>
|
||||
</Collapsible>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
export type ChainOfThoughtImageProps = ComponentProps<"div"> & {
|
||||
caption?: string;
|
||||
};
|
||||
|
||||
export const ChainOfThoughtImage = memo(
|
||||
({ className, children, caption, ...props }: ChainOfThoughtImageProps) => (
|
||||
<div className={cn("mt-2 space-y-2", className)} {...props}>
|
||||
<div className="bg-muted relative flex max-h-[22rem] items-center justify-center overflow-hidden rounded-lg p-3">
|
||||
{children}
|
||||
</div>
|
||||
{caption && <p className="text-muted-foreground text-xs">{caption}</p>}
|
||||
</div>
|
||||
),
|
||||
);
|
||||
|
||||
ChainOfThought.displayName = "ChainOfThought";
|
||||
ChainOfThoughtHeader.displayName = "ChainOfThoughtHeader";
|
||||
ChainOfThoughtStep.displayName = "ChainOfThoughtStep";
|
||||
ChainOfThoughtSearchResults.displayName = "ChainOfThoughtSearchResults";
|
||||
ChainOfThoughtSearchResult.displayName = "ChainOfThoughtSearchResult";
|
||||
ChainOfThoughtContent.displayName = "ChainOfThoughtContent";
|
||||
ChainOfThoughtImage.displayName = "ChainOfThoughtImage";
|
||||
@@ -1,101 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { ArrowDownIcon } from "lucide-react";
|
||||
import type { ComponentProps, ReactNode } from "react";
|
||||
import { StickToBottom, useStickToBottomContext } from "use-stick-to-bottom";
|
||||
|
||||
import { Button } from "@/components/shadcn/button/button";
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
export type ConversationProps = ComponentProps<typeof StickToBottom>;
|
||||
|
||||
export const Conversation = ({ className, ...props }: ConversationProps) => (
|
||||
<StickToBottom
|
||||
className={cn("relative flex-1 overflow-y-hidden", className)}
|
||||
initial="smooth"
|
||||
resize="smooth"
|
||||
role="log"
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
|
||||
export type ConversationContentProps = ComponentProps<
|
||||
typeof StickToBottom.Content
|
||||
>;
|
||||
|
||||
export const ConversationContent = ({
|
||||
className,
|
||||
...props
|
||||
}: ConversationContentProps) => (
|
||||
<StickToBottom.Content
|
||||
className={cn("flex flex-col gap-8 p-4", className)}
|
||||
{...props}
|
||||
/>
|
||||
);
|
||||
|
||||
export type ConversationEmptyStateProps = ComponentProps<"div"> & {
|
||||
title?: string;
|
||||
description?: string;
|
||||
icon?: ReactNode;
|
||||
};
|
||||
|
||||
export const ConversationEmptyState = ({
|
||||
className,
|
||||
title = "No messages yet",
|
||||
description = "Start a conversation to see messages here",
|
||||
icon,
|
||||
children,
|
||||
...props
|
||||
}: ConversationEmptyStateProps) => (
|
||||
<div
|
||||
className={cn(
|
||||
"flex size-full flex-col items-center justify-center gap-3 p-8 text-center",
|
||||
className,
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{children ?? (
|
||||
<>
|
||||
{icon && <div className="text-muted-foreground">{icon}</div>}
|
||||
<div className="space-y-1">
|
||||
<h3 className="text-sm font-medium">{title}</h3>
|
||||
{description && (
|
||||
<p className="text-muted-foreground text-sm">{description}</p>
|
||||
)}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
|
||||
export type ConversationScrollButtonProps = ComponentProps<typeof Button>;
|
||||
|
||||
export const ConversationScrollButton = ({
|
||||
className,
|
||||
...props
|
||||
}: ConversationScrollButtonProps) => {
|
||||
const { isAtBottom, scrollToBottom } = useStickToBottomContext();
|
||||
|
||||
const handleScrollToBottom = () => {
|
||||
scrollToBottom();
|
||||
};
|
||||
|
||||
return (
|
||||
!isAtBottom && (
|
||||
<Button
|
||||
aria-label="Scroll to bottom"
|
||||
className={cn(
|
||||
"absolute bottom-4 left-[50%] translate-x-[-50%] rounded-full",
|
||||
className,
|
||||
)}
|
||||
onClick={handleScrollToBottom}
|
||||
size="icon"
|
||||
type="button"
|
||||
variant="outline"
|
||||
{...props}
|
||||
>
|
||||
<ArrowDownIcon className="size-4" />
|
||||
</Button>
|
||||
)
|
||||
);
|
||||
};
|
||||
@@ -5,7 +5,7 @@ import { useEffect, useRef, useState } from "react";
|
||||
|
||||
import { getFindings } from "@/actions/findings/findings";
|
||||
import {
|
||||
getColumnFindings,
|
||||
ColumnFindings,
|
||||
SkeletonTableFindings,
|
||||
} from "@/components/findings/table";
|
||||
import { Accordion } from "@/components/ui/accordion/Accordion";
|
||||
@@ -159,12 +159,8 @@ export const ClientAccordionContent = ({
|
||||
<h4 className="mb-2 text-sm font-medium">Findings</h4>
|
||||
|
||||
<DataTable
|
||||
// Remove select and updated_at columns for compliance view
|
||||
columns={getColumnFindings({}, 0).filter(
|
||||
(col) =>
|
||||
col.id !== "select" &&
|
||||
!("accessorKey" in col && col.accessorKey === "updated_at"),
|
||||
)}
|
||||
// Remove the updated_at column as compliance is for the last scan
|
||||
columns={ColumnFindings.filter((_, index) => index !== 7)}
|
||||
data={expandedFindings || []}
|
||||
metadata={findings?.meta}
|
||||
disableScroll={true}
|
||||
|
||||
@@ -1,63 +1,44 @@
|
||||
"use client";
|
||||
|
||||
import { usePathname, useRouter, useSearchParams } from "next/navigation";
|
||||
import { Checkbox } from "@heroui/checkbox";
|
||||
import { useSearchParams } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
|
||||
import { Checkbox } from "@/components/shadcn";
|
||||
|
||||
// Constants for muted filter URL values
|
||||
const MUTED_FILTER_VALUES = {
|
||||
EXCLUDE: "false",
|
||||
INCLUDE: "include",
|
||||
} as const;
|
||||
import { useUrlFilters } from "@/hooks/use-url-filters";
|
||||
|
||||
export const CustomCheckboxMutedFindings = () => {
|
||||
const router = useRouter();
|
||||
const pathname = usePathname();
|
||||
const { updateFilter, clearFilter } = useUrlFilters();
|
||||
const searchParams = useSearchParams();
|
||||
const [excludeMuted, setExcludeMuted] = useState(
|
||||
searchParams.get("filter[muted]") === "false",
|
||||
);
|
||||
|
||||
// Get the current muted filter value from URL
|
||||
// Middleware ensures filter[muted] is always present when navigating to /findings
|
||||
const mutedFilterValue = searchParams.get("filter[muted]");
|
||||
const handleMutedChange = (value: boolean) => {
|
||||
setExcludeMuted(value);
|
||||
|
||||
// URL states:
|
||||
// - filter[muted]=false → Exclude muted (checkbox UNCHECKED)
|
||||
// - filter[muted]=include → Include muted (checkbox CHECKED)
|
||||
const includeMuted = mutedFilterValue === MUTED_FILTER_VALUES.INCLUDE;
|
||||
|
||||
const handleMutedChange = (checked: boolean | "indeterminate") => {
|
||||
const isChecked = checked === true;
|
||||
const params = new URLSearchParams(searchParams.toString());
|
||||
|
||||
if (isChecked) {
|
||||
// Include muted: set special value (API will ignore invalid value and show all)
|
||||
params.set("filter[muted]", MUTED_FILTER_VALUES.INCLUDE);
|
||||
// Only URL update if value is false else remove filter
|
||||
if (value) {
|
||||
updateFilter("muted", "false");
|
||||
} else {
|
||||
// Exclude muted: apply filter to show only non-muted
|
||||
params.set("filter[muted]", MUTED_FILTER_VALUES.EXCLUDE);
|
||||
clearFilter("muted");
|
||||
}
|
||||
|
||||
// Reset to page 1 when changing filter
|
||||
if (params.has("page")) {
|
||||
params.set("page", "1");
|
||||
}
|
||||
|
||||
router.push(`${pathname}?${params.toString()}`, { scroll: false });
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="flex h-full items-center gap-2 text-nowrap">
|
||||
<div className="flex h-full text-nowrap">
|
||||
<Checkbox
|
||||
id="include-muted"
|
||||
checked={includeMuted}
|
||||
onCheckedChange={handleMutedChange}
|
||||
aria-label="Include muted findings"
|
||||
/>
|
||||
<label
|
||||
htmlFor="include-muted"
|
||||
className="cursor-pointer text-sm leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70"
|
||||
classNames={{
|
||||
label: "text-small",
|
||||
wrapper: "checkbox-update",
|
||||
}}
|
||||
size="md"
|
||||
color="primary"
|
||||
aria-label="Include Mutelist"
|
||||
isSelected={excludeMuted}
|
||||
onValueChange={handleMutedChange}
|
||||
>
|
||||
Include muted findings
|
||||
</label>
|
||||
Exclude muted findings
|
||||
</Checkbox>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { VolumeX } from "lucide-react";
|
||||
import { useState } from "react";
|
||||
|
||||
import { Button } from "@/components/shadcn";
|
||||
|
||||
import { MuteFindingsModal } from "./mute-findings-modal";
|
||||
|
||||
interface FloatingMuteButtonProps {
|
||||
selectedCount: number;
|
||||
selectedFindingIds: string[];
|
||||
onComplete?: () => void;
|
||||
}
|
||||
|
||||
export function FloatingMuteButton({
|
||||
selectedCount,
|
||||
selectedFindingIds,
|
||||
onComplete,
|
||||
}: FloatingMuteButtonProps) {
|
||||
const [isModalOpen, setIsModalOpen] = useState(false);
|
||||
|
||||
return (
|
||||
<>
|
||||
<MuteFindingsModal
|
||||
isOpen={isModalOpen}
|
||||
onOpenChange={setIsModalOpen}
|
||||
findingIds={selectedFindingIds}
|
||||
onComplete={onComplete}
|
||||
/>
|
||||
|
||||
<div className="animate-in fade-in slide-in-from-bottom-4 fixed right-6 bottom-6 z-50 duration-300">
|
||||
<Button
|
||||
onClick={() => setIsModalOpen(true)}
|
||||
size="lg"
|
||||
className="shadow-lg"
|
||||
>
|
||||
<VolumeX className="size-5" />
|
||||
Mute ({selectedCount})
|
||||
</Button>
|
||||
</div>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -1,130 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Input, Textarea } from "@heroui/input";
|
||||
import {
|
||||
Dispatch,
|
||||
SetStateAction,
|
||||
useActionState,
|
||||
useEffect,
|
||||
useRef,
|
||||
} from "react";
|
||||
|
||||
import { createMuteRule } from "@/actions/mute-rules";
|
||||
import { MuteRuleActionState } from "@/actions/mute-rules/types";
|
||||
import { useToast } from "@/components/ui";
|
||||
import { CustomAlertModal } from "@/components/ui/custom";
|
||||
import { FormButtons } from "@/components/ui/form";
|
||||
|
||||
interface MuteFindingsModalProps {
|
||||
isOpen: boolean;
|
||||
onOpenChange: Dispatch<SetStateAction<boolean>>;
|
||||
findingIds: string[];
|
||||
onComplete?: () => void;
|
||||
}
|
||||
|
||||
export function MuteFindingsModal({
|
||||
isOpen,
|
||||
onOpenChange,
|
||||
findingIds,
|
||||
onComplete,
|
||||
}: MuteFindingsModalProps) {
|
||||
const { toast } = useToast();
|
||||
|
||||
// Use refs to avoid stale closures in useEffect
|
||||
const onCompleteRef = useRef(onComplete);
|
||||
onCompleteRef.current = onComplete;
|
||||
|
||||
const onOpenChangeRef = useRef(onOpenChange);
|
||||
onOpenChangeRef.current = onOpenChange;
|
||||
|
||||
const [state, formAction, isPending] = useActionState<
|
||||
MuteRuleActionState,
|
||||
FormData
|
||||
>(createMuteRule, null);
|
||||
|
||||
useEffect(() => {
|
||||
if (state?.success) {
|
||||
toast({
|
||||
title: "Success",
|
||||
description: state.success,
|
||||
});
|
||||
// Call onComplete BEFORE closing the modal to ensure router.refresh() executes
|
||||
onCompleteRef.current?.();
|
||||
onOpenChangeRef.current(false);
|
||||
} else if (state?.errors?.general) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
description: state.errors.general,
|
||||
});
|
||||
}
|
||||
}, [state, toast]);
|
||||
|
||||
const handleCancel = () => {
|
||||
onOpenChange(false);
|
||||
};
|
||||
|
||||
return (
|
||||
<CustomAlertModal
|
||||
isOpen={isOpen}
|
||||
onOpenChange={onOpenChange}
|
||||
title="Mute Findings"
|
||||
size="lg"
|
||||
>
|
||||
<form action={formAction} className="flex flex-col gap-4">
|
||||
<input
|
||||
type="hidden"
|
||||
name="finding_ids"
|
||||
value={JSON.stringify(findingIds)}
|
||||
/>
|
||||
|
||||
<div className="rounded-lg bg-slate-50 p-3 dark:bg-slate-800/50">
|
||||
<p className="text-sm text-slate-600 dark:text-slate-400">
|
||||
You are about to mute{" "}
|
||||
<span className="font-semibold text-slate-900 dark:text-white">
|
||||
{findingIds.length}
|
||||
</span>{" "}
|
||||
{findingIds.length === 1 ? "finding" : "findings"}.
|
||||
</p>
|
||||
<p className="mt-1 text-xs text-slate-500 dark:text-slate-500">
|
||||
Muted findings will be hidden by default but can be shown using
|
||||
filters.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<Input
|
||||
name="name"
|
||||
label="Rule Name"
|
||||
placeholder="e.g., Ignore dev environment S3 buckets"
|
||||
isRequired
|
||||
variant="bordered"
|
||||
isInvalid={!!state?.errors?.name}
|
||||
errorMessage={state?.errors?.name}
|
||||
isDisabled={isPending}
|
||||
description="A descriptive name for this mute rule"
|
||||
/>
|
||||
|
||||
<Textarea
|
||||
name="reason"
|
||||
label="Reason"
|
||||
placeholder="e.g., These are expected findings in the development environment"
|
||||
isRequired
|
||||
variant="bordered"
|
||||
minRows={3}
|
||||
maxRows={6}
|
||||
isInvalid={!!state?.errors?.reason}
|
||||
errorMessage={state?.errors?.reason}
|
||||
isDisabled={isPending}
|
||||
description="Explain why these findings are being muted"
|
||||
/>
|
||||
|
||||
<FormButtons
|
||||
setIsOpen={onOpenChange}
|
||||
onCancel={handleCancel}
|
||||
submitText="Mute Findings"
|
||||
isDisabled={isPending}
|
||||
/>
|
||||
</form>
|
||||
</CustomAlertModal>
|
||||
);
|
||||
}
|
||||
@@ -1,20 +1,12 @@
|
||||
"use client";
|
||||
|
||||
import { ColumnDef, RowSelectionState } from "@tanstack/react-table";
|
||||
import { ColumnDef } from "@tanstack/react-table";
|
||||
import { Database } from "lucide-react";
|
||||
import Link from "next/link";
|
||||
import { useSearchParams } from "next/navigation";
|
||||
|
||||
import { DataTableRowDetails } from "@/components/findings/table";
|
||||
import { DataTableRowActions } from "@/components/findings/table/data-table-row-actions";
|
||||
import { InfoIcon, MutedIcon } from "@/components/icons";
|
||||
import {
|
||||
Checkbox,
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipProvider,
|
||||
TooltipTrigger,
|
||||
} from "@/components/shadcn";
|
||||
import { InfoIcon } from "@/components/icons";
|
||||
import {
|
||||
DateWithTime,
|
||||
EntityInfo,
|
||||
@@ -28,6 +20,7 @@ import {
|
||||
} from "@/components/ui/table";
|
||||
import { FindingProps, ProviderType } from "@/types";
|
||||
|
||||
import { Muted } from "../muted";
|
||||
import { DeltaIndicator } from "./delta-indicator";
|
||||
|
||||
const getFindingsData = (row: { original: FindingProps }) => {
|
||||
@@ -95,275 +88,187 @@ const FindingDetailsCell = ({ row }: { row: any }) => {
|
||||
);
|
||||
};
|
||||
|
||||
// Function to generate columns with access to selection state
|
||||
export function getColumnFindings(
|
||||
rowSelection: RowSelectionState,
|
||||
selectableRowCount: number,
|
||||
): ColumnDef<FindingProps>[] {
|
||||
// Calculate selection state from rowSelection for header checkbox
|
||||
const selectedCount = Object.values(rowSelection).filter(Boolean).length;
|
||||
const isAllSelected =
|
||||
selectedCount > 0 && selectedCount === selectableRowCount;
|
||||
const isSomeSelected =
|
||||
selectedCount > 0 && selectedCount < selectableRowCount;
|
||||
return [
|
||||
{
|
||||
id: "select",
|
||||
header: ({ table }) => {
|
||||
// Use state calculated from rowSelection to force re-render
|
||||
const headerChecked = isAllSelected
|
||||
? true
|
||||
: isSomeSelected
|
||||
? "indeterminate"
|
||||
: false;
|
||||
export const ColumnFindings: ColumnDef<FindingProps>[] = [
|
||||
{
|
||||
id: "moreInfo",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Details" />
|
||||
),
|
||||
cell: ({ row }) => <FindingDetailsCell row={row} />,
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "check",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Finding"}
|
||||
param="check_id"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const { checktitle } = getFindingsMetadata(row);
|
||||
const {
|
||||
attributes: { muted, muted_reason },
|
||||
} = getFindingsData(row);
|
||||
const { delta } = row.original.attributes;
|
||||
|
||||
return (
|
||||
<div className="flex w-6 items-center justify-center">
|
||||
<Checkbox
|
||||
checked={headerChecked}
|
||||
onCheckedChange={(checked) =>
|
||||
table.toggleAllPageRowsSelected(checked === true)
|
||||
}
|
||||
aria-label="Select all"
|
||||
// Disable when no rows are selectable (all muted)
|
||||
disabled={selectableRowCount === 0}
|
||||
/>
|
||||
return (
|
||||
<div className="3xl:max-w-[660px] relative flex max-w-[410px] flex-row items-center gap-2">
|
||||
<div className="flex flex-row items-center gap-4">
|
||||
{delta === "new" || delta === "changed" ? (
|
||||
<DeltaIndicator delta={delta} />
|
||||
) : null}
|
||||
<p className="mr-7 text-sm break-words whitespace-normal">
|
||||
{checktitle}
|
||||
</p>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
cell: ({ row }) => {
|
||||
const finding = row.original;
|
||||
const isMuted = finding.attributes.muted;
|
||||
const mutedReason = finding.attributes.muted_reason;
|
||||
|
||||
// Show muted icon with tooltip for muted findings
|
||||
if (isMuted) {
|
||||
const ruleName = mutedReason || "Unknown rule";
|
||||
|
||||
return (
|
||||
<div className="flex w-6 items-center justify-center">
|
||||
<TooltipProvider>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<div className="border-system-severity-critical/40 cursor-pointer rounded-full border p-0.5">
|
||||
<MutedIcon className="text-system-severity-critical size-3.5" />
|
||||
</div>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<Link
|
||||
href="/mutelist"
|
||||
className="text-button-tertiary hover:text-button-tertiary-hover flex items-center gap-1 text-xs underline-offset-4"
|
||||
>
|
||||
<span className="text-text-neutral-primary">
|
||||
Mute rule:
|
||||
</span>
|
||||
<span className="max-w-[150px] truncate">{ruleName}</span>
|
||||
</Link>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</TooltipProvider>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Use rowSelection directly instead of row.getIsSelected()
|
||||
// This ensures re-render when selection state changes
|
||||
const isSelected = !!rowSelection[row.id];
|
||||
|
||||
return (
|
||||
<div className="flex w-6 items-center justify-center">
|
||||
<Checkbox
|
||||
checked={isSelected}
|
||||
onCheckedChange={(checked) =>
|
||||
row.toggleSelected(checked === true)
|
||||
}
|
||||
aria-label="Select row"
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
enableHiding: false,
|
||||
<span className="absolute top-1/2 -right-2 -translate-y-1/2">
|
||||
<Muted isMuted={muted} mutedReason={muted_reason || ""} />
|
||||
</span>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
{
|
||||
id: "moreInfo",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Details" />
|
||||
),
|
||||
cell: ({ row }) => <FindingDetailsCell row={row} />,
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "check",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Finding"}
|
||||
param="check_id"
|
||||
},
|
||||
{
|
||||
accessorKey: "resourceName",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Resource name" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const resourceName = getResourceData(row, "name");
|
||||
|
||||
return (
|
||||
<SnippetChip
|
||||
value={resourceName as string}
|
||||
formatter={(value: string) => `...${value.slice(-10)}`}
|
||||
icon={<Database size={16} />}
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const { checktitle } = getFindingsMetadata(row);
|
||||
const { delta } = row.original.attributes;
|
||||
|
||||
return (
|
||||
<div className="3xl:max-w-[660px] flex max-w-[410px] flex-row items-center gap-2">
|
||||
<div className="flex flex-row items-center gap-4">
|
||||
{delta === "new" || delta === "changed" ? (
|
||||
<DeltaIndicator delta={delta} />
|
||||
) : null}
|
||||
<p className="text-sm break-words whitespace-normal">
|
||||
{checktitle}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
},
|
||||
);
|
||||
},
|
||||
{
|
||||
accessorKey: "resourceName",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Resource name" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const resourceName = getResourceData(row, "name");
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "severity",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Severity"}
|
||||
param="severity"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const {
|
||||
attributes: { severity },
|
||||
} = getFindingsData(row);
|
||||
return <SeverityBadge severity={severity} />;
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "status",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title={"Status"} param="status" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const {
|
||||
attributes: { status },
|
||||
} = getFindingsData(row);
|
||||
|
||||
return (
|
||||
<SnippetChip
|
||||
value={resourceName as string}
|
||||
formatter={(value: string) => `...${value.slice(-10)}`}
|
||||
icon={<Database size={16} />}
|
||||
return <StatusFindingBadge status={status} />;
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "updated_at",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Last seen"}
|
||||
param="updated_at"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const {
|
||||
attributes: { updated_at },
|
||||
} = getFindingsData(row);
|
||||
return (
|
||||
<div className="w-[100px]">
|
||||
<DateWithTime dateTime={updated_at} />
|
||||
</div>
|
||||
);
|
||||
},
|
||||
},
|
||||
// {
|
||||
// accessorKey: "scanName",
|
||||
// header: "Scan Name",
|
||||
// cell: ({ row }) => {
|
||||
// const name = getScanData(row, "name");
|
||||
|
||||
// return (
|
||||
// <p className="text-small">
|
||||
// {typeof name === "string" || typeof name === "number"
|
||||
// ? name
|
||||
// : "Invalid data"}
|
||||
// </p>
|
||||
// );
|
||||
// },
|
||||
// },
|
||||
{
|
||||
accessorKey: "region",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Region" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const region = getResourceData(row, "region");
|
||||
|
||||
return (
|
||||
<div className="w-[80px] text-xs">
|
||||
{typeof region === "string" ? region : "Invalid region"}
|
||||
</div>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "service",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Service" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const { servicename } = getFindingsMetadata(row);
|
||||
return <p className="max-w-96 truncate text-xs">{servicename}</p>;
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "cloudProvider",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Cloud Provider" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const provider = getProviderData(row, "provider");
|
||||
const alias = getProviderData(row, "alias");
|
||||
const uid = getProviderData(row, "uid");
|
||||
|
||||
return (
|
||||
<>
|
||||
<EntityInfo
|
||||
cloudProvider={provider as ProviderType}
|
||||
entityAlias={alias as string}
|
||||
entityId={uid as string}
|
||||
/>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
</>
|
||||
);
|
||||
},
|
||||
{
|
||||
accessorKey: "severity",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Severity"}
|
||||
param="severity"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const {
|
||||
attributes: { severity },
|
||||
} = getFindingsData(row);
|
||||
return <SeverityBadge severity={severity} />;
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
id: "actions",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Actions" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
return <DataTableRowActions row={row} />;
|
||||
},
|
||||
{
|
||||
accessorKey: "status",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Status"}
|
||||
param="status"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const {
|
||||
attributes: { status },
|
||||
} = getFindingsData(row);
|
||||
|
||||
return <StatusFindingBadge status={status} />;
|
||||
},
|
||||
},
|
||||
{
|
||||
accessorKey: "updated_at",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader
|
||||
column={column}
|
||||
title={"Last seen"}
|
||||
param="updated_at"
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const {
|
||||
attributes: { updated_at },
|
||||
} = getFindingsData(row);
|
||||
return (
|
||||
<div className="w-[100px]">
|
||||
<DateWithTime dateTime={updated_at} />
|
||||
</div>
|
||||
);
|
||||
},
|
||||
},
|
||||
// {
|
||||
// accessorKey: "scanName",
|
||||
// header: "Scan Name",
|
||||
// cell: ({ row }) => {
|
||||
// const name = getScanData(row, "name");
|
||||
|
||||
// return (
|
||||
// <p className="text-small">
|
||||
// {typeof name === "string" || typeof name === "number"
|
||||
// ? name
|
||||
// : "Invalid data"}
|
||||
// </p>
|
||||
// );
|
||||
// },
|
||||
// },
|
||||
{
|
||||
accessorKey: "region",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Region" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const region = getResourceData(row, "region");
|
||||
|
||||
return (
|
||||
<div className="w-[80px] text-xs">
|
||||
{typeof region === "string" ? region : "Invalid region"}
|
||||
</div>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "service",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Service" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const { servicename } = getFindingsMetadata(row);
|
||||
return <p className="max-w-96 truncate text-xs">{servicename}</p>;
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
accessorKey: "cloudProvider",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Cloud Provider" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const provider = getProviderData(row, "provider");
|
||||
const alias = getProviderData(row, "alias");
|
||||
const uid = getProviderData(row, "uid");
|
||||
|
||||
return (
|
||||
<>
|
||||
<EntityInfo
|
||||
cloudProvider={provider as ProviderType}
|
||||
entityAlias={alias as string}
|
||||
entityId={uid as string}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
{
|
||||
id: "actions",
|
||||
header: ({ column }) => (
|
||||
<DataTableColumnHeader column={column} title="Actions" />
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
return <DataTableRowActions row={row} />;
|
||||
},
|
||||
enableSorting: false,
|
||||
},
|
||||
];
|
||||
}
|
||||
enableSorting: false,
|
||||
},
|
||||
];
|
||||
|
||||
@@ -8,74 +8,25 @@ import {
|
||||
DropdownTrigger,
|
||||
} from "@heroui/dropdown";
|
||||
import { Row } from "@tanstack/react-table";
|
||||
import { VolumeOff, VolumeX } from "lucide-react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useContext, useState } from "react";
|
||||
import { useState } from "react";
|
||||
|
||||
import { MuteFindingsModal } from "@/components/findings/mute-findings-modal";
|
||||
import { SendToJiraModal } from "@/components/findings/send-to-jira-modal";
|
||||
import { VerticalDotsIcon } from "@/components/icons";
|
||||
import { JiraIcon } from "@/components/icons/services/IconServices";
|
||||
import { Button } from "@/components/shadcn";
|
||||
import type { FindingProps } from "@/types/components";
|
||||
|
||||
import { FindingsSelectionContext } from "./findings-selection-context";
|
||||
|
||||
interface DataTableRowActionsProps {
|
||||
row: Row<FindingProps>;
|
||||
}
|
||||
|
||||
export function DataTableRowActions({ row }: DataTableRowActionsProps) {
|
||||
const router = useRouter();
|
||||
const finding = row.original;
|
||||
const [isJiraModalOpen, setIsJiraModalOpen] = useState(false);
|
||||
const [isMuteModalOpen, setIsMuteModalOpen] = useState(false);
|
||||
|
||||
const isMuted = finding.attributes.muted;
|
||||
|
||||
// Get selection context - if there are other selected rows, include them
|
||||
const selectionContext = useContext(FindingsSelectionContext);
|
||||
const { selectedFindingIds, clearSelection } = selectionContext || {
|
||||
selectedFindingIds: [],
|
||||
clearSelection: () => {},
|
||||
};
|
||||
|
||||
const findingTitle =
|
||||
finding.attributes.check_metadata?.checktitle || "Security Finding";
|
||||
|
||||
// If current finding is selected and there are multiple selections, mute all
|
||||
// Otherwise, just mute this single finding
|
||||
const isCurrentSelected = selectedFindingIds.includes(finding.id);
|
||||
const hasMultipleSelected = selectedFindingIds.length > 1;
|
||||
|
||||
const getMuteIds = (): string[] => {
|
||||
if (isCurrentSelected && hasMultipleSelected) {
|
||||
// Mute all selected including current
|
||||
return selectedFindingIds;
|
||||
}
|
||||
// Just mute the current finding
|
||||
return [finding.id];
|
||||
};
|
||||
|
||||
const getMuteDescription = (): string => {
|
||||
if (isMuted) {
|
||||
return "This finding is already muted";
|
||||
}
|
||||
const ids = getMuteIds();
|
||||
if (ids.length > 1) {
|
||||
return `Mute ${ids.length} selected findings`;
|
||||
}
|
||||
return "Mute this finding";
|
||||
};
|
||||
|
||||
const handleMuteComplete = () => {
|
||||
// Always clear selection when a finding is muted because:
|
||||
// 1. If the muted finding was selected, its index now points to a different finding
|
||||
// 2. rowSelection uses indices (0, 1, 2...) not IDs, so after refresh the wrong findings would appear selected
|
||||
clearSelection();
|
||||
router.refresh();
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<SendToJiraModal
|
||||
@@ -85,28 +36,14 @@ export function DataTableRowActions({ row }: DataTableRowActionsProps) {
|
||||
findingTitle={findingTitle}
|
||||
/>
|
||||
|
||||
<MuteFindingsModal
|
||||
isOpen={isMuteModalOpen}
|
||||
onOpenChange={setIsMuteModalOpen}
|
||||
findingIds={getMuteIds()}
|
||||
onComplete={handleMuteComplete}
|
||||
/>
|
||||
|
||||
<div className="flex items-center justify-center px-2">
|
||||
<div className="relative flex items-center justify-end gap-2">
|
||||
<Dropdown
|
||||
className="border-border-neutral-secondary bg-bg-neutral-secondary border shadow-xl"
|
||||
placement="bottom"
|
||||
>
|
||||
<DropdownTrigger>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="icon-sm"
|
||||
className="size-7 rounded-full"
|
||||
>
|
||||
<VerticalDotsIcon
|
||||
size={16}
|
||||
className="text-text-neutral-secondary"
|
||||
/>
|
||||
<Button variant="ghost" size="icon-sm" className="rounded-full">
|
||||
<VerticalDotsIcon className="text-slate-400" />
|
||||
</Button>
|
||||
</DropdownTrigger>
|
||||
<DropdownMenu
|
||||
@@ -116,27 +53,6 @@ export function DataTableRowActions({ row }: DataTableRowActionsProps) {
|
||||
variant="flat"
|
||||
>
|
||||
<DropdownSection title="Actions">
|
||||
<DropdownItem
|
||||
key="mute"
|
||||
description={getMuteDescription()}
|
||||
textValue="Mute"
|
||||
isDisabled={isMuted}
|
||||
startContent={
|
||||
isMuted ? (
|
||||
<VolumeOff className="text-default-300 pointer-events-none size-5 shrink-0" />
|
||||
) : (
|
||||
<VolumeX className="text-default-500 pointer-events-none size-5 shrink-0" />
|
||||
)
|
||||
}
|
||||
onPress={() => setIsMuteModalOpen(true)}
|
||||
>
|
||||
{isMuted ? "Muted" : "Mute"}
|
||||
{!isMuted && isCurrentSelected && hasMultipleSelected && (
|
||||
<span className="ml-1 text-xs text-slate-500">
|
||||
({selectedFindingIds.length})
|
||||
</span>
|
||||
)}
|
||||
</DropdownItem>
|
||||
<DropdownItem
|
||||
key="jira"
|
||||
description="Create a Jira issue for this finding"
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { createContext, useContext } from "react";
|
||||
|
||||
import { FindingProps } from "@/types";
|
||||
|
||||
interface FindingsSelectionContextValue {
|
||||
selectedFindingIds: string[];
|
||||
selectedFindings: FindingProps[];
|
||||
clearSelection: () => void;
|
||||
isSelected: (id: string) => boolean;
|
||||
}
|
||||
|
||||
export const FindingsSelectionContext =
|
||||
createContext<FindingsSelectionContextValue>({
|
||||
selectedFindingIds: [],
|
||||
selectedFindings: [],
|
||||
clearSelection: () => {},
|
||||
isSelected: () => false,
|
||||
});
|
||||
|
||||
export function useFindingsSelection() {
|
||||
const context = useContext(FindingsSelectionContext);
|
||||
if (!context) {
|
||||
throw new Error(
|
||||
"useFindingsSelection must be used within a FindingsSelectionProvider",
|
||||
);
|
||||
}
|
||||
return context;
|
||||
}
|
||||
@@ -1,111 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { Row, RowSelectionState } from "@tanstack/react-table";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
|
||||
import { DataTable } from "@/components/ui/table";
|
||||
import { FindingProps, MetaDataProps } from "@/types";
|
||||
|
||||
import { FloatingMuteButton } from "../floating-mute-button";
|
||||
import { getColumnFindings } from "./column-findings";
|
||||
import { FindingsSelectionContext } from "./findings-selection-context";
|
||||
|
||||
interface FindingsTableWithSelectionProps {
|
||||
data: FindingProps[];
|
||||
metadata?: MetaDataProps;
|
||||
}
|
||||
|
||||
export function FindingsTableWithSelection({
|
||||
data,
|
||||
metadata,
|
||||
}: FindingsTableWithSelectionProps) {
|
||||
const router = useRouter();
|
||||
const [rowSelection, setRowSelection] = useState<RowSelectionState>({});
|
||||
|
||||
// Track the finding IDs to detect when data changes (e.g., after muting)
|
||||
const currentFindingIds = (data ?? []).map((f) => f.id).join(",");
|
||||
const previousFindingIdsRef = useRef(currentFindingIds);
|
||||
|
||||
// Reset selection when page changes
|
||||
useEffect(() => {
|
||||
setRowSelection({});
|
||||
}, [metadata?.pagination?.page]);
|
||||
|
||||
// Reset selection when the data changes (e.g., after muting a finding)
|
||||
// This prevents the wrong findings from appearing selected after refresh
|
||||
useEffect(() => {
|
||||
if (previousFindingIdsRef.current !== currentFindingIds) {
|
||||
setRowSelection({});
|
||||
previousFindingIdsRef.current = currentFindingIds;
|
||||
}
|
||||
}, [currentFindingIds]);
|
||||
|
||||
// Ensure data is always an array for safe operations
|
||||
const safeData = data ?? [];
|
||||
|
||||
// Get selected finding IDs and data (only non-muted findings can be selected)
|
||||
const selectedFindingIds = Object.keys(rowSelection)
|
||||
.filter((key) => rowSelection[key])
|
||||
.map((idx) => safeData[parseInt(idx)]?.id)
|
||||
.filter(Boolean);
|
||||
|
||||
const selectedFindings = Object.keys(rowSelection)
|
||||
.filter((key) => rowSelection[key])
|
||||
.map((idx) => safeData[parseInt(idx)])
|
||||
.filter(Boolean);
|
||||
|
||||
// Count of selectable rows (non-muted findings only)
|
||||
const selectableRowCount = safeData.filter((f) => !f.attributes.muted).length;
|
||||
|
||||
// Function to determine if a row can be selected (muted findings cannot be selected)
|
||||
const getRowCanSelect = (row: Row<FindingProps>): boolean => {
|
||||
return !row.original.attributes.muted;
|
||||
};
|
||||
|
||||
const clearSelection = () => {
|
||||
setRowSelection({});
|
||||
};
|
||||
|
||||
const isSelected = (id: string) => {
|
||||
return selectedFindingIds.includes(id);
|
||||
};
|
||||
|
||||
// Handle mute completion: clear selection and refresh data
|
||||
const handleMuteComplete = () => {
|
||||
clearSelection();
|
||||
router.refresh();
|
||||
};
|
||||
|
||||
// Generate columns with access to rowSelection state and selectable row count
|
||||
const columns = getColumnFindings(rowSelection, selectableRowCount);
|
||||
|
||||
return (
|
||||
<FindingsSelectionContext.Provider
|
||||
value={{
|
||||
selectedFindingIds,
|
||||
selectedFindings,
|
||||
clearSelection,
|
||||
isSelected,
|
||||
}}
|
||||
>
|
||||
<DataTable
|
||||
columns={columns}
|
||||
data={safeData}
|
||||
metadata={metadata}
|
||||
enableRowSelection
|
||||
rowSelection={rowSelection}
|
||||
onRowSelectionChange={setRowSelection}
|
||||
getRowCanSelect={getRowCanSelect}
|
||||
/>
|
||||
|
||||
{selectedFindingIds.length > 0 && (
|
||||
<FloatingMuteButton
|
||||
selectedCount={selectedFindingIds.length}
|
||||
selectedFindingIds={selectedFindingIds}
|
||||
onComplete={handleMuteComplete}
|
||||
/>
|
||||
)}
|
||||
</FindingsSelectionContext.Provider>
|
||||
);
|
||||
}
|
||||
@@ -2,6 +2,4 @@ export * from "./column-findings";
|
||||
export * from "./data-table-row-actions";
|
||||
export * from "./data-table-row-details";
|
||||
export * from "./finding-detail";
|
||||
export * from "./findings-selection-context";
|
||||
export * from "./findings-table-with-selection";
|
||||
export * from "./skeleton-table-findings";
|
||||
|
||||
@@ -61,17 +61,6 @@ export function HorizontalBarChart({
|
||||
"var(--bg-neutral-tertiary)";
|
||||
|
||||
const isClickable = !isEmpty && onBarClick;
|
||||
const maxValue =
|
||||
data.length > 0 ? Math.max(...data.map((d) => d.value)) : 0;
|
||||
const calculatedWidth = isEmpty
|
||||
? item.percentage
|
||||
: (item.percentage ??
|
||||
(maxValue > 0 ? (item.value / maxValue) * 100 : 0));
|
||||
// Calculate display percentage (value / total * 100)
|
||||
const displayPercentage = isEmpty
|
||||
? 0
|
||||
: (item.percentage ??
|
||||
(total > 0 ? Math.round((item.value / total) * 100) : 0));
|
||||
return (
|
||||
<div
|
||||
key={item.name}
|
||||
@@ -116,13 +105,15 @@ export function HorizontalBarChart({
|
||||
</div>
|
||||
|
||||
{/* Bar - flexible */}
|
||||
<div className="relative h-[22px] flex-1">
|
||||
<div className="relative flex-1">
|
||||
<div className="bg-bg-neutral-tertiary absolute inset-0 h-[22px] w-full rounded-sm" />
|
||||
{(item.value > 0 || isEmpty) && (
|
||||
<div
|
||||
className="relative h-[22px] rounded-sm border border-black/10 transition-all duration-300"
|
||||
style={{
|
||||
width: `${calculatedWidth}%`,
|
||||
width: isEmpty
|
||||
? `${item.percentage}%`
|
||||
: `${item.percentage || (item.value / Math.max(...data.map((d) => d.value))) * 100}%`,
|
||||
backgroundColor: barColor,
|
||||
opacity: isFaded ? 0.5 : 1,
|
||||
}}
|
||||
@@ -183,7 +174,7 @@ export function HorizontalBarChart({
|
||||
}}
|
||||
>
|
||||
<span className="min-w-[26px] text-right font-medium">
|
||||
{displayPercentage}%
|
||||
{isEmpty ? "0" : item.percentage}%
|
||||
</span>
|
||||
<span className="shrink-0 font-medium">•</span>
|
||||
<span className="font-bold whitespace-nowrap">
|
||||
|
||||
@@ -18,7 +18,6 @@ export const SEVERITY_ORDER = {
|
||||
Medium: 2,
|
||||
Low: 3,
|
||||
Informational: 4,
|
||||
Info: 4,
|
||||
} as const;
|
||||
|
||||
export const LAYOUT_OPTIONS = {
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { KeyRoundIcon } from "lucide-react";
|
||||
|
||||
import { LinkCard } from "../shared/link-card";
|
||||
|
||||
export const ApiKeyLinkCard = () => {
|
||||
return (
|
||||
<LinkCard
|
||||
icon={KeyRoundIcon}
|
||||
title="API Keys"
|
||||
description="Manage API keys for programmatic access."
|
||||
learnMoreUrl="https://docs.prowler.com/user-guide/tutorials/prowler-app-api-keys"
|
||||
learnMoreAriaLabel="Learn more about API Keys"
|
||||
bodyText="API Key management is available in your User Profile. Create and manage API keys to authenticate with the Prowler API for automation and integrations."
|
||||
linkHref="/profile"
|
||||
linkText="Go to Profile"
|
||||
/>
|
||||
);
|
||||
};
|
||||
@@ -1,5 +1,4 @@
|
||||
export * from "../providers/enhanced-provider-selector";
|
||||
export * from "./api-key/api-key-link-card";
|
||||
export * from "./jira/jira-integration-card";
|
||||
export * from "./jira/jira-integration-form";
|
||||
export * from "./jira/jira-integrations-manager";
|
||||
@@ -12,4 +11,3 @@ export * from "./security-hub/security-hub-integration-card";
|
||||
export * from "./security-hub/security-hub-integration-form";
|
||||
export * from "./security-hub/security-hub-integrations-manager";
|
||||
export * from "./shared";
|
||||
export * from "./sso/sso-link-card";
|
||||
|
||||
@@ -14,7 +14,6 @@ import { SamlConfigForm } from "./saml-config-form";
|
||||
|
||||
export const SamlIntegrationCard = ({ samlConfig }: { samlConfig?: any }) => {
|
||||
const [isSamlModalOpen, setIsSamlModalOpen] = useState(false);
|
||||
const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false);
|
||||
const [isDeleting, setIsDeleting] = useState(false);
|
||||
const { toast } = useToast();
|
||||
const id = samlConfig?.id;
|
||||
@@ -31,7 +30,6 @@ export const SamlIntegrationCard = ({ samlConfig }: { samlConfig?: any }) => {
|
||||
title: "SAML configuration removed",
|
||||
description: result.success,
|
||||
});
|
||||
setIsDeleteModalOpen(false);
|
||||
} else if (result.errors?.general) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
@@ -39,7 +37,7 @@ export const SamlIntegrationCard = ({ samlConfig }: { samlConfig?: any }) => {
|
||||
description: result.errors.general,
|
||||
});
|
||||
}
|
||||
} catch {
|
||||
} catch (error) {
|
||||
toast({
|
||||
variant: "destructive",
|
||||
title: "Error",
|
||||
@@ -52,7 +50,6 @@ export const SamlIntegrationCard = ({ samlConfig }: { samlConfig?: any }) => {
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* Configure SAML Modal */}
|
||||
<CustomAlertModal
|
||||
isOpen={isSamlModalOpen}
|
||||
onOpenChange={setIsSamlModalOpen}
|
||||
@@ -64,42 +61,6 @@ export const SamlIntegrationCard = ({ samlConfig }: { samlConfig?: any }) => {
|
||||
/>
|
||||
</CustomAlertModal>
|
||||
|
||||
{/* Delete Confirmation Modal */}
|
||||
<CustomAlertModal
|
||||
isOpen={isDeleteModalOpen}
|
||||
onOpenChange={setIsDeleteModalOpen}
|
||||
title="Remove SAML Configuration"
|
||||
size="md"
|
||||
>
|
||||
<div className="flex flex-col gap-4">
|
||||
<p className="text-default-600 text-sm">
|
||||
Are you sure you want to remove the SAML SSO configuration? Users
|
||||
will no longer be able to sign in using SAML.
|
||||
</p>
|
||||
<div className="flex w-full justify-end gap-4">
|
||||
<Button
|
||||
type="button"
|
||||
variant="ghost"
|
||||
size="lg"
|
||||
onClick={() => setIsDeleteModalOpen(false)}
|
||||
disabled={isDeleting}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
type="button"
|
||||
variant="destructive"
|
||||
size="lg"
|
||||
disabled={isDeleting}
|
||||
onClick={handleRemoveSaml}
|
||||
>
|
||||
<Trash2Icon className="size-4" />
|
||||
{isDeleting ? "Removing..." : "Remove"}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</CustomAlertModal>
|
||||
|
||||
<Card variant="base" padding="lg">
|
||||
<CardHeader>
|
||||
<div className="flex flex-col gap-1">
|
||||
@@ -137,10 +98,11 @@ export const SamlIntegrationCard = ({ samlConfig }: { samlConfig?: any }) => {
|
||||
<Button
|
||||
size="sm"
|
||||
variant="destructive"
|
||||
onClick={() => setIsDeleteModalOpen(true)}
|
||||
disabled={isDeleting}
|
||||
onClick={handleRemoveSaml}
|
||||
>
|
||||
<Trash2Icon size={16} />
|
||||
Remove
|
||||
{!isDeleting && <Trash2Icon size={16} />}
|
||||
{isDeleting ? "Removing..." : "Remove"}
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
export { IntegrationActionButtons } from "./integration-action-buttons";
|
||||
export { IntegrationCardHeader } from "./integration-card-header";
|
||||
export { IntegrationSkeleton } from "./integration-skeleton";
|
||||
export { LinkCard } from "./link-card";
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { ExternalLinkIcon, LucideIcon } from "lucide-react";
|
||||
import Link from "next/link";
|
||||
|
||||
import { Button } from "@/components/shadcn";
|
||||
import { CustomLink } from "@/components/ui/custom/custom-link";
|
||||
|
||||
import { Card, CardContent, CardHeader } from "../../shadcn";
|
||||
|
||||
interface LinkCardProps {
|
||||
icon: LucideIcon;
|
||||
title: string;
|
||||
description: string;
|
||||
learnMoreUrl: string;
|
||||
learnMoreAriaLabel: string;
|
||||
bodyText: string;
|
||||
linkHref: string;
|
||||
linkText: string;
|
||||
}
|
||||
|
||||
export const LinkCard = ({
|
||||
icon: Icon,
|
||||
title,
|
||||
description,
|
||||
learnMoreUrl,
|
||||
learnMoreAriaLabel,
|
||||
bodyText,
|
||||
linkHref,
|
||||
linkText,
|
||||
}: LinkCardProps) => {
|
||||
return (
|
||||
<Card variant="base" padding="lg">
|
||||
<CardHeader>
|
||||
<div className="flex w-full flex-col items-start gap-2 sm:flex-row sm:items-center sm:justify-between">
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="dark:bg-prowler-blue-800 flex h-10 w-10 items-center justify-center rounded-lg bg-gray-100">
|
||||
<Icon size={24} className="text-gray-700 dark:text-gray-200" />
|
||||
</div>
|
||||
<div className="flex flex-col gap-1">
|
||||
<h4 className="text-lg font-bold text-gray-900 dark:text-gray-100">
|
||||
{title}
|
||||
</h4>
|
||||
<div className="flex flex-col items-start gap-2 sm:flex-row sm:items-center">
|
||||
<p className="text-xs text-nowrap text-gray-500 dark:text-gray-300">
|
||||
{description}
|
||||
</p>
|
||||
<CustomLink
|
||||
href={learnMoreUrl}
|
||||
aria-label={learnMoreAriaLabel}
|
||||
size="xs"
|
||||
>
|
||||
Learn more
|
||||
</CustomLink>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex items-center gap-2 self-end sm:self-center">
|
||||
<Button asChild size="sm">
|
||||
<Link href={linkHref}>
|
||||
<ExternalLinkIcon size={14} />
|
||||
{linkText}
|
||||
</Link>
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<p className="text-sm text-gray-600 dark:text-gray-300">{bodyText}</p>
|
||||
</CardContent>
|
||||
</Card>
|
||||
);
|
||||
};
|
||||
@@ -1,20 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { ShieldCheckIcon } from "lucide-react";
|
||||
|
||||
import { LinkCard } from "../shared/link-card";
|
||||
|
||||
export const SsoLinkCard = () => {
|
||||
return (
|
||||
<LinkCard
|
||||
icon={ShieldCheckIcon}
|
||||
title="SSO Configuration"
|
||||
description="Configure SAML Single Sign-On for your organization."
|
||||
learnMoreUrl="https://docs.prowler.com/projects/prowler-open-source/en/latest/tutorials/prowler-app-sso/"
|
||||
learnMoreAriaLabel="Learn more about SSO configuration"
|
||||
bodyText="SSO configuration is available in your User Profile. Enable SAML Single Sign-On to allow users to authenticate using your organization's identity provider."
|
||||
linkHref="/profile"
|
||||
linkText="Go to Profile"
|
||||
/>
|
||||
);
|
||||
};
|
||||
@@ -1,72 +0,0 @@
|
||||
/**
|
||||
* ChainOfThoughtDisplay component
|
||||
* Displays tool execution progress for Lighthouse assistant messages
|
||||
*/
|
||||
|
||||
import { CheckCircle2 } from "lucide-react";
|
||||
|
||||
import {
|
||||
ChainOfThought,
|
||||
ChainOfThoughtContent,
|
||||
ChainOfThoughtHeader,
|
||||
ChainOfThoughtStep,
|
||||
} from "@/components/ai-elements/chain-of-thought";
|
||||
import {
|
||||
CHAIN_OF_THOUGHT_ACTIONS,
|
||||
type ChainOfThoughtEvent,
|
||||
getChainOfThoughtHeaderText,
|
||||
getChainOfThoughtStepLabel,
|
||||
isMetaTool,
|
||||
} from "@/components/lighthouse/chat-utils";
|
||||
|
||||
interface ChainOfThoughtDisplayProps {
|
||||
events: ChainOfThoughtEvent[];
|
||||
isStreaming: boolean;
|
||||
messageKey: string;
|
||||
}
|
||||
|
||||
export function ChainOfThoughtDisplay({
|
||||
events,
|
||||
isStreaming,
|
||||
messageKey,
|
||||
}: ChainOfThoughtDisplayProps) {
|
||||
if (events.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const headerText = getChainOfThoughtHeaderText(isStreaming, events);
|
||||
|
||||
return (
|
||||
<div className="mb-4">
|
||||
<ChainOfThought defaultOpen={false}>
|
||||
<ChainOfThoughtHeader>{headerText}</ChainOfThoughtHeader>
|
||||
<ChainOfThoughtContent>
|
||||
{events.map((event, eventIdx) => {
|
||||
const { action, metaTool, tool } = event;
|
||||
|
||||
// Only show tool_complete events (skip planning and start)
|
||||
if (action !== CHAIN_OF_THOUGHT_ACTIONS.COMPLETE) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Skip actual tool execution events (only show meta-tools)
|
||||
if (!isMetaTool(metaTool)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const label = getChainOfThoughtStepLabel(metaTool, tool);
|
||||
|
||||
return (
|
||||
<ChainOfThoughtStep
|
||||
key={`${messageKey}-cot-${eventIdx}`}
|
||||
icon={CheckCircle2}
|
||||
label={label}
|
||||
status="complete"
|
||||
/>
|
||||
);
|
||||
})}
|
||||
</ChainOfThoughtContent>
|
||||
</ChainOfThought>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
/**
|
||||
* Utilities for Lighthouse chat message processing
|
||||
* Client-side utilities for chat.tsx
|
||||
*/
|
||||
|
||||
import {
|
||||
CHAIN_OF_THOUGHT_ACTIONS,
|
||||
ERROR_PREFIX,
|
||||
MESSAGE_ROLES,
|
||||
MESSAGE_STATUS,
|
||||
META_TOOLS,
|
||||
} from "@/lib/lighthouse/constants";
|
||||
import type { ChainOfThoughtData, Message } from "@/lib/lighthouse/types";
|
||||
|
||||
// Re-export constants for convenience
|
||||
export {
|
||||
CHAIN_OF_THOUGHT_ACTIONS,
|
||||
ERROR_PREFIX,
|
||||
MESSAGE_ROLES,
|
||||
MESSAGE_STATUS,
|
||||
META_TOOLS,
|
||||
};
|
||||
|
||||
// Re-export types
|
||||
export type { ChainOfThoughtData as ChainOfThoughtEvent, Message };
|
||||
|
||||
/**
|
||||
* Extracts text content from a message by filtering and joining text parts
|
||||
*
|
||||
* @param message - The message to extract text from
|
||||
* @returns The concatenated text content
|
||||
*/
|
||||
export function extractMessageText(message: Message): string {
|
||||
return message.parts
|
||||
.filter((p) => p.type === "text")
|
||||
.map((p) => (p.text ? p.text : ""))
|
||||
.join("");
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts chain-of-thought events from a message
|
||||
*
|
||||
* @param message - The message to extract events from
|
||||
* @returns Array of chain-of-thought events
|
||||
*/
|
||||
export function extractChainOfThoughtEvents(
|
||||
message: Message,
|
||||
): ChainOfThoughtData[] {
|
||||
return message.parts
|
||||
.filter((part) => part.type === "data-chain-of-thought")
|
||||
.map((part) => part.data as ChainOfThoughtData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the label for a chain-of-thought step based on meta-tool and tool name
|
||||
*
|
||||
* @param metaTool - The meta-tool name
|
||||
* @param tool - The actual tool name
|
||||
* @returns A human-readable label for the step
|
||||
*/
|
||||
export function getChainOfThoughtStepLabel(
|
||||
metaTool: string,
|
||||
tool: string | null,
|
||||
): string {
|
||||
if (metaTool === META_TOOLS.DESCRIBE && tool) {
|
||||
return `Retrieving ${tool} tool info`;
|
||||
}
|
||||
|
||||
if (metaTool === META_TOOLS.EXECUTE && tool) {
|
||||
return `Executing ${tool}`;
|
||||
}
|
||||
|
||||
return tool || "Completed";
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if a meta-tool is a wrapper tool (describe_tool or execute_tool)
|
||||
*
|
||||
* @param metaTool - The meta-tool name to check
|
||||
* @returns True if it's a meta-tool, false otherwise
|
||||
*/
|
||||
export function isMetaTool(metaTool: string): boolean {
|
||||
return metaTool === META_TOOLS.DESCRIBE || metaTool === META_TOOLS.EXECUTE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the header text for chain-of-thought display
|
||||
*
|
||||
* @param isStreaming - Whether the message is currently streaming
|
||||
* @param events - The chain-of-thought events
|
||||
* @returns The header text to display
|
||||
*/
|
||||
export function getChainOfThoughtHeaderText(
|
||||
isStreaming: boolean,
|
||||
events: ChainOfThoughtData[],
|
||||
): string {
|
||||
if (!isStreaming) {
|
||||
return "Thought process";
|
||||
}
|
||||
|
||||
// Find the last completed tool to show current status
|
||||
const lastCompletedEvent = events
|
||||
.slice()
|
||||
.reverse()
|
||||
.find((e) => e.action === CHAIN_OF_THOUGHT_ACTIONS.COMPLETE && e.tool);
|
||||
|
||||
if (lastCompletedEvent?.tool) {
|
||||
return `Executing ${lastCompletedEvent.tool}...`;
|
||||
}
|
||||
|
||||
return "Processing...";
|
||||
}
|
||||
@@ -2,15 +2,12 @@
|
||||
|
||||
import { useChat } from "@ai-sdk/react";
|
||||
import { DefaultChatTransport } from "ai";
|
||||
import { Plus } from "lucide-react";
|
||||
import { Copy, Plus, RotateCcw } from "lucide-react";
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { Streamdown } from "streamdown";
|
||||
|
||||
import { getLighthouseModelIds } from "@/actions/lighthouse/lighthouse";
|
||||
import {
|
||||
Conversation,
|
||||
ConversationContent,
|
||||
ConversationScrollButton,
|
||||
} from "@/components/ai-elements/conversation";
|
||||
import { Action, Actions } from "@/components/lighthouse/ai-elements/actions";
|
||||
import {
|
||||
PromptInput,
|
||||
PromptInputBody,
|
||||
@@ -19,13 +16,7 @@ import {
|
||||
PromptInputToolbar,
|
||||
PromptInputTools,
|
||||
} from "@/components/lighthouse/ai-elements/prompt-input";
|
||||
import {
|
||||
ERROR_PREFIX,
|
||||
MESSAGE_ROLES,
|
||||
MESSAGE_STATUS,
|
||||
} from "@/components/lighthouse/chat-utils";
|
||||
import { Loader } from "@/components/lighthouse/loader";
|
||||
import { MessageItem } from "@/components/lighthouse/message-item";
|
||||
import {
|
||||
Button,
|
||||
Card,
|
||||
@@ -69,11 +60,6 @@ interface SelectedModel {
|
||||
modelName: string;
|
||||
}
|
||||
|
||||
interface ExtendedError extends Error {
|
||||
status?: number;
|
||||
body?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
const SUGGESTED_ACTIONS: SuggestedAction[] = [
|
||||
{
|
||||
title: "Are there any exposed S3",
|
||||
@@ -216,18 +202,14 @@ export const Chat = ({
|
||||
// There is no specific way to output the error message from langgraph supervisor
|
||||
// Hence, all error messages are sent as normal messages with the prefix [LIGHTHOUSE_ANALYST_ERROR]:
|
||||
// Detect error messages sent from backend using specific prefix and display the error
|
||||
// Use includes() instead of startsWith() to catch errors that occur mid-stream (after text has been sent)
|
||||
const firstTextPart = message.parts.find((p) => p.type === "text");
|
||||
if (
|
||||
firstTextPart &&
|
||||
"text" in firstTextPart &&
|
||||
firstTextPart.text.includes(ERROR_PREFIX)
|
||||
firstTextPart.text.startsWith("[LIGHTHOUSE_ANALYST_ERROR]:")
|
||||
) {
|
||||
// Extract error text - handle both start-of-message and mid-stream errors
|
||||
const fullText = firstTextPart.text;
|
||||
const errorIndex = fullText.indexOf(ERROR_PREFIX);
|
||||
const errorText = fullText
|
||||
.substring(errorIndex + ERROR_PREFIX.length)
|
||||
const errorText = firstTextPart.text
|
||||
.replace("[LIGHTHOUSE_ANALYST_ERROR]:", "")
|
||||
.trim();
|
||||
setErrorMessage(errorText);
|
||||
// Remove error message from chat history
|
||||
@@ -237,7 +219,7 @@ export const Chat = ({
|
||||
return !(
|
||||
textPart &&
|
||||
"text" in textPart &&
|
||||
textPart.text.includes(ERROR_PREFIX)
|
||||
textPart.text.startsWith("[LIGHTHOUSE_ANALYST_ERROR]:")
|
||||
);
|
||||
}),
|
||||
);
|
||||
@@ -263,6 +245,8 @@ export const Chat = ({
|
||||
},
|
||||
});
|
||||
|
||||
const messagesContainerRef = useRef<HTMLDivElement | null>(null);
|
||||
|
||||
const restoreLastUserMessage = () => {
|
||||
let restoredText = "";
|
||||
|
||||
@@ -298,14 +282,19 @@ export const Chat = ({
|
||||
};
|
||||
|
||||
const stopGeneration = () => {
|
||||
if (
|
||||
status === MESSAGE_STATUS.STREAMING ||
|
||||
status === MESSAGE_STATUS.SUBMITTED
|
||||
) {
|
||||
if (status === "streaming" || status === "submitted") {
|
||||
stop();
|
||||
}
|
||||
};
|
||||
|
||||
// Auto-scroll to bottom when new messages arrive or when streaming
|
||||
useEffect(() => {
|
||||
if (messagesContainerRef.current) {
|
||||
messagesContainerRef.current.scrollTop =
|
||||
messagesContainerRef.current.scrollHeight;
|
||||
}
|
||||
}, [messages, status]);
|
||||
|
||||
// Handlers
|
||||
const handleNewChat = () => {
|
||||
setMessages([]);
|
||||
@@ -322,7 +311,7 @@ export const Chat = ({
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="relative flex h-full min-w-0 flex-col overflow-hidden">
|
||||
<div className="relative flex h-[calc(100vh-(--spacing(16)))] min-w-0 flex-col overflow-hidden">
|
||||
{/* Header with New Chat button */}
|
||||
{messages.length > 0 && (
|
||||
<div className="border-default-200 dark:border-default-100 border-b px-2 py-3 sm:px-4">
|
||||
@@ -393,18 +382,18 @@ export const Chat = ({
|
||||
"An error occurred. Please retry your message."}
|
||||
</p>
|
||||
{/* Original error details for native errors */}
|
||||
{error && (error as ExtendedError).status && (
|
||||
{error && (error as any).status && (
|
||||
<p className="text-text-neutral-tertiary mt-1 text-xs">
|
||||
Status: {(error as ExtendedError).status}
|
||||
Status: {(error as any).status}
|
||||
</p>
|
||||
)}
|
||||
{error && (error as ExtendedError).body && (
|
||||
{error && (error as any).body && (
|
||||
<details className="mt-2">
|
||||
<summary className="text-text-neutral-tertiary hover:text-text-neutral-secondary cursor-pointer text-xs">
|
||||
Show details
|
||||
</summary>
|
||||
<pre className="bg-bg-neutral-tertiary text-text-neutral-secondary mt-1 max-h-20 overflow-auto rounded p-2 text-xs">
|
||||
{JSON.stringify((error as ExtendedError).body, null, 2)}
|
||||
{JSON.stringify((error as any).body, null, 2)}
|
||||
</pre>
|
||||
</details>
|
||||
)}
|
||||
@@ -438,48 +427,113 @@ export const Chat = ({
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
<Conversation className="flex-1">
|
||||
<ConversationContent className="gap-4 px-2 py-4 sm:p-4">
|
||||
{messages.map((message, idx) => (
|
||||
<MessageItem
|
||||
key={`${message.id}-${idx}-${message.role}`}
|
||||
message={message}
|
||||
index={idx}
|
||||
isLastMessage={idx === messages.length - 1}
|
||||
status={status}
|
||||
onCopy={(text) => {
|
||||
navigator.clipboard.writeText(text);
|
||||
toast({
|
||||
title: "Copied",
|
||||
description: "Message copied to clipboard",
|
||||
});
|
||||
}}
|
||||
onRegenerate={regenerate}
|
||||
/>
|
||||
))}
|
||||
{/* Show loader only if no assistant message exists yet */}
|
||||
{(status === MESSAGE_STATUS.SUBMITTED ||
|
||||
status === MESSAGE_STATUS.STREAMING) &&
|
||||
messages.length > 0 &&
|
||||
messages[messages.length - 1].role === MESSAGE_ROLES.USER && (
|
||||
<div className="flex justify-start">
|
||||
<div className="bg-muted max-w-[80%] rounded-lg px-4 py-2">
|
||||
<Loader size="default" text="Thinking..." />
|
||||
<div
|
||||
className="no-scrollbar flex flex-1 flex-col gap-4 overflow-y-auto px-2 py-4 sm:p-4"
|
||||
ref={messagesContainerRef}
|
||||
>
|
||||
{messages.map((message, idx) => {
|
||||
const isLastMessage = idx === messages.length - 1;
|
||||
const messageText = message.parts
|
||||
.filter((p) => p.type === "text")
|
||||
.map((p) => ("text" in p ? p.text : ""))
|
||||
.join("");
|
||||
|
||||
// Check if this is the streaming assistant message (last message, assistant role, while streaming)
|
||||
const isStreamingAssistant =
|
||||
isLastMessage &&
|
||||
message.role === "assistant" &&
|
||||
status === "streaming";
|
||||
|
||||
// Use a composite key to ensure uniqueness even if IDs are duplicated temporarily
|
||||
const uniqueKey = `${message.id}-${idx}-${message.role}`;
|
||||
|
||||
return (
|
||||
<div key={uniqueKey}>
|
||||
<div
|
||||
className={`flex ${
|
||||
message.role === "user" ? "justify-end" : "justify-start"
|
||||
}`}
|
||||
>
|
||||
<div
|
||||
className={`max-w-[80%] rounded-lg px-4 py-2 ${
|
||||
message.role === "user"
|
||||
? "bg-bg-neutral-tertiary border-border-neutral-secondary border"
|
||||
: "bg-muted"
|
||||
}`}
|
||||
>
|
||||
{/* Show loader before text appears or while streaming empty content */}
|
||||
{isStreamingAssistant && !messageText ? (
|
||||
<Loader size="default" text="Thinking..." />
|
||||
) : (
|
||||
<div>
|
||||
<Streamdown
|
||||
parseIncompleteMarkdown={true}
|
||||
shikiTheme={["github-light", "github-dark"]}
|
||||
controls={{
|
||||
code: true,
|
||||
table: true,
|
||||
mermaid: true,
|
||||
}}
|
||||
allowedLinkPrefixes={["*"]}
|
||||
allowedImagePrefixes={["*"]}
|
||||
>
|
||||
{messageText}
|
||||
</Streamdown>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</ConversationContent>
|
||||
<ConversationScrollButton />
|
||||
</Conversation>
|
||||
|
||||
{/* Actions for assistant messages */}
|
||||
{message.role === "assistant" &&
|
||||
isLastMessage &&
|
||||
messageText &&
|
||||
status !== "streaming" && (
|
||||
<div className="mt-2 flex justify-start">
|
||||
<Actions className="max-w-[80%]">
|
||||
<Action
|
||||
tooltip="Copy message"
|
||||
label="Copy"
|
||||
onClick={() => {
|
||||
navigator.clipboard.writeText(messageText);
|
||||
toast({
|
||||
title: "Copied",
|
||||
description: "Message copied to clipboard",
|
||||
});
|
||||
}}
|
||||
>
|
||||
<Copy className="h-3 w-3" />
|
||||
</Action>
|
||||
<Action
|
||||
tooltip="Regenerate response"
|
||||
label="Retry"
|
||||
onClick={() => regenerate()}
|
||||
>
|
||||
<RotateCcw className="h-3 w-3" />
|
||||
</Action>
|
||||
</Actions>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
{/* Show loader only if no assistant message exists yet */}
|
||||
{(status === "submitted" || status === "streaming") &&
|
||||
messages.length > 0 &&
|
||||
messages[messages.length - 1].role === "user" && (
|
||||
<div className="flex justify-start">
|
||||
<div className="bg-muted max-w-[80%] rounded-lg px-4 py-2">
|
||||
<Loader size="default" text="Thinking..." />
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="mx-auto w-full px-4 pb-16 md:max-w-3xl md:pb-16">
|
||||
<PromptInput
|
||||
onSubmit={(message) => {
|
||||
if (
|
||||
status === MESSAGE_STATUS.STREAMING ||
|
||||
status === MESSAGE_STATUS.SUBMITTED
|
||||
) {
|
||||
if (status === "streaming" || status === "submitted") {
|
||||
return;
|
||||
}
|
||||
if (message.text?.trim()) {
|
||||
@@ -545,24 +599,20 @@ export const Chat = ({
|
||||
<PromptInputSubmit
|
||||
status={status}
|
||||
type={
|
||||
status === MESSAGE_STATUS.STREAMING ||
|
||||
status === MESSAGE_STATUS.SUBMITTED
|
||||
status === "streaming" || status === "submitted"
|
||||
? "button"
|
||||
: "submit"
|
||||
}
|
||||
onClick={(event) => {
|
||||
if (
|
||||
status === MESSAGE_STATUS.STREAMING ||
|
||||
status === MESSAGE_STATUS.SUBMITTED
|
||||
) {
|
||||
if (status === "streaming" || status === "submitted") {
|
||||
event.preventDefault();
|
||||
stopGeneration();
|
||||
}
|
||||
}}
|
||||
disabled={
|
||||
!uiState.inputValue?.trim() &&
|
||||
status !== MESSAGE_STATUS.STREAMING &&
|
||||
status !== MESSAGE_STATUS.SUBMITTED
|
||||
status !== "streaming" &&
|
||||
status !== "submitted"
|
||||
}
|
||||
/>
|
||||
</PromptInputToolbar>
|
||||
|
||||
@@ -69,7 +69,7 @@ export const refreshModelsInBackground = async (
|
||||
}
|
||||
|
||||
// Wait for task to complete
|
||||
const modelsStatus = await checkTaskStatus(modelsResult.data.id, 40, 2000);
|
||||
const modelsStatus = await checkTaskStatus(modelsResult.data.id);
|
||||
if (!modelsStatus.completed) {
|
||||
throw new Error(modelsStatus.error || "Model refresh failed");
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user