Compare commits

...

6 Commits

Author SHA1 Message Date
Alan Buscaglia
dd747f7474 feat(ui): hide new overview route and filter mongo providers (#9314) 2025-11-25 14:31:43 +01:00
Alan Buscaglia
eaf2721569 fix: add filters for mongo providers and findings (#9311) 2025-11-25 13:21:05 +01:00
Andoni Alonso
d912f05300 docs: refactor Lighthouse AI pages (#9310)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
2025-11-25 13:21:00 +01:00
Chandrapal Badshah
29c1ae2021 docs: Lighthouse multi LLM provider support (#9306)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
Co-authored-by: Andoni A. <14891798+andoniaf@users.noreply.github.com>
2025-11-25 13:20:47 +01:00
Daniel Barranquero
f5b52541ce fix(api): add alter to mongodbatlas migration (#9308) 2025-11-25 11:29:49 +01:00
Prowler Bot
a99a4f8e26 chore(api): Update prowler dependency to v5.14 for release 5.14.0 (#9305)
Co-authored-by: prowler-bot <179230569+prowler-bot@users.noreply.github.com>
2025-11-25 09:50:30 +01:00
51 changed files with 476 additions and 346 deletions

8
api/poetry.lock generated
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "about-time"
@@ -4705,8 +4705,8 @@ tzlocal = "5.3.1"
[package.source]
type = "git"
url = "https://github.com/prowler-cloud/prowler.git"
reference = "master"
resolved_reference = "de5aba6d4db54eed4c95cb7629443da186c17afd"
reference = "v5.14"
resolved_reference = "3b05a1430e016cee92b60973705cba400255d9e5"
[[package]]
name = "psutil"
@@ -6860,4 +6860,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "943e2cd6b87229704550d4e140b36509fb9f58896ebb5834b9fbabe28a9ee92f"
content-hash = "ed4c11443ea6a54da50a62c8c27efbf69608ef9f15894746696169fa010d04c9"

View File

@@ -24,7 +24,7 @@ dependencies = [
"drf-spectacular-jsonapi==0.5.1",
"gunicorn==23.0.0",
"lxml==5.3.2",
"prowler @ git+https://github.com/prowler-cloud/prowler.git@master",
"prowler @ git+https://github.com/prowler-cloud/prowler.git@v5.14",
"psycopg2-binary==2.9.9",
"pytest-celery[redis] (>=1.0.1,<2.0.0)",
"sentry-sdk[django] (>=2.20.0,<3.0.0)",

View File

@@ -22,7 +22,7 @@ class Migration(migrations.Migration):
("kubernetes", "Kubernetes"),
("m365", "M365"),
("github", "GitHub"),
("oci", "Oracle Cloud Infrastructure"),
("oraclecloud", "Oracle Cloud Infrastructure"),
("iac", "IaC"),
],
default="aws",

View File

@@ -29,4 +29,8 @@ class Migration(migrations.Migration):
default="aws",
),
),
migrations.RunSQL(
"ALTER TYPE provider ADD VALUE IF NOT EXISTS 'mongodbatlas';",
reverse_sql=migrations.RunSQL.noop,
),
]

View File

@@ -52,7 +52,7 @@
{
"group": "Prowler Lighthouse AI",
"pages": [
"user-guide/tutorials/prowler-app-lighthouse"
"getting-started/products/prowler-lighthouse-ai"
]
},
{
@@ -109,7 +109,13 @@
"user-guide/tutorials/prowler-app-jira-integration"
]
},
"user-guide/tutorials/prowler-app-lighthouse",
{
"group": "Lighthouse AI",
"pages": [
"user-guide/tutorials/prowler-app-lighthouse",
"user-guide/tutorials/prowler-app-lighthouse-multi-llm"
]
},
"user-guide/tutorials/prowler-cloud-public-ips",
{
"group": "Tutorials",

View File

@@ -0,0 +1,180 @@
---
title: 'Overview'
---
import { VersionBadge } from "/snippets/version-badge.mdx"
<VersionBadge version="5.8.0" />
Prowler Lighthouse AI is a Cloud Security Analyst chatbot that helps you understand, prioritize, and remediate security findings in your cloud environments. It's designed to provide security expertise for teams without dedicated resources, acting as your 24/7 virtual cloud security analyst.
<img src="/images/prowler-app/lighthouse-intro.png" alt="Prowler Lighthouse" />
<Card title="Set Up Lighthouse AI" icon="rocket" href="/user-guide/tutorials/prowler-app-lighthouse#set-up">
Learn how to configure Lighthouse AI with your preferred LLM provider
</Card>
## Capabilities
Prowler Lighthouse AI is designed to be your AI security team member, with capabilities including:
### Natural Language Querying
Ask questions in plain English about your security findings. Examples:
- "What are my highest risk findings?"
- "Show me all S3 buckets with public access."
- "What security issues were found in my production accounts?"
<img src="/images/prowler-app/lighthouse-feature1.png" alt="Natural language querying" />
### Detailed Remediation Guidance
Get tailored step-by-step instructions for fixing security issues:
- Clear explanations of the problem and its impact
- Commands or console steps to implement fixes
- Alternative approaches with different solutions
<img src="/images/prowler-app/lighthouse-feature2.png" alt="Detailed Remediation" />
### Enhanced Context and Analysis
Lighthouse AI can provide additional context to help you understand the findings:
- Explain security concepts related to findings in simple terms
- Provide risk assessments based on your environment and context
- Connect related findings to show broader security patterns
<img src="/images/prowler-app/lighthouse-config.png" alt="Business Context" />
<img src="/images/prowler-app/lighthouse-feature3.png" alt="Contextual Responses" />
## Important Notes
Prowler Lighthouse AI is powerful, but there are limitations:
- **Continuous improvement**: Please report any issues, as the feature may make mistakes or encounter errors, despite extensive testing.
- **Access limitations**: Lighthouse AI can only access data the logged-in user can view. If you can't see certain information, Lighthouse AI can't see it either.
- **NextJS session dependence**: If your Prowler application session expires or logs out, Lighthouse AI will error out. Refresh and log back in to continue.
- **Response quality**: The response quality depends on the selected LLM provider and model. Choose models with strong tool-calling capabilities for best results. We recommend the `gpt-5` model from OpenAI.
### Getting Help
If you encounter issues with Prowler Lighthouse AI or have suggestions for improvements, please [reach out through our Slack channel](https://goto.prowler.com/slack).
### What Data Is Shared to LLM Providers?
The following API endpoints are accessible to Prowler Lighthouse AI. Data from these endpoints could be shared with the configured LLM provider, depending on the scope of the user's query:
#### Accessible API Endpoints
**User Management:**
- List all users - `/api/v1/users`
- Retrieve the current user's information - `/api/v1/users/me`
**Provider Management:**
- List all providers - `/api/v1/providers`
- Retrieve data from a provider - `/api/v1/providers/{id}`
**Scan Management:**
- List all scans - `/api/v1/scans`
- Retrieve data from a specific scan - `/api/v1/scans/{id}`
**Resource Management:**
- List all resources - `/api/v1/resources`
- Retrieve data for a resource - `/api/v1/resources/{id}`
**Findings Management:**
- List all findings - `/api/v1/findings`
- Retrieve data from a specific finding - `/api/v1/findings/{id}`
- Retrieve metadata values from findings - `/api/v1/findings/metadata`
**Overview Data:**
- Get aggregated findings data - `/api/v1/overviews/findings`
- Get findings data by severity - `/api/v1/overviews/findings_severity`
- Get aggregated provider data - `/api/v1/overviews/providers`
- Get findings data by service - `/api/v1/overviews/services`
**Compliance Management:**
- List compliance overviews (optionally filter by scan) - `/api/v1/compliance-overviews`
- Retrieve data from a specific compliance overview - `/api/v1/compliance-overviews/{id}`
#### Excluded API Endpoints
Not all Prowler API endpoints are integrated with Lighthouse AI. They are intentionally excluded for the following reasons:
- OpenAI/other LLM providers shouldn't have access to sensitive data (like fetching provider secrets and other sensitive config)
- User queries don't need responses from those API endpoints (e.g., tasks, tenant details, downloading zip files)
**Excluded Endpoints:**
**User Management:**
- List specific users information - `/api/v1/users/{id}`
- List user memberships - `/api/v1/users/{user_pk}/memberships`
- Retrieve membership data from the user - `/api/v1/users/{user_pk}/memberships/{id}`
**Tenant Management:**
- List all tenants - `/api/v1/tenants`
- Retrieve data from a tenant - `/api/v1/tenants/{id}`
- List tenant memberships - `/api/v1/tenants/{tenant_pk}/memberships`
- List all invitations - `/api/v1/tenants/invitations`
- Retrieve data from tenant invitation - `/api/v1/tenants/invitations/{id}`
**Security and Configuration:**
- List all secrets - `/api/v1/providers/secrets`
- Retrieve data from a secret - `/api/v1/providers/secrets/{id}`
- List all provider groups - `/api/v1/provider-groups`
- Retrieve data from a provider group - `/api/v1/provider-groups/{id}`
**Reports and Tasks:**
- Download zip report - `/api/v1/scans/{v1}/report`
- List all tasks - `/api/v1/tasks`
- Retrieve data from a specific task - `/api/v1/tasks/{id}`
**Lighthouse AI Configuration:**
- List LLM providers - `/api/v1/lighthouse/providers`
- Retrieve LLM provider - `/api/v1/lighthouse/providers/{id}`
- List available models - `/api/v1/lighthouse/models`
- Retrieve tenant configuration - `/api/v1/lighthouse/configuration`
<Note>
Agents can only call GET endpoints; they don't have access to other HTTP methods.
</Note>
## FAQs
**1. Which LLM providers are supported?**
Lighthouse AI supports three providers:
- **OpenAI** - GPT models (GPT-5, GPT-4o, etc.)
- **Amazon Bedrock** - Claude, Llama, Titan, and other models via AWS
- **OpenAI Compatible** - Custom endpoints like OpenRouter, Ollama, or any OpenAI-compatible service
For detailed configuration instructions, see [Using Multiple LLM Providers with Lighthouse](/user-guide/tutorials/prowler-app-lighthouse-multi-llm).
**2. Why a multi-agent supervisor model?**
Context windows are limited. While demo data fits inside the context window, querying real-world data often exceeds it. A multi-agent architecture is used so different agents fetch different sizes of data and respond with the minimum required data to the supervisor. This spreads the context window usage across agents.
**3. Is my security data shared with LLM providers?**
Minimal data is shared to generate useful responses. Agents can access security findings and remediation details when needed. Provider secrets are protected by design and cannot be read. The LLM provider credentials configured with Lighthouse AI are only accessible to our NextJS server and are never sent to the LLM providers. Resource metadata (names, tags, account/project IDs, etc) may be shared with the configured LLM provider based on query requirements.
**4. Can the Lighthouse AI change my cloud environment?**
No. The agent doesn't have the tools to make the changes, even if the configured cloud provider API keys contain permissions to modify resources.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 197 KiB

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 540 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 204 KiB

After

Width:  |  Height:  |  Size: 136 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 241 KiB

After

Width:  |  Height:  |  Size: 147 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 268 KiB

After

Width:  |  Height:  |  Size: 180 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 404 KiB

After

Width:  |  Height:  |  Size: 165 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 347 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 173 KiB

View File

@@ -0,0 +1,128 @@
---
title: 'Using Multiple LLM Providers with Lighthouse'
---
import { VersionBadge } from "/snippets/version-badge.mdx"
<VersionBadge version="5.14.0" />
Prowler Lighthouse AI supports multiple Large Language Model (LLM) providers, offering flexibility to choose the provider that best fits infrastructure, compliance requirements, and cost considerations. This guide explains how to configure and use different LLM providers with Lighthouse AI.
## Supported Providers
Lighthouse AI supports the following LLM providers:
- **OpenAI**: Provides access to GPT models (GPT-4o, GPT-4, etc.)
- **Amazon Bedrock**: Offers AWS-hosted access to Claude, Llama, Titan, and other models
- **OpenAI Compatible**: Supports custom endpoints like OpenRouter, Ollama, or any OpenAI-compatible service
## How Default Providers Work
All three providers can be configured for a tenant, but only one can be set as the default provider. The first configured provider automatically becomes the default.
When visiting Lighthouse AI chat, the default provider's default model loads automatically. Users can switch to any available LLM model (including those from non-default providers) using the dropdown in chat.
<img src="/images/prowler-app/lighthouse-switch-models.png" alt="Switch models in Lighthouse AI chat interface" />
## Configuring Providers
Navigate to **Configuration** → **Lighthouse AI** to see all three provider options with a **Connect** button under each.
<img src="/images/prowler-app/lighthouse-configuration.png" alt="Prowler Lighthouse Configuration" />
### Connecting a Provider
To connect a provider:
1. Click **Connect** under the desired provider
2. Enter the required credentials
3. Select a default model for that provider
4. Click **Connect** to save
### OpenAI
#### Required Information
- **API Key**: OpenAI API key (starts with `sk-` or `sk-proj-`)
<Note>
To generate an OpenAI API key, visit https://platform.openai.com/api-keys
</Note>
### Amazon Bedrock
#### Required Information
- **AWS Access Key ID**: AWS access key ID
- **AWS Secret Access Key**: AWS secret access key
- **AWS Region**: Region where Bedrock is available (e.g., `us-east-1`, `us-west-2`)
#### Required Permissions
The AWS user must have the `AmazonBedrockLimitedAccess` managed policy attached:
```text
arn:aws:iam::aws:policy/AmazonBedrockLimitedAccess
```
<Note>
Currently, only AWS access key and secret key authentication is supported. Amazon Bedrock API key support will be available soon.
</Note>
<Note>
Available models depend on AWS region and account entitlements. Lighthouse AI displays only accessible models.
</Note>
### OpenAI Compatible
Use this option to connect to any LLM provider exposing OpenAI compatible API endpoint (OpenRouter, Ollama, etc.).
#### Required Information
- **API Key**: API key from the compatible service
- **Base URL**: API endpoint URL including the API version (e.g., `https://openrouter.ai/api/v1`)
#### Example: OpenRouter
1. Create an account at [OpenRouter](https://openrouter.ai/)
2. [Generate an API key](https://openrouter.ai/docs/guides/overview/auth/provisioning-api-keys) from the OpenRouter dashboard
3. Configure in Lighthouse AI:
- **API Key**: OpenRouter API key
- **Base URL**: `https://openrouter.ai/api/v1`
## Changing the Default Provider
To set a different provider as default:
1. Navigate to **Configuration** → **Lighthouse AI**
2. Click **Configure** under the provider you want as default
3. Click **Set as Default**
<img src="/images/prowler-app/lighthouse-set-default-provider.png" alt="Set default LLM provider" />
## Updating Provider Credentials
To update credentials for a connected provider:
1. Navigate to **Configuration** → **Lighthouse AI**
2. Click **Configure** under the provider
3. Enter the new credentials
4. Click **Update**
## Deleting a Provider
To remove a configured provider:
1. Navigate to **Configuration** → **Lighthouse AI**
2. Click **Configure** under the provider
3. Click **Delete**
## Model Recommendations
For best results with Lighthouse AI, the recommended model is `gpt-5` from OpenAI.
Models from other providers such as Amazon Bedrock and OpenAI Compatible endpoints can be connected and used, but performance is not guaranteed.
## Getting Help
For issues or suggestions, [reach out through our Slack channel](https://goto.prowler.com/slack).

View File

@@ -1,26 +1,20 @@
---
title: 'Prowler Lighthouse AI'
title: 'How It Works'
---
import { VersionBadge } from "/snippets/version-badge.mdx"
<VersionBadge version="5.8.0" />
Prowler Lighthouse AI is a Cloud Security Analyst chatbot that helps you understand, prioritize, and remediate security findings in your cloud environments. It's designed to provide security expertise for teams without dedicated resources, acting as your 24/7 virtual cloud security analyst.
<img src="/images/prowler-app/lighthouse-intro.png" alt="Prowler Lighthouse" />
## How It Works
Prowler Lighthouse AI uses OpenAI's language models and integrates with your Prowler security findings data.
Prowler Lighthouse AI integrates Large Language Models (LLMs) with Prowler security findings data.
Here's what's happening behind the scenes:
- The system uses a multi-agent architecture built with [LanggraphJS](https://github.com/langchain-ai/langgraphjs) for LLM logic and [Vercel AI SDK UI](https://sdk.vercel.ai/docs/ai-sdk-ui/overview) for frontend chatbot.
- It uses a ["supervisor" architecture](https://langchain-ai.lang.chat/langgraphjs/tutorials/multi_agent/agent_supervisor/) that interacts with different agents for specialized tasks. For example, `findings_agent` can analyze detected security findings, while `overview_agent` provides a summary of connected cloud accounts.
- The system connects to OpenAI models to understand, fetch the right data, and respond to the user's query.
- The system connects to the configured LLM provider to understand user's query, fetches the right data, and responds to the query.
<Note>
Lighthouse AI is tested against `gpt-4o` and `gpt-4o-mini` OpenAI models.
Lighthouse AI supports multiple LLM providers including OpenAI, Amazon Bedrock, and OpenAI-compatible services. For configuration details, see [Using Multiple LLM Providers with Lighthouse](/user-guide/tutorials/prowler-app-lighthouse-multi-llm).
</Note>
- The supervisor agent is the main contact point. It is what users interact with directly from the chat interface. It coordinates with other agents to answer users' questions comprehensively.
@@ -30,16 +24,22 @@ Lighthouse AI is tested against `gpt-4o` and `gpt-4o-mini` OpenAI models.
All agents can only read relevant security data. They cannot modify your data or access sensitive information like configured secrets or tenant details.
</Note>
## Set up
Getting started with Prowler Lighthouse AI is easy:
1. Go to the configuration page in your Prowler dashboard.
2. Enter your OpenAI API key.
3. Select your preferred model. The recommended one for best results is `gpt-4o`.
4. (Optional) Add business context to improve response quality and prioritization.
1. Navigate to **Configuration** → **Lighthouse AI**
2. Click **Connect** under the desired provider (OpenAI, Amazon Bedrock, or OpenAI Compatible)
3. Enter the required credentials
4. Select a default model
5. Click **Connect** to save
<img src="/images/prowler-app/lighthouse-config.png" alt="Lighthouse AI Configuration" />
<Note>
For detailed configuration instructions for each provider, see [Using Multiple LLM Providers with Lighthouse](/user-guide/tutorials/prowler-app-lighthouse-multi-llm).
</Note>
<img src="/images/prowler-app/lighthouse-configuration.png" alt="Lighthouse AI Configuration" />
### Adding Business Context
@@ -51,163 +51,3 @@ The optional business context field lets you provide additional information to h
- Current security initiatives or focus areas
Better context leads to more relevant responses and prioritization that aligns with your needs.
## Capabilities
Prowler Lighthouse AI is designed to be your AI security team member, with capabilities including:
### Natural Language Querying
Ask questions in plain English about your security findings. Examples:
- "What are my highest risk findings?"
- "Show me all S3 buckets with public access."
- "What security issues were found in my production accounts?"
<img src="/images/prowler-app/lighthouse-feature1.png" alt="Natural language querying" />
### Detailed Remediation Guidance
Get tailored step-by-step instructions for fixing security issues:
- Clear explanations of the problem and its impact
- Commands or console steps to implement fixes
- Alternative approaches with different solutions
<img src="/images/prowler-app/lighthouse-feature2.png" alt="Detailed Remediation" />
### Enhanced Context and Analysis
Lighthouse AI can provide additional context to help you understand the findings:
- Explain security concepts related to findings in simple terms
- Provide risk assessments based on your environment and context
- Connect related findings to show broader security patterns
<img src="/images/prowler-app/lighthouse-config.png" alt="Business Context" />
<img src="/images/prowler-app/lighthouse-feature3.png" alt="Contextual Responses" />
## Important Notes
Prowler Lighthouse AI is powerful, but there are limitations:
- **Continuous improvement**: Please report any issues, as the feature may make mistakes or encounter errors, despite extensive testing.
- **Access limitations**: Lighthouse AI can only access data the logged-in user can view. If you can't see certain information, Lighthouse AI can't see it either.
- **NextJS session dependence**: If your Prowler application session expires or logs out, Lighthouse AI will error out. Refresh and log back in to continue.
- **Response quality**: The response quality depends on the selected OpenAI model. For best results, use gpt-4o.
### Getting Help
If you encounter issues with Prowler Lighthouse AI or have suggestions for improvements, please [reach out through our Slack channel](https://goto.prowler.com/slack).
### What Data Is Shared to OpenAI?
The following API endpoints are accessible to Prowler Lighthouse AI. Data from these endpoints could be shared with OpenAI, depending on the scope of the user's query:
#### Accessible API Endpoints
**User Management:**
- List all users - `/api/v1/users`
- Retrieve the current user's information - `/api/v1/users/me`
**Provider Management:**
- List all providers - `/api/v1/providers`
- Retrieve data from a provider - `/api/v1/providers/{id}`
**Scan Management:**
- List all scans - `/api/v1/scans`
- Retrieve data from a specific scan - `/api/v1/scans/{id}`
**Resource Management:**
- List all resources - `/api/v1/resources`
- Retrieve data for a resource - `/api/v1/resources/{id}`
**Findings Management:**
- List all findings - `/api/v1/findings`
- Retrieve data from a specific finding - `/api/v1/findings/{id}`
- Retrieve metadata values from findings - `/api/v1/findings/metadata`
**Overview Data:**
- Get aggregated findings data - `/api/v1/overviews/findings`
- Get findings data by severity - `/api/v1/overviews/findings_severity`
- Get aggregated provider data - `/api/v1/overviews/providers`
- Get findings data by service - `/api/v1/overviews/services`
**Compliance Management:**
- List compliance overviews (optionally filter by scan) - `/api/v1/compliance-overviews`
- Retrieve data from a specific compliance overview - `/api/v1/compliance-overviews/{id}`
#### Excluded API Endpoints
Not all Prowler API endpoints are integrated with Lighthouse AI. They are intentionally excluded for the following reasons:
- OpenAI/other LLM providers shouldn't have access to sensitive data (like fetching provider secrets and other sensitive config)
- User queries don't need responses from those API endpoints (e.g., tasks, tenant details, downloading zip files)
**Excluded Endpoints:**
**User Management:**
- List specific users information - `/api/v1/users/{id}`
- List user memberships - `/api/v1/users/{user_pk}/memberships`
- Retrieve membership data from the user - `/api/v1/users/{user_pk}/memberships/{id}`
**Tenant Management:**
- List all tenants - `/api/v1/tenants`
- Retrieve data from a tenant - `/api/v1/tenants/{id}`
- List tenant memberships - `/api/v1/tenants/{tenant_pk}/memberships`
- List all invitations - `/api/v1/tenants/invitations`
- Retrieve data from tenant invitation - `/api/v1/tenants/invitations/{id}`
**Security and Configuration:**
- List all secrets - `/api/v1/providers/secrets`
- Retrieve data from a secret - `/api/v1/providers/secrets/{id}`
- List all provider groups - `/api/v1/provider-groups`
- Retrieve data from a provider group - `/api/v1/provider-groups/{id}`
**Reports and Tasks:**
- Download zip report - `/api/v1/scans/{v1}/report`
- List all tasks - `/api/v1/tasks`
- Retrieve data from a specific task - `/api/v1/tasks/{id}`
**Lighthouse AI Configuration:**
- List OpenAI configuration - `/api/v1/lighthouse-config`
- Retrieve OpenAI key and configuration - `/api/v1/lighthouse-config/{id}`
<Note>
Agents can only call GET endpoints; they don't have access to other HTTP methods.
</Note>
## FAQs
**1. Why only OpenAI models?**
During feature development, we evaluated other LLM models.
- **Claude AI** - Claude models have [tier-based ratelimits](https://docs.anthropic.com/en/api/rate-limits#requirements-to-advance-tier). For Lighthouse AI to answer slightly complex questions, there are a handful of API calls to the LLM provider within few seconds. With Claude's tiering system, users must purchase $400 credits or convert their subscription to monthly invoicing after talking to their sales team. This pricing may not suit all Prowler users.
- **Gemini Models** - Gemini lacks a solid tool calling feature like OpenAI. It calls functions recursively until exceeding limits. Gemini-2.5-Pro-Experimental is better than previous models regarding tool calling and responding, but it's still experimental.
- **Deepseek V3** - Doesn't support system prompt messages.
**2. Why a multi-agent supervisor model?**
Context windows are limited. While demo data fits inside the context window, querying real-world data often exceeds it. A multi-agent architecture is used so different agents fetch different sizes of data and respond with the minimum required data to the supervisor. This spreads the context window usage across agents.
**3. Is my security data shared with OpenAI?**
Minimal data is shared to generate useful responses. Agents can access security findings and remediation details when needed. Provider secrets are protected by design and cannot be read. The OpenAI key configured with Lighthouse AI is only accessible to our NextJS server and is never sent to LLMs. Resource metadata (names, tags, account/project IDs, etc) may be shared with OpenAI based on your query requirements.
**4. Can the Lighthouse AI change my cloud environment?**
No. The agent doesn't have the tools to make the changes, even if the configured cloud provider API keys contain permissions to modify resources.

View File

@@ -4,6 +4,61 @@ import { redirect } from "next/navigation";
import { apiBaseUrl, getAuthHeaders } from "@/lib";
import { handleApiResponse } from "@/lib/server-actions-helper";
import { FindingsResponse } from "@/types";
// Minimal shape of a JSON:API `included` entry as consumed by the mongo
// filtering below: "providers" entries carry `attributes.provider` (the
// provider type string), "scans" entries carry a provider relationship.
interface IncludedItem {
  type: string;
  id: string;
  attributes?: { provider?: string };
  relationships?: { provider?: { data?: { id: string } } };
}

// FindingsResponse extended with the optional JSON:API `included` array of
// side-loaded resources (scans, providers, ...).
type FindingsApiResponse = FindingsResponse & {
  included?: IncludedItem[];
};
/**
 * Removes findings that belong to MongoDB-based providers from a JSON:API
 * findings response, and strips the matching provider entries from the
 * `included` side-loaded resources.
 *
 * Mutates `result` in place and returns it so callers can use it inline.
 * A finding is linked to its provider indirectly: finding -> scan (via
 * `relationships.scan`) -> provider (via the scan's `included` entry).
 *
 * @param result - API response (possibly null/undefined) to filter.
 * @returns The same `result` reference, with mongo-related data removed.
 */
const filterMongoFindings = <T extends FindingsApiResponse | null | undefined>(
  result: T,
): T => {
  if (!result?.data) return result;

  const included = (result as FindingsApiResponse).included || [];

  // Get IDs of providers containing "mongo" in included items
  const mongoProviderIds = new Set(
    included
      .filter(
        (item) =>
          item.type === "providers" &&
          item.attributes?.provider?.toLowerCase().includes("mongo"),
      )
      .map((item) => item.id),
  );

  // Fast path (mirrors filterMongoScans): nothing mongo-related to strip.
  if (mongoProviderIds.size === 0) return result;

  // Precompute scan-id -> provider-id so the per-finding check below is O(1)
  // instead of a linear `included.find` per finding.
  const scanProviderIds = new Map<string, string | undefined>();
  for (const item of included) {
    if (item.type === "scans") {
      scanProviderIds.set(item.id, item.relationships?.provider?.data?.id);
    }
  }

  // Filter out findings associated with mongo providers
  result.data = result.data.filter((finding) => {
    const scanId = finding.relationships?.scan?.data?.id;
    const providerId =
      scanId !== undefined ? scanProviderIds.get(scanId) : undefined;
    return !providerId || !mongoProviderIds.has(providerId);
  });

  // Filter out mongo-related included items
  if ((result as FindingsApiResponse).included) {
    (result as FindingsApiResponse).included = included.filter(
      (item) =>
        !(
          item.type === "providers" &&
          item.attributes?.provider?.toLowerCase().includes("mongo")
        ),
    );
  }

  return result;
};
export const getFindings = async ({
page = 1,
@@ -33,7 +88,10 @@ export const getFindings = async ({
const findings = await fetch(url.toString(), {
headers,
});
return handleApiResponse(findings);
const result = await handleApiResponse(findings);
return filterMongoFindings(result);
} catch (error) {
console.error("Error fetching findings:", error);
return undefined;
@@ -70,7 +128,10 @@ export const getLatestFindings = async ({
const findings = await fetch(url.toString(), {
headers,
});
return handleApiResponse(findings);
const result = await handleApiResponse(findings);
return filterMongoFindings(result);
} catch (error) {
console.error("Error fetching findings:", error);
return undefined;

View File

@@ -39,7 +39,26 @@ export const getProviders = async ({
headers,
});
return handleApiResponse(response);
const result = (await handleApiResponse(response)) as
| ProvidersApiResponse
| undefined;
if (result?.data) {
// Filter out providers with provider type containing "mongo"
result.data = result.data.filter(
(provider) =>
!provider.attributes?.provider?.toLowerCase().includes("mongo"),
);
// Also filter out mongo-related included items if present
if (result.included) {
result.included = result.included.filter(
(item) => !item.type.toLowerCase().includes("mongo"),
);
}
}
return result;
} catch (error) {
console.error("Error fetching providers:", error);
return undefined;

View File

@@ -9,6 +9,46 @@ import {
} from "@/lib/compliance/compliance-report-types";
import { addScanOperation } from "@/lib/sentry-breadcrumbs";
import { handleApiError, handleApiResponse } from "@/lib/server-actions-helper";
import { ScansApiResponse } from "@/types";
/**
 * Strips scans that belong to MongoDB-based providers from a JSON:API scans
 * response, along with the corresponding provider entries in `included`.
 *
 * Mutates `result` in place and returns it. When the response contains no
 * mongo providers, the payload is returned untouched.
 *
 * @param result - Scans API response (possibly undefined) to filter.
 * @returns The same `result` reference, with mongo-related data removed.
 */
const filterMongoScans = (result: ScansApiResponse | undefined) => {
  if (!result?.data) return result;

  const sideloaded = result.included || [];

  // Collect the IDs of every sideloaded provider whose type mentions "mongo".
  const mongoIds = new Set<string>();
  for (const entry of sideloaded) {
    if (
      entry.type === "providers" &&
      entry.attributes?.provider?.toLowerCase().includes("mongo")
    ) {
      mongoIds.add(entry.id);
    }
  }

  // Nothing mongo-related present: return the payload untouched.
  if (mongoIds.size === 0) return result;

  // Keep only scans that are not linked to a mongo provider.
  result.data = result.data.filter((scan) => {
    const linkedProviderId = scan.relationships?.provider?.data?.id;
    return !linkedProviderId || !mongoIds.has(linkedProviderId);
  });

  // Drop the mongo provider entries from the sideloaded resources as well.
  if (result.included) {
    result.included = sideloaded.filter(
      (entry) =>
        entry.type !== "providers" ||
        !entry.attributes?.provider?.toLowerCase().includes("mongo"),
    );
  }

  return result;
};
export const getScans = async ({
page = 1,
@@ -44,7 +84,10 @@ export const getScans = async ({
try {
const response = await fetch(url.toString(), { headers });
return handleApiResponse(response);
const result = await handleApiResponse(response);
// Filter out mongo-related scans when provider is included
return filterMongoScans(result);
} catch (error) {
console.error("Error fetching scans:", error);
return undefined;

View File

@@ -1,5 +1,5 @@
import { Spacer } from "@heroui/spacer";
import React, { Suspense } from "react";
import { Suspense } from "react";
import {
getFindings,
@@ -78,7 +78,7 @@ export default async function Findings({
completedScans?.map((scan: ScanProps) => scan.id) || [];
const scanDetails = createScanDetailsMapping(
completedScans,
completedScans || [],
providersData,
) as { [uid: string]: ScanEntity }[];

View File

@@ -12,18 +12,18 @@ import { createDict } from "@/lib/helper";
import { FindingProps, SearchParamsProps } from "@/types";
import { LighthouseBanner } from "../../components/lighthouse/banner";
import { AccountsSelector } from "./new-overview/components/accounts-selector";
import { CheckFindingsSSR } from "./new-overview/components/check-findings";
import { ProviderTypeSelector } from "./new-overview/components/provider-type-selector";
import { AccountsSelector } from "./_new-overview/components/accounts-selector";
import { CheckFindingsSSR } from "./_new-overview/components/check-findings";
import { ProviderTypeSelector } from "./_new-overview/components/provider-type-selector";
import {
RiskSeverityChartSkeleton,
RiskSeverityChartSSR,
} from "./new-overview/components/risk-severity-chart";
import { StatusChartSkeleton } from "./new-overview/components/status-chart";
} from "./_new-overview/components/risk-severity-chart";
import { StatusChartSkeleton } from "./_new-overview/components/status-chart";
import {
ThreatScoreSkeleton,
ThreatScoreSSR,
} from "./new-overview/components/threat-score";
} from "./_new-overview/components/threat-score";
const FILTER_PREFIX = "filter[";

View File

@@ -1,151 +0,0 @@
#!/bin/bash
# Prowler UI - Pre-Commit Hook
# Optionally validates ONLY staged files against AGENTS.md standards using Claude Code
# Controlled by CODE_REVIEW_ENABLED in .env
#
# Flow:
#   1. Read CODE_REVIEW_ENABLED from ui/.env, repo-root .env, or ./.env (first found wins).
#   2. If enabled, pipe the staged TS/JS diff to the `claude` CLI and gate the
#      commit on a response whose first matching line is "STATUS: PASSED".
#   3. Always finish with `npm run healthcheck` (typecheck + lint) before allowing the commit.
# Abort the commit on the first failing command.
set -e
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 Prowler UI - Pre-Commit Hook"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Load .env file (look in git root directory)
# Search order: <git-root>/ui/.env, then <git-root>/.env, then ./.env;
# defaults to "false" when none of them exists.
GIT_ROOT=$(git rev-parse --show-toplevel)
if [ -f "$GIT_ROOT/ui/.env" ]; then
CODE_REVIEW_ENABLED=$(grep "^CODE_REVIEW_ENABLED" "$GIT_ROOT/ui/.env" | cut -d'=' -f2 | tr -d ' ')
elif [ -f "$GIT_ROOT/.env" ]; then
CODE_REVIEW_ENABLED=$(grep "^CODE_REVIEW_ENABLED" "$GIT_ROOT/.env" | cut -d'=' -f2 | tr -d ' ')
elif [ -f ".env" ]; then
CODE_REVIEW_ENABLED=$(grep "^CODE_REVIEW_ENABLED" .env | cut -d'=' -f2 | tr -d ' ')
else
CODE_REVIEW_ENABLED="false"
fi
# Normalize the value to lowercase
CODE_REVIEW_ENABLED=$(echo "$CODE_REVIEW_ENABLED" | tr '[:upper:]' '[:lower:]')
echo -e "${BLUE} Code Review Status: ${CODE_REVIEW_ENABLED}${NC}"
echo ""
# Get staged files (what will be committed)
# Only Added/Copied/Modified .ts/.tsx/.js/.jsx files; `|| true` keeps
# `set -e` from aborting the hook when grep matches nothing.
STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM | grep -E '\.(tsx?|jsx?)$' || true)
if [ "$CODE_REVIEW_ENABLED" = "true" ]; then
if [ -z "$STAGED_FILES" ]; then
echo -e "${YELLOW}⚠️ No TypeScript/JavaScript files staged to validate${NC}"
echo ""
else
echo -e "${YELLOW}🔍 Running Claude Code standards validation...${NC}"
echo ""
echo -e "${BLUE}📋 Files to validate:${NC}"
echo "$STAGED_FILES" | sed 's/^/ - /'
echo ""
echo -e "${BLUE}📤 Sending to Claude Code for validation...${NC}"
echo ""
# Build prompt with git diff of changes AND full context
# The quoted delimiter ('PROMPT_EOF') disables variable/command expansion,
# so the rules text below is passed to the model verbatim.
VALIDATION_PROMPT=$(
cat <<'PROMPT_EOF'
You are a code reviewer for the Prowler UI project. Analyze the code changes (git diff with full context) below and validate they comply with AGENTS.md standards.
**CRITICAL: You MUST check BOTH the changed lines AND the surrounding context for violations.**
**RULES TO CHECK:**
1. React Imports: NO `import * as React` or `import React, {` → Use `import { useState }`
2. TypeScript: NO union types like `type X = "a" | "b"` → Use const-based: `const X = {...} as const`
3. Tailwind: NO `var()` or hex colors in className → Use Tailwind utilities and semantic color classes (e.g., `bg-bg-neutral-tertiary`, `border-border-neutral-primary`)
4. cn(): Use for merging multiple classes or for conditionals (handles Tailwind conflicts with twMerge) → `cn(BUTTON_STYLES.base, BUTTON_STYLES.active, isLoading && "opacity-50")`
5. React 19: NO `useMemo`/`useCallback` without reason
6. Zod v4: Use `.min(1)` not `.nonempty()`, `z.email()` not `z.string().email()`. All inputs must be validated with Zod.
7. File Org: 1 feature = local, 2+ features = shared
8. Directives: Server Actions need "use server", clients need "use client"
9. Implement DRY, KISS principles. (example: reusable components, avoid repetition)
10. Layout must work for all the responsive breakpoints (mobile, tablet, desktop)
11. ANY types cannot be used - CRITICAL: Check for `: any` in all visible lines
12. Use the components inside components/shadcn if possible
13. Check Accessibility best practices (like alt tags in images, semantic HTML, Aria labels, etc.)
=== GIT DIFF WITH CONTEXT ===
PROMPT_EOF
)
# Add git diff to prompt with more context (U5 = 5 lines before/after)
VALIDATION_PROMPT="$VALIDATION_PROMPT
$(git diff --cached -U5)"
VALIDATION_PROMPT="$VALIDATION_PROMPT
=== END DIFF ===
**IMPORTANT: Your response MUST start with exactly one of these lines:**
STATUS: PASSED
STATUS: FAILED
**If FAILED:** List each violation with File, Line Number, Rule Number, and Issue.
**If PASSED:** Confirm all visible code (including context) complies with AGENTS.md standards.
**Start your response now with STATUS:**"
# Send to Claude Code
# Requires the `claude` CLI on PATH; stderr is folded into the captured
# output (2>&1). A non-zero exit falls through to the warning branch below
# and does NOT block the commit.
if VALIDATION_OUTPUT=$(echo "$VALIDATION_PROMPT" | claude 2>&1); then
echo "$VALIDATION_OUTPUT"
echo ""
# Check result - STRICT MODE: fail if status unclear
# Only an explicit "STATUS: PASSED" line lets the commit proceed; both
# "STATUS: FAILED" and an unparseable response exit 1.
if echo "$VALIDATION_OUTPUT" | grep -q "^STATUS: PASSED"; then
echo ""
echo -e "${GREEN}✅ VALIDATION PASSED${NC}"
echo ""
elif echo "$VALIDATION_OUTPUT" | grep -q "^STATUS: FAILED"; then
echo ""
echo -e "${RED}❌ VALIDATION FAILED${NC}"
echo -e "${RED}Fix violations before committing${NC}"
echo ""
exit 1
else
echo ""
echo -e "${RED}❌ VALIDATION ERROR${NC}"
echo -e "${RED}Could not determine validation status from Claude Code response${NC}"
echo -e "${YELLOW}Response must start with 'STATUS: PASSED' or 'STATUS: FAILED'${NC}"
echo ""
echo -e "${YELLOW}To bypass validation temporarily, set CODE_REVIEW_ENABLED=false in .env${NC}"
echo ""
exit 1
fi
else
echo -e "${YELLOW}⚠️ Claude Code not available${NC}"
fi
echo ""
fi
else
echo -e "${YELLOW}⏭️ Code review disabled (CODE_REVIEW_ENABLED=false)${NC}"
echo ""
fi
# Run healthcheck (typecheck and lint check)
# This gate always runs, even when the Claude review is disabled or skipped.
echo -e "${BLUE}🏥 Running healthcheck...${NC}"
echo ""
# Enter ui/ when it exists; `cd .` always succeeds, so the hook also works
# when already invoked from inside the ui directory.
cd ui || cd .
if npm run healthcheck; then
echo ""
echo -e "${GREEN}✅ Healthcheck passed${NC}"
echo ""
else
echo ""
echo -e "${RED}❌ Healthcheck failed${NC}"
echo -e "${RED}Fix type errors and linting issues before committing${NC}"
echo ""
exit 1
fi