Mirror of https://github.com/prowler-cloud/prowler.git (synced 2026-02-08 04:57:03 +00:00)

Compare commits: chore-api-... -> PRWLR-7212 (19 commits)
Commits compared (SHA1): 1bf91c5779, 0cda7716ce, e4ab6d3589, 82289f28bb, a5b59bc346, 78e59fbf78, 2aa506db92, e9dbf58ec5, 5b7bf307d4, c3e50e3600, a1a8b9e17e, c3fdfddff0, 78dcddee07, 11649f227a, 3bb3edc0bb, 47cf5758f5, 5bd9214e4f, fe358d08e9, 2caf001e21
ui/actions/lighthouse/findings.ts (new file, 82 lines)
@@ -0,0 +1,82 @@
"use server";

import { apiBaseUrl, getAuthHeaders, parseStringify } from "@/lib";

export const getLighthouseFindings = async ({
  page = 1,
  pageSize = 10,
  query = "",
  sort = "",
  filters = {},
  fields = [],
}) => {
  const headers = await getAuthHeaders({ contentType: false });

  // For lighthouse usage, handle invalid page numbers by defaulting to 1
  const validPage = isNaN(Number(page)) || page < 1 ? 1 : page;

  const url = new URL(`${apiBaseUrl}/findings`);

  if (validPage) url.searchParams.append("page[number]", validPage.toString());
  if (pageSize) url.searchParams.append("page[size]", pageSize.toString());

  if (query) url.searchParams.append("filter[search]", query);
  if (sort) url.searchParams.append("sort", sort);
  if (fields.length > 0) {
    url.searchParams.append("fields[findings]", fields.join(","));
  }

  Object.entries(filters).forEach(([key, value]) => {
    url.searchParams.append(key, String(value));
  });

  try {
    const findings = await fetch(url.toString(), {
      headers,
    });
    const data = await findings.json();
    const parsedData = parseStringify(data);
    return parsedData;
  } catch (error) {
    // eslint-disable-next-line no-console
    console.error("Error fetching lighthouse findings:", error);
    return undefined;
  }
};

export const getLighthouseLatestFindings = async ({
  page = 1,
  pageSize = 10,
  query = "",
  sort = "",
  filters = {},
}) => {
  const headers = await getAuthHeaders({ contentType: false });

  const validPage = isNaN(Number(page)) || page < 1 ? 1 : page;

  const url = new URL(`${apiBaseUrl}/findings/latest`);

  if (validPage) url.searchParams.append("page[number]", validPage.toString());
  if (pageSize) url.searchParams.append("page[size]", pageSize.toString());

  if (query) url.searchParams.append("filter[search]", query);
  if (sort) url.searchParams.append("sort", sort);

  Object.entries(filters).forEach(([key, value]) => {
    url.searchParams.append(key, String(value));
  });

  try {
    const findings = await fetch(url.toString(), {
      headers,
    });
    const data = await findings.json();
    const parsedData = parseStringify(data);
    return parsedData;
  } catch (error) {
    // eslint-disable-next-line no-console
    console.error("Error fetching lighthouse latest findings:", error);
    return undefined;
  }
};
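As a usage sketch only (not part of this changeset), a caller could combine the JSON:API paging and filter parameters that getLighthouseFindings forwards to /findings; the filter keys below are the same ones summary.ts uses later in this diff:

  // Hypothetical call; parameter values are illustrative.
  const failedFindings = await getLighthouseFindings({
    page: 1,
    pageSize: 25,
    sort: "severity",
    filters: {
      "filter[status]": "FAIL",
      "filter[muted]": "false",
    },
    fields: ["check_id", "severity"],
  });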
@@ -2,16 +2,55 @@ import { getLighthouseConfig } from "@/actions/lighthouse/lighthouse";
import { LighthouseIcon } from "@/components/icons/Icons";
import { Chat } from "@/components/lighthouse";
import { ContentLayout } from "@/components/ui";
import { CacheService } from "@/lib/lighthouse/cache";
import { suggestedActions } from "@/lib/lighthouse/suggested-actions";

export default async function AIChatbot() {
interface LighthousePageProps {
  searchParams: { cachedMessage?: string };
}

export default async function AIChatbot({ searchParams }: LighthousePageProps) {
  const config = await getLighthouseConfig();

  const hasConfig = !!config;
  const isActive = config?.attributes?.is_active ?? false;

  // Fetch cached content if a cached message type is specified
  let cachedContent = null;
  if (searchParams.cachedMessage) {
    const cached = await CacheService.getCachedMessage(
      searchParams.cachedMessage,
    );
    cachedContent = cached.success ? cached.data : null;
  }

  // Pre-fetch all question answers and processing status
  const isProcessing = await CacheService.isRecommendationProcessing();
  const questionAnswers: Record<string, string> = {};

  if (!isProcessing) {
    for (const action of suggestedActions) {
      if (action.questionRef) {
        const cached = await CacheService.getCachedMessage(
          `question_${action.questionRef}`,
        );
        if (cached.success && cached.data) {
          questionAnswers[action.questionRef] = cached.data;
        }
      }
    }
  }

  return (
    <ContentLayout title="Lighthouse AI" icon={<LighthouseIcon />}>
      <Chat hasConfig={hasConfig} isActive={isActive} />
      <Chat
        hasConfig={hasConfig}
        isActive={isActive}
        cachedContent={cachedContent}
        messageType={searchParams.cachedMessage}
        isProcessing={isProcessing}
        questionAnswers={questionAnswers}
      />
    </ContentLayout>
  );
}
@@ -154,7 +154,6 @@ const SSRDataNewFindingsTable = async ({
    sort,
    filters: combinedFilters,
  });

  // Create dictionaries for resources, scans, and providers
  const resourceDict = createDict("resources", findingsData);
  const scanDict = createDict("scans", findingsData);
@@ -2,6 +2,7 @@ import { Bot } from "lucide-react";
import Link from "next/link";

import { getLighthouseConfig } from "@/actions/lighthouse/lighthouse";
import { CacheService, initializeTenantCache } from "@/lib/lighthouse/cache";

interface BannerConfig {
  message: string;
@@ -26,6 +27,11 @@ const renderBanner = ({ message, href, gradient }: BannerConfig) => (
  </Link>
);

// Triggers a background job to process the scans and generate recommendations
// Immediately returns a banner with different content
// If Lighthouse is not configured, returns a banner with a different message
// If cache available, returns a banner with the cached recommendation
// If recommendation is being processed, returns a banner with a different message
export const LighthouseBanner = async () => {
  try {
    const lighthouseConfig = await getLighthouseConfig();
@@ -37,14 +43,48 @@ export const LighthouseBanner = async () => {
        gradient:
          "bg-gradient-to-r from-green-500 to-blue-500 hover:from-green-600 hover:to-blue-600 focus:ring-green-500/50 dark:from-green-600 dark:to-blue-600 dark:hover:from-green-700 dark:hover:to-blue-700 dark:focus:ring-green-400/50",
      });
    } else {
    }

    // If lighthouse is active, trigger background job to process the scans and generate recommendations
    if (lighthouseConfig.attributes.is_active) {
      await initializeTenantCache();
    }

    // Check if recommendation is being processed
    const isProcessing = await CacheService.isRecommendationProcessing();

    if (isProcessing) {
      return renderBanner({
        message: "Use Lighthouse to review your findings and gain insights",
        href: "/lighthouse",
        message: "Lighthouse is reviewing your findings for insights",
        href: "",
        gradient:
          "bg-gradient-to-r from-green-500 to-blue-500 hover:from-green-600 hover:to-blue-600 focus:ring-green-500/50 dark:from-green-600 dark:to-blue-600 dark:hover:from-green-700 dark:hover:to-blue-700 dark:focus:ring-green-400/50",
          "bg-gradient-to-r from-orange-500 to-yellow-500 hover:from-orange-600 hover:to-yellow-600 focus:ring-orange-500/50 dark:from-orange-600 dark:to-yellow-600 dark:hover:from-orange-700 dark:hover:to-yellow-700 dark:focus:ring-orange-400/50",
      });
    }

    // Check if recommendation exists
    const cachedRecommendations = await CacheService.getRecommendations();

    if (
      cachedRecommendations.success &&
      cachedRecommendations.data &&
      cachedRecommendations.data.trim().length > 0
    ) {
      return renderBanner({
        message: cachedRecommendations.data,
        href: "/lighthouse?cachedMessage=recommendation",
        gradient:
          "bg-gradient-to-r from-blue-500 to-purple-600 hover:from-blue-600 hover:to-purple-700 focus:ring-blue-500/50 dark:from-blue-600 dark:to-purple-700 dark:hover:from-blue-700 dark:hover:to-purple-800 dark:focus:ring-blue-400/50",
      });
    }

    // Lighthouse configured but no recommendation and not processing - don't show banner
    return renderBanner({
      message: "Use Lighthouse to review your findings and gain insights",
      href: "/lighthouse",
      gradient:
        "bg-gradient-to-r from-green-500 to-blue-500 hover:from-green-600 hover:to-blue-600 focus:ring-green-500/50 dark:from-green-600 dark:to-blue-600 dark:hover:from-green-700 dark:hover:to-blue-700 dark:focus:ring-green-400/50",
    });
  } catch (error) {
    console.error("Error getting banner state:", error);
    return null;
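The banner logic above resolves in a fixed order; a condensed, illustrative sketch of that precedence (helper names such as defaultBanner, processingBanner, and recommendationBanner are assumptions, not code from this diff):

  // Illustrative ordering only; mirrors LighthouseBanner above.
  if (!lighthouseConfig) return defaultBanner; // Lighthouse not configured
  if (lighthouseConfig.attributes.is_active) await initializeTenantCache(); // kick off background processing
  if (await CacheService.isRecommendationProcessing()) return processingBanner;
  const rec = await CacheService.getRecommendations();
  if (rec.success && rec.data?.trim()) return recommendationBanner(rec.data);
  return defaultBanner;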
@@ -1,30 +1,39 @@
"use client";

import { useChat } from "@ai-sdk/react";
import { useEffect, useRef, useState } from "react";
import { useCallback, useEffect, useRef, useState } from "react";
import { useForm } from "react-hook-form";

import { MemoizedMarkdown } from "@/components/lighthouse/memoized-markdown";
import { CustomButton, CustomTextarea } from "@/components/ui/custom";
import { CustomLink } from "@/components/ui/custom/custom-link";
import { Form } from "@/components/ui/form";

interface SuggestedAction {
  title: string;
  label: string;
  action: string;
}
import {
  SuggestedAction,
  suggestedActions,
} from "@/lib/lighthouse/suggested-actions";

interface ChatProps {
  hasConfig: boolean;
  isActive: boolean;
  cachedContent?: string | null;
  messageType?: string;
  isProcessing: boolean;
  questionAnswers: Record<string, string>;
}

interface ChatFormData {
  message: string;
}

export const Chat = ({ hasConfig, isActive }: ChatProps) => {
export const Chat = ({
  hasConfig,
  isActive,
  cachedContent,
  messageType,
  isProcessing,
  questionAnswers,
}: ChatProps) => {
  const [errorMessage, setErrorMessage] = useState<string | null>(null);

  const {
@@ -74,6 +83,13 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
    },
  });

  // State for cached response streaming simulation
  const [isStreamingCached, setIsStreamingCached] = useState(false);
  const [streamingMessageId, setStreamingMessageId] = useState<string | null>(
    null,
  );
  const [currentStreamText, setCurrentStreamText] = useState("");

  const form = useForm<ChatFormData>({
    defaultValues: {
      message: "",
@@ -108,6 +124,149 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
    }
  }, [errorMessage, form, setMessages]);

  // Function to simulate streaming text
  const simulateStreaming = useCallback(
    async (text: string, messageId: string) => {
      setIsStreamingCached(true);
      setStreamingMessageId(messageId);
      setCurrentStreamText("");

      // Stream word by word with realistic delays
      const words = text.split(" ");
      let currentText = "";

      for (let i = 0; i < words.length; i++) {
        currentText += (i > 0 ? " " : "") + words[i];
        setCurrentStreamText(currentText);

        // Shorter delay between words for faster streaming
        const delay = Math.random() * 80 + 40; // 40-120ms delay per word
        await new Promise((resolve) => setTimeout(resolve, delay));
      }

      setIsStreamingCached(false);
      setStreamingMessageId(null);
      setCurrentStreamText("");
    },
    [],
  );

  // Function to handle cached response for suggested actions
  const handleCachedResponse = useCallback(
    async (action: SuggestedAction) => {
      if (!action.questionRef) {
        // No question ref, use normal flow
        append({
          role: "user",
          content: action.action,
        });
        return;
      }

      try {
        if (isProcessing) {
          // Processing in progress, fallback to real-time LLM
          append({
            role: "user",
            content: action.action,
          });
          return;
        }

        // Check if we have cached answer
        const cachedAnswer = questionAnswers[action.questionRef];

        if (cachedAnswer) {
          // Cache hit - use cached content with streaming simulation
          const userMessageId = `user-cached-${Date.now()}`;
          const assistantMessageId = `assistant-cached-${Date.now()}`;

          const userMessage = {
            id: userMessageId,
            role: "user" as const,
            content: action.action,
          };

          const assistantMessage = {
            id: assistantMessageId,
            role: "assistant" as const,
            content: "",
          };

          const updatedMessages = [...messages, userMessage, assistantMessage];
          setMessages(updatedMessages);

          // Start streaming simulation
          setTimeout(() => {
            simulateStreaming(cachedAnswer, assistantMessageId);
          }, 300);
        } else {
          // Cache miss/expired/error - fallback to real-time LLM
          append({
            role: "user",
            content: action.action,
          });
        }
      } catch (error) {
        console.error("Error handling cached response:", error);
        // Fall back to normal API flow
        append({
          role: "user",
          content: action.action,
        });
      }
    },
    [
      messages,
      setMessages,
      append,
      simulateStreaming,
      isProcessing,
      questionAnswers,
    ],
  );

  // Load cached message on mount if cachedContent is provided
  useEffect(() => {
    const loadCachedMessage = () => {
      if (cachedContent && messages.length === 0) {
        // Create different user questions based on message type
        let userQuestion = "Tell me more about this";

        if (messageType === "recommendation") {
          userQuestion =
            "Tell me more about the security issues Lighthouse found";
        }
        // Future: handle other message types
        // else if (messageType === "question_1") {
        //   userQuestion = "Previously cached question here";
        // }

        // Create message IDs
        const userMessageId = `user-cached-${messageType}-${Date.now()}`;
        const assistantMessageId = `assistant-cached-${messageType}-${Date.now()}`;

        // Add user message
        const userMessage = {
          id: userMessageId,
          role: "user" as const,
          content: userQuestion,
        };

        // Add assistant message with the cached content
        const assistantMessage = {
          id: assistantMessageId,
          role: "assistant" as const,
          content: cachedContent,
        };

        setMessages([userMessage, assistantMessage]);
      }
    };

    loadCachedMessage();
  }, [cachedContent, messageType, messages.length, setMessages]);

  // Sync form value with chat input
  useEffect(() => {
    const syntheticEvent = {
@@ -146,6 +305,19 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
    return () => document.removeEventListener("keydown", handleKeyDown);
  }, [messageValue, onFormSubmit]);

  // Update assistant message content during streaming simulation
  useEffect(() => {
    if (isStreamingCached && streamingMessageId && currentStreamText) {
      setMessages((prevMessages) =>
        prevMessages.map((msg) =>
          msg.id === streamingMessageId
            ? { ...msg, content: currentStreamText }
            : msg,
        ),
      );
    }
  }, [currentStreamText, isStreamingCached, streamingMessageId, setMessages]);

  useEffect(() => {
    if (messagesContainerRef.current && latestUserMsgRef.current) {
      const container = messagesContainerRef.current;
@@ -156,30 +328,6 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
    }
  }, [messages]);

  const suggestedActions: SuggestedAction[] = [
    {
      title: "Are there any exposed S3",
      label: "buckets in my AWS accounts?",
      action: "List exposed S3 buckets in my AWS accounts",
    },
    {
      title: "What is the risk of having",
      label: "RDS databases unencrypted?",
      action: "What is the risk of having RDS databases unencrypted?",
    },
    {
      title: "What is the CIS 1.10 compliance status",
      label: "of my Kubernetes cluster?",
      action:
        "What is the CIS 1.10 compliance status of my Kubernetes cluster?",
    },
    {
      title: "List my highest privileged",
      label: "AWS IAM users with full admin access?",
      action: "List my highest privileged AWS IAM users with full admin access",
    },
  ];

  // Determine if chat should be disabled
  const shouldDisableChat = !hasConfig || !isActive;

@@ -267,10 +415,7 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
            key={`suggested-action-${index}`}
            ariaLabel={`Send message: ${action.action}`}
            onPress={() => {
              append({
                role: "user",
                content: action.action,
              });
              handleCachedResponse(action); // Use cached response handler
            }}
            className="hover:bg-muted flex h-auto w-full flex-col items-start justify-start rounded-xl border bg-gray-50 px-4 py-3.5 text-left font-sans text-sm dark:bg-gray-900"
          >
@@ -320,10 +465,12 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
              </div>
            );
          })}
          {status === "submitted" && (
          {(status === "submitted" || isStreamingCached) && (
            <div className="flex justify-start">
              <div className="bg-muted max-w-[80%] rounded-lg px-4 py-2">
                <div className="animate-pulse">Thinking...</div>
                <div className="animate-pulse">
                  {isStreamingCached ? "" : "Thinking..."}
                </div>
              </div>
            </div>
          )}
@@ -358,10 +505,18 @@ export const Chat = ({ hasConfig, isActive }: ChatProps) => {
              ariaLabel={
                status === "submitted" ? "Stop generation" : "Send message"
              }
              isDisabled={status === "submitted" || !messageValue?.trim()}
              isDisabled={
                status === "submitted" ||
                isStreamingCached ||
                !messageValue?.trim()
              }
              className="flex h-10 w-10 flex-shrink-0 items-center justify-center rounded-lg bg-primary p-2 text-primary-foreground hover:bg-primary/90 disabled:opacity-50 dark:bg-primary/90"
            >
              {status === "submitted" ? <span>■</span> : <span>➤</span>}
              {status === "submitted" || isStreamingCached ? (
                <span>■</span>
              ) : (
                <span>➤</span>
              )}
            </CustomButton>
          </div>
        </form>
ui/lib/lighthouse/cache.ts (new file, 435 lines)
@@ -0,0 +1,435 @@
import Valkey from "iovalkey";

import { auth } from "@/auth.config";

import {
  generateBannerFromDetailed,
  generateDetailedRecommendation,
  generateQuestionAnswers,
} from "./recommendations";
import { suggestedActions } from "./suggested-actions";
import {
  compareProcessedScanIds,
  generateSecurityScanSummary,
  getLatestCompletedScansPerProvider,
} from "./summary";

let valkeyClient: Valkey | null = null;

export async function getValkeyClient(): Promise<Valkey> {
  if (!valkeyClient) {
    valkeyClient = new Valkey({
      host: process.env.VALKEY_HOST,
      port: parseInt(process.env.VALKEY_PORT || "6379"),
      connectTimeout: 5000,
      lazyConnect: true,
    });
  }

  return valkeyClient;
}

export class CacheService {
  private static async getTenantId(): Promise<string | null> {
    const session = await auth();
    return session?.tenantId || null;
  }

  private static async acquireProcessingLock(
    tenantId: string,
    lockKey: string,
    lockTtlSeconds: number = 300,
  ): Promise<boolean> {
    try {
      const client = await getValkeyClient();
      const fullLockKey = `_lighthouse:${tenantId}:lock:${lockKey}`;

      const result = await client.set(
        fullLockKey,
        Date.now().toString(),
        "EX",
        lockTtlSeconds,
        "NX",
      );

      return result === "OK";
    } catch (error) {
      return false;
    }
  }

  private static async releaseProcessingLock(
    tenantId: string,
    lockKey: string,
  ): Promise<void> {
    try {
      const client = await getValkeyClient();
      const fullLockKey = `_lighthouse:${tenantId}:lock:${lockKey}`;
      await client.del([fullLockKey]);
    } catch (error) {
      // Silent failure
    }
  }

  static async getProcessedScanIds(): Promise<string[]> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return [];

    try {
      const client = await getValkeyClient();
      const dataKey = `_lighthouse:${tenantId}:processed_scan_ids`;

      const result = await client.get(dataKey);
      if (!result) return [];

      const scanIdsString = result.toString();
      return scanIdsString ? scanIdsString.split(",") : [];
    } catch (error) {
      return [];
    }
  }

  static async setProcessedScanIds(scanIds: string[]): Promise<boolean> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return false;

    try {
      const client = await getValkeyClient();
      const dataKey = `_lighthouse:${tenantId}:processed_scan_ids`;
      const scanIdsString = scanIds.join(",");

      await client.set(dataKey, scanIdsString);
      return true;
    } catch (error) {
      return false;
    }
  }

  static async processScansWithLock(scanIds: string[]): Promise<{
    success: boolean;
    data?: string;
  }> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return { success: false };

    const lockKey = "scan-processing";
    const lockTtlSeconds = 1200; // 20 minutes

    try {
      // Try to acquire processing lock
      const lockAcquired = await this.acquireProcessingLock(
        tenantId,
        lockKey,
        lockTtlSeconds,
      );

      if (!lockAcquired) {
        // Processing is happening in background, return success but no data
        return { success: true };
      }

      try {
        // Generate the scan summary for the provided scan IDs
        const scanSummary = await generateSecurityScanSummary(scanIds);

        // Only process if we have valid scan summary
        if (scanSummary) {
          // Cache the scan summary
          await this.set("scan-summary", scanSummary);

          // Mark scans as processed
          await this.setProcessedScanIds(scanIds);

          // Generate and cache recommendations asynchronously
          this.generateAndCacheRecommendations(scanIds).catch((error) => {
            console.error(
              "Background recommendation generation failed:",
              error,
            );
          });

          return {
            success: true,
            data: scanSummary,
          };
        } else {
          // Even if no summary, mark scans as processed to avoid reprocessing
          await this.setProcessedScanIds(scanIds);
        }

        return { success: true };
      } finally {
        await this.releaseProcessingLock(tenantId, lockKey);
      }
    } catch (error) {
      console.error("Error processing scans with lock:", error);
      return { success: false };
    }
  }

  // Generic cache methods for future use
  static async get(key: string): Promise<string | null> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return null;

    try {
      const client = await getValkeyClient();
      const fullKey = `_lighthouse:${tenantId}:${key}`;
      const result = await client.get(fullKey);
      return result?.toString() || null;
    } catch (error) {
      return null;
    }
  }

  static async set(
    key: string,
    value: string,
    ttlSeconds?: number,
  ): Promise<boolean> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return false;

    try {
      const client = await getValkeyClient();
      const fullKey = `_lighthouse:${tenantId}:${key}`;

      if (ttlSeconds) {
        await client.set(fullKey, value, "EX", ttlSeconds);
      } else {
        await client.set(fullKey, value);
      }

      return true;
    } catch (error) {
      return false;
    }
  }

  static async getRecommendations(): Promise<{
    success: boolean;
    data?: string;
  }> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return { success: false };

    try {
      const client = await getValkeyClient();
      const dataKey = `_lighthouse:${tenantId}:recommendations`;

      const cachedData = await client.get(dataKey);
      if (cachedData) {
        return {
          success: true,
          data: cachedData.toString(),
        };
      }

      return { success: true, data: undefined };
    } catch (error) {
      return { success: false };
    }
  }

  static async generateAndCacheRecommendations(scanIds: string[]): Promise<{
    success: boolean;
    data?: string;
  }> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return { success: false };

    const lockKey = "recommendations-processing";
    const dataKey = `_lighthouse:${tenantId}:recommendations`;
    const detailedDataKey = `_lighthouse:${tenantId}:cached-messages:recommendation`;

    try {
      const client = await getValkeyClient();

      // Check if data already exists
      const existingData = await client.get(dataKey);
      if (existingData) {
        return {
          success: true,
          data: existingData.toString(),
        };
      }

      // Lock TTL 10 minutes
      const lockAcquired = await this.acquireProcessingLock(
        tenantId,
        lockKey,
        600,
      );

      if (!lockAcquired) {
        // Processing is happening in background, return success but no data
        return { success: true };
      }

      try {
        // Double-check after acquiring lock
        const doubleCheckData = await client.get(dataKey);
        if (doubleCheckData) {
          return {
            success: true,
            data: doubleCheckData.toString(),
          };
        }

        // Generate detailed recommendation first
        const detailedRecommendation = await generateDetailedRecommendation({
          scanIds,
        });

        if (!detailedRecommendation.trim()) {
          return { success: true, data: "" };
        }

        // Generate banner from detailed content
        const bannerRecommendation = await generateBannerFromDetailed(
          detailedRecommendation,
        );

        // Both must succeed - no point in detailed without banner
        if (!bannerRecommendation.trim()) {
          return { success: true, data: "" };
        }

        // Generate question answers
        const questionAnswers = await generateQuestionAnswers(suggestedActions);

        // Cache both versions
        await client.set(dataKey, bannerRecommendation);
        await client.set(detailedDataKey, detailedRecommendation);

        // Cache question answers with 24h TTL
        for (const [questionRef, answer] of Object.entries(questionAnswers)) {
          const questionKey = `_lighthouse:${tenantId}:cached-messages:question_${questionRef}`;
          await client.set(questionKey, answer, "EX", 86400); // 24 hours
        }

        return {
          success: true,
          data: bannerRecommendation,
        };
      } finally {
        await this.releaseProcessingLock(tenantId, lockKey);
      }
    } catch (error) {
      console.error("Error generating and caching recommendations:", error);
      return { success: false };
    }
  }

  static async isRecommendationProcessing(): Promise<boolean> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return false;

    try {
      const client = await getValkeyClient();
      const lockKey = `_lighthouse:${tenantId}:lock:recommendations-processing`;

      const result = await client.get(lockKey);
      return result !== null;
    } catch (error) {
      return false;
    }
  }

  // New method to get cached message by type
  static async getCachedMessage(messageType: string): Promise<{
    success: boolean;
    data?: string;
  }> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return { success: false };

    try {
      const client = await getValkeyClient();
      const dataKey = `_lighthouse:${tenantId}:cached-messages:${messageType}`;

      const cachedData = await client.get(dataKey);
      if (cachedData) {
        return {
          success: true,
          data: cachedData.toString(),
        };
      }

      return { success: true, data: undefined };
    } catch (error) {
      console.error(`Error getting cached message ${messageType}:`, error);
      return { success: false };
    }
  }

  // New method to set cached message by type
  static async setCachedMessage(
    messageType: string,
    content: string,
  ): Promise<boolean> {
    const tenantId = await this.getTenantId();
    if (!tenantId) return false;

    try {
      const client = await getValkeyClient();
      const dataKey = `_lighthouse:${tenantId}:cached-messages:${messageType}`;
      await client.set(dataKey, content);
      return true;
    } catch (error) {
      console.error(`Error caching message type ${messageType}:`, error);
      return false;
    }
  }
}

export async function initializeTenantCache(): Promise<{
  success: boolean;
  data?: string;
  scanSummary?: string;
}> {
  try {
    // Check if there are any completed scans per provider
    const currentScanIds = await getLatestCompletedScansPerProvider();

    if (currentScanIds.length === 0) {
      // No latest completed scans found, return existing cached data if any
      const existingSummary = await CacheService.get("scan-summary");
      return {
        success: true,
        data: existingSummary || undefined,
        scanSummary: existingSummary || undefined,
      };
    }

    // Check if we need to process these scans
    const processedScanIds = await CacheService.getProcessedScanIds();
    const shouldProcess = !compareProcessedScanIds(
      currentScanIds,
      processedScanIds,
    );

    if (!shouldProcess) {
      // Scans already processed, return existing cached data
      const existingSummary = await CacheService.get("scan-summary");
      return {
        success: true,
        data: existingSummary || undefined,
        scanSummary: existingSummary || undefined,
      };
    }

    // New scans found, trigger full processing with lock
    const result = await CacheService.processScansWithLock(currentScanIds);
    return {
      success: result.success,
      data: result.data,
      scanSummary: result.data,
    };
  } catch (error) {
    console.error("Error initializing tenant cache:", error);
    return {
      success: false,
    };
  }
}
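A minimal usage sketch of the cache layer above (illustrative only; the variable summaryText is an assumption, while the key layout and SET arguments come from the file itself):

  // Values live under tenant-scoped keys: _lighthouse:<tenantId>:<key>
  await CacheService.set("scan-summary", summaryText, 3600); // optional TTL in seconds
  const summary = await CacheService.get("scan-summary");

  // The processing lock is a plain Valkey SET with EX (TTL) and NX (only if absent):
  //   SET _lighthouse:<tenantId>:lock:scan-processing <timestamp> EX 1200 NX
  // "OK" means the caller owns the lock; null means another worker is already processing.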
@@ -149,6 +149,7 @@ You operate in an agent loop, iterating through these steps:
- Provider ID is a UUID string.
- Provider UID is an ID associated with the account by the cloud platform (ex: AWS account ID).
- Provider Alias is a user-defined name for the cloud account in Prowler.
- Agents can hallucinate. Check purpose of agent before trusting the output. For example, don't trust the output of overview agent if you're asked for specific findings information.

## Proactive Security Recommendations

@@ -415,7 +416,9 @@ const findingsAgentPrompt = `You are Prowler's Findings Agent, specializing in s
## Tool Calling Guidelines

- Mentioning all keys in the function call is mandatory. Don't skip any keys.
- Don't add empty filters in the function call.`;
- Don't add empty filters in the function call.
- Use only available filters and sort options. Ensure you enclose filter with \`filter[]\`.
- When user asks for a particular check, use getProviderChecksTool tool to get the correct check ID. Only pass check IDs selected from the tool output to subsequent tools.`;

const overviewAgentPrompt = `You are Prowler's Overview Agent, specializing in high-level security status information across providers and findings.

@@ -468,11 +471,36 @@ const rolesAgentPrompt = `You are Prowler's Roles Agent, specializing in role an
- Mentioning all keys in the function call is mandatory. Don't skip any keys.
- Don't add empty filters in the function call.`;

const resourcesAgentPrompt = `You are Prowler's Resource Agent, specializing in fetching resource information within Prowler.

## Available Tools

- getResourcesTool: List available resource with filtering options
- getResourceTool: Get detailed information about a specific resource

## Response Guidelines

- Keep the response concise
- Only share information relevant to the query
- Answer directly without unnecessary introductions or conclusions
- Ensure all responses are based on tools' output and information available in the prompt

## Additional Guidelines

- Focus only on resource-related information
- Format resource IDs, permissions, and descriptions consistently

## Tool Calling Guidelines

- Mentioning all keys in the function call is mandatory. Don't skip any keys.
- Don't add empty filters in the function call.`;

export {
  complianceAgentPrompt,
  findingsAgentPrompt,
  overviewAgentPrompt,
  providerAgentPrompt,
  resourcesAgentPrompt,
  rolesAgentPrompt,
  scansAgentPrompt,
  supervisorPrompt,
ui/lib/lighthouse/recommendations.ts (new file, 262 lines)
@@ -0,0 +1,262 @@
import { ChatOpenAI } from "@langchain/openai";

import { getAIKey, getLighthouseConfig } from "@/actions/lighthouse/lighthouse";
import { getCurrentDataSection } from "@/lib/lighthouse/data";

import { type SuggestedAction } from "./suggested-actions";
import { initLighthouseWorkflow } from "./workflow";

export const generateDetailedRecommendation = async ({
  scanIds,
}: {
  scanIds: string[];
}): Promise<string> => {
  try {
    const apiKey = await getAIKey();
    if (!apiKey) {
      return "";
    }

    const currentDataSection = await getCurrentDataSection();

    const lighthouseConfig = await getLighthouseConfig();
    if (!lighthouseConfig?.attributes) {
      return "";
    }
    const businessContext =
      lighthouseConfig?.data?.attributes?.business_context;

    const workflow = await initLighthouseWorkflow();
    const response = await workflow.invoke({
      messages: [
        {
          id: "business-context",
          role: "assistant",
          content: `Business Context Information:\n${businessContext}`,
        },
        {
          id: "providers",
          role: "assistant",
          content: `${currentDataSection}`,
        },
        {
          id: "scan-ids",
          role: "user",
          content: `Scan IDs in focus: ${scanIds}`,
        },
        {
          role: "user",
          content: `Based on findings from mentioned scans AND business context (if available), provide detailed recommendations about issues that need to be fixed first along with remediation steps. Call all necessary tools and give actionable next steps.

## Core Principles

1. **Data-Driven Analysis Only**: Base all recommendations solely on verified findings from tool calls
2. **No Assumptions**: If data is unavailable or insufficient, clearly state this limitation
3. **Factual Reporting**: Report only what tools return - no speculation or gap-filling

## Required Process

You MUST follow all the following steps in order. Do NOT skip any step.

### Step 1: Overview Agent gives Overview
- Fetch overview of findings across scans using overview agent to get high level view of security posture
- Overview agent must provide the high level overview of findings based on tools getProvidersOverviewTool, getFindingsByStatusTool and getFindingsBySeverityTool
- Strictly use overview agent only for overview and findings agent for specific findings
- Overview agent must not provide any information apart from overview. Example, it must not provide data about checks, check IDs and individual findings.

### Step 2: Findings Agent gives Findings
- Fetch newly detected findings in the previous scans (if any)
- Fetch failed findings sorted by severity - critical, high and medium. Paginate to fetch all findings
- Ensure you went through all failed findings
- Group findings to find patterns (if any). For example: multiple findings for the same check ID

### Step 3: Resource Agent gives Resource Information (Optional)
- If the findings data doesn't contain sufficient information about resources, use resource agent to get the resource information
- Verify that findings data is complete and actionable
- Confirm that severity levels and resource details are available

### Step 4: Output
- Based on information from previous steps, give a detailed recommendation about issues that need to be fixed first along with remediation steps.
- This is the final summary recommendation. Do NOT add any other information about agents or tools.

## Report Structure (Conditional)

Generate a report ONLY if you have verified findings:

### Format Requirements
- Use markdown formatting
- No bullet points except for Resource Details sections
- No emojis or decorative elements
- Keep sentences concise - use 1-2 sentences maximum per concept
- Strip any unnecessary descriptive language that doesn't add value

### Required Sections

- Opening Statement: Single sentence stating you analyzed the environment and found X vulnerabilities
- Two sentences maximum giving executive summary of the findings and impact
- First Focus: (H2 heading) - Name the specific vulnerability type, not severity levels
- Second Focus: (H2 heading) - Name the specific vulnerability type, not severity levels
- Immediate Actions Required: (H2 heading) - Implementation guidance

### Content Structure for Each Vulnerability

- Start with 1-2 sentences explaining what's wrong and why it matters
- Include "Resource Details:" section with exactly these bullet points:
  - Resource name/identifier
  - Service
  - Account
  - Severity level
  - Impact description
- Always prefer using the accurate resource information from tool output instead of adding placeholder
- Mention the account alias (if available) along with account ID in account section. If there's no account alias, only mention the account ID.
- Include "Remediation:" section with the exact CLI command in a code block along with other ways to remediate (example: terraform)
- Use technical language, avoid storytelling or dramatic descriptions

## Failure Conditions

If any of these conditions occur, DO NOT generate a standard report:

- Overview agent returns no data or errors
- Findings agent returns empty results
- Tool calls fail or timeout
- Data is incomplete or unclear

Instead, provide a brief status explaining:
- What data collection was attempted
- What information is missing or unavailable
- What steps are needed to obtain the required data

## Style Guidelines

- Direct, technical, professional
- No detective stories, narratives, or analogies
- Focus on facts and actionable information
- Assume technical audience familiar with AWS
- Keep it clean and scannable

## Output Length

- Approximately 400-500 words total
- Each vulnerability section should be roughly equal length
- Adjust length based on actual findings complexity`,
        },
      ],
    });

    const lastMessage =
      response.messages[response.messages.length - 1]?.content?.toString?.();
    return lastMessage;
  } catch (error) {
    console.error("Error generating detailed recommendation:", error);
    return "";
  }
};

export const generateBannerFromDetailed = async (
  detailedRecommendation: string,
): Promise<string> => {
  try {
    const apiKey = await getAIKey();
    if (!apiKey) {
      return "";
    }

    const lighthouseConfig = await getLighthouseConfig();
    if (!lighthouseConfig?.attributes) {
      return "";
    }

    const config = lighthouseConfig.attributes;

    const llm = new ChatOpenAI({
      model: config.model || "gpt-4o",
      temperature: config.temperature || 0,
      maxTokens: 100,
      apiKey: apiKey,
    });

    const systemPrompt = `Create a short, engaging banner message from this detailed security analysis.

REQUIREMENTS:
- Maximum 80 characters
- Include "Lighthouse" in the message
- Focus on the key insight or opportunity
- Make it clickable and business-focused
- Use action words like "detected", "found", "identified"
- Don't end with punctuation

EXAMPLES:
- Lighthouse found fixing 1 S3 check resolves 15 findings
- Lighthouse detected critical RDS encryption gaps
- Lighthouse identified 3 exposed databases needing attention

Based on this detailed analysis, create one engaging banner message:

${detailedRecommendation}`;

    const response = await llm.invoke([
      {
        role: "system",
        content: systemPrompt,
      },
    ]);

    return response.content.toString().trim();
  } catch (error) {
    console.error(
      "Error generating banner from detailed recommendation:",
      error,
    );
    return "";
  }
};

export const generateQuestionAnswers = async (
  questions: SuggestedAction[],
): Promise<Record<string, string>> => {
  const answers: Record<string, string> = {};

  try {
    const apiKey = await getAIKey();
    if (!apiKey) {
      return answers;
    }

    // Initialize the workflow system
    const workflow = await initLighthouseWorkflow();

    for (const question of questions) {
      if (!question.questionRef) continue;

      try {
        // Use the existing workflow to answer the question
        const result = await workflow.invoke({
          messages: [
            {
              role: "user",
              content: question.action,
            },
          ],
        });

        // Extract the final message content
        const finalMessage = result.messages[result.messages.length - 1];
        if (finalMessage?.content) {
          answers[question.questionRef] = finalMessage.content
            .toString()
            .trim();
        }
      } catch (error) {
        console.error(
          `Error generating answer for question ${question.questionRef}:`,
          error,
        );
        continue;
      }
    }
  } catch (error) {
    console.error("Error generating question answers:", error);
  }

  return answers;
};
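For orientation, a sketch of how these three generators are driven by CacheService.generateAndCacheRecommendations earlier in this diff (the local names detailed, banner, and answers are illustrative; the cache key comments repeat what cache.ts does):

  const detailed = await generateDetailedRecommendation({ scanIds });
  if (detailed.trim()) {
    const banner = await generateBannerFromDetailed(detailed); // <= 80 chars, mentions "Lighthouse"
    const answers = await generateQuestionAnswers(suggestedActions); // keyed by questionRef
    // banner   -> _lighthouse:<tenantId>:recommendations
    // detailed -> _lighthouse:<tenantId>:cached-messages:recommendation
    // answers  -> _lighthouse:<tenantId>:cached-messages:question_<ref> (24h TTL)
  }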
ui/lib/lighthouse/suggested-actions.ts (new file, 33 lines)
@@ -0,0 +1,33 @@
export interface SuggestedAction {
  title: string;
  label: string;
  action: string;
  questionRef?: string;
}

export const suggestedActions: SuggestedAction[] = [
  {
    title: "Are there any exposed S3",
    label: "buckets in my AWS accounts?",
    action: "List exposed S3 buckets in my AWS accounts",
    questionRef: "1",
  },
  {
    title: "What is the risk of having",
    label: "RDS databases unencrypted?",
    action: "What is the risk of having RDS databases unencrypted?",
    questionRef: "2",
  },
  {
    title: "What is the CIS 1.10 compliance status",
    label: "of my Kubernetes cluster?",
    action: "What is the CIS 1.10 compliance status of my Kubernetes cluster?",
    questionRef: "3",
  },
  {
    title: "List my highest privileged",
    label: "AWS IAM users with full admin access?",
    action: "List my highest privileged AWS IAM users with full admin access",
    questionRef: "4",
  },
];
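Each questionRef ties a suggested action to a pre-generated answer; a sketch of the lookup the Lighthouse page performs (key format taken from cache.ts above, nothing new introduced):

  for (const action of suggestedActions) {
    if (!action.questionRef) continue;
    const cached = await CacheService.getCachedMessage(`question_${action.questionRef}`);
    // resolves _lighthouse:<tenantId>:cached-messages:question_1 ... question_4
  }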
335
ui/lib/lighthouse/summary.ts
Normal file
335
ui/lib/lighthouse/summary.ts
Normal file
@@ -0,0 +1,335 @@
|
||||
import { getLighthouseCheckDetails } from "@/actions/lighthouse/checks";
|
||||
import { getLighthouseFindings } from "@/actions/lighthouse/findings";
|
||||
import { getProviders } from "@/actions/providers/providers";
|
||||
import { getScans } from "@/actions/scans/scans";
|
||||
import { CheckDetails, FindingSummary } from "@/types/lighthouse/summary";
|
||||
|
||||
import { getNewFailedFindingsSummary } from "./tools/findings";
|
||||
|
||||
export const getLatestCompletedScansPerProvider = async (): Promise<
|
||||
string[]
|
||||
> => {
|
||||
try {
|
||||
const providersResponse = await getProviders({
|
||||
pageSize: 100,
|
||||
});
|
||||
|
||||
if (!providersResponse?.data || providersResponse.data.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const providersWithScans = await Promise.all(
|
||||
providersResponse.data.map(async (provider: any) => {
|
||||
try {
|
||||
const scansData = await getScans({
|
||||
page: 1,
|
||||
sort: "-inserted_at",
|
||||
filters: {
|
||||
"filter[provider]": provider.id,
|
||||
"filter[state]": "completed",
|
||||
},
|
||||
});
|
||||
|
||||
// If scans exist, return the latest scan ID
|
||||
if (scansData && scansData.data && scansData.data.length > 0) {
|
||||
const latestScan = scansData.data[0];
|
||||
return latestScan.id;
|
||||
}
|
||||
|
||||
return null;
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error fetching scans for provider ${provider.id}:`,
|
||||
error,
|
||||
);
|
||||
return null;
|
||||
}
|
||||
}),
|
||||
);
|
||||
|
||||
// Filter out null results and return scan IDs
|
||||
return providersWithScans.filter(
|
||||
(scanId): scanId is string => scanId !== null,
|
||||
);
|
||||
} catch (error) {
|
||||
console.error("Error fetching latest completed scans per provider:", error);
|
||||
return [];
|
||||
}
|
||||
};
|
||||
|
||||
export const compareProcessedScanIds = (
|
||||
currentScanIds: string[],
|
||||
processedScanIds: string[],
|
||||
): boolean => {
|
||||
const sortedCurrent = [...currentScanIds].sort();
|
||||
const sortedProcessed = [...processedScanIds].sort();
|
||||
|
||||
// Compare lengths first
|
||||
if (sortedCurrent.length !== sortedProcessed.length) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Compare each element
|
||||
for (let i = 0; i < sortedCurrent.length; i++) {
|
||||
if (sortedCurrent[i] !== sortedProcessed[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
const getTopFailedFindingsSummary = async (
|
||||
scanIds: string[],
|
||||
limit: number = 10,
|
||||
): Promise<FindingSummary[]> => {
|
||||
const response = await getLighthouseFindings({
|
||||
page: 1,
|
||||
pageSize: limit,
|
||||
sort: "severity",
|
||||
filters: {
|
||||
"fields[findings]": "check_id,severity",
|
||||
"filter[scan__in]": scanIds.join(","),
|
||||
"filter[status]": "FAIL",
|
||||
"filter[muted]": "false",
|
||||
},
|
||||
});
|
||||
|
||||
if (!response?.data) {
|
||||
return [];
|
||||
}
|
||||
|
||||
return response.data.map((finding: any) => ({
|
||||
checkId: finding.attributes.check_id,
|
||||
severity: finding.attributes.severity,
|
||||
count: 1,
|
||||
findingIds: [finding.id],
|
||||
}));
|
||||
};
|
||||
|
||||
// Helper function to collect new failed findings across multiple scans
|
||||
const collectNewFailedFindings = async (
|
||||
scanIds: string[],
|
||||
): Promise<Record<string, FindingSummary[]>> => {
|
||||
const findingsByScan: Record<string, FindingSummary[]> = {};
|
||||
|
||||
for (const scanId of scanIds) {
|
||||
try {
|
||||
const newFailedFindingsSummary =
|
||||
await getNewFailedFindingsSummary(scanId);
|
||||
|
||||
if (Object.keys(newFailedFindingsSummary).length > 0) {
|
||||
const scanFindings: FindingSummary[] = [];
|
||||
|
||||
// Convert to FindingSummary format
|
||||
Object.entries(newFailedFindingsSummary).forEach(
|
||||
([severity, checks]) => {
|
||||
Object.entries(checks).forEach(([checkId, summary]) => {
|
||||
scanFindings.push({
|
||||
checkId,
|
||||
severity,
|
||||
count: summary.count,
|
||||
findingIds: summary.finding_ids,
|
||||
});
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
if (scanFindings.length > 0) {
|
||||
findingsByScan[scanId] = scanFindings;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error fetching new failed findings for scan ${scanId}:`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return findingsByScan;
|
||||
};
|
||||
|
||||
// Helper function to enrich findings with check details
|
||||
const enrichFindingsWithCheckDetails = async (
|
||||
findings: FindingSummary[],
|
||||
): Promise<Map<string, CheckDetails>> => {
|
||||
const uniqueCheckIds = Array.from(new Set(findings.map((f) => f.checkId)));
|
||||
const checkDetailsMap = new Map<string, CheckDetails>();
|
||||
|
||||
for (const checkId of uniqueCheckIds) {
|
||||
try {
|
||||
const checkDetails = await getLighthouseCheckDetails({ checkId });
|
||||
if (checkDetails) {
|
||||
checkDetailsMap.set(checkId, checkDetails);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`Error fetching check details for ${checkId}:`, error);
|
||||
// Add a fallback check details object
|
||||
checkDetailsMap.set(checkId, {
|
||||
id: checkId,
|
||||
title: checkId,
|
||||
description: "",
|
||||
risk: "",
|
||||
remediation: {},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return checkDetailsMap;
|
||||
};
|
||||
|
||||
// Helper function to sort findings by severity
|
||||
const sortFindingsBySeverity = (
|
||||
findings: FindingSummary[],
|
||||
): FindingSummary[] => {
|
||||
const severityOrder = {
|
||||
critical: 0,
|
||||
high: 1,
|
||||
medium: 2,
|
||||
low: 3,
|
||||
informational: 4,
|
||||
};
|
||||
|
||||
return findings.sort(
|
||||
(a, b) =>
|
||||
severityOrder[a.severity as keyof typeof severityOrder] -
|
||||
severityOrder[b.severity as keyof typeof severityOrder],
|
||||
);
|
||||
};
|
||||
|
||||
// Helper function to build details for a single finding
|
||||
const buildSingleFindingDetails = (
|
||||
  finding: FindingSummary,
  checkDetailsMap: Map<string, CheckDetails>,
): string => {
  const checkDetails = checkDetailsMap.get(finding.checkId);
  let detailsText = "";

  detailsText += `**Title:** ${checkDetails?.title || finding.checkId}\n`;
  detailsText += `**Severity:** ${finding.severity.toUpperCase()}\n`;
  detailsText += `**Check Summary:** ${checkDetails?.description || "Description not available"}\n`;
  detailsText += `**Number of failed findings:** ${finding.count}\n`;
  detailsText += `**Finding IDs:** ${finding.findingIds.join(", ")}\n`;
  detailsText += "**Remediation:**\n";

  const remediation = checkDetails?.remediation;
  if (remediation?.terraform) {
    detailsText += `- Terraform: ${remediation.terraform.description}\n`;
    if (remediation.terraform.reference) {
      detailsText += `  Reference: ${remediation.terraform.reference}\n`;
    }
  }
  if (remediation?.cli) {
    detailsText += `- AWS CLI: ${remediation.cli.description}\n`;
    if (remediation.cli.reference) {
      detailsText += `  Reference: ${remediation.cli.reference}\n`;
    }
  }
  if (remediation?.nativeiac) {
    detailsText += `- Native IAC: ${remediation.nativeiac.description}\n`;
    if (remediation.nativeiac.reference) {
      detailsText += `  Reference: ${remediation.nativeiac.reference}\n`;
    }
  }
  if (remediation?.other) {
    detailsText += `- Other: ${remediation.other.description}\n`;
    if (remediation.other.reference) {
      detailsText += `  Reference: ${remediation.other.reference}\n`;
    }
  }
  if (remediation?.wui) {
    detailsText += `- WUI: ${remediation.wui.description}\n`;
    if (remediation.wui.reference) {
      detailsText += `  Reference: ${remediation.wui.reference}\n`;
    }
  }

  if (
    !remediation?.terraform &&
    !remediation?.cli &&
    !remediation?.nativeiac &&
    !remediation?.other &&
    !remediation?.wui
  ) {
    detailsText += "- No specific remediation commands available\n";
  }

  detailsText += "\n";
  return detailsText;
};

// Generates a summary of failed findings for the provided scan IDs.
// Returns an empty string if there are no failed findings in any scan or an unexpected error occurs;
// otherwise returns a string summarizing the failed findings.
export const generateSecurityScanSummary = async (
  scanIds: string[],
): Promise<string> => {
  try {
    // Collect new failed findings by scan
    const newFindingsByScan = await collectNewFailedFindings(scanIds);

    // Get top failed findings across all scans
    let topFailedFindings: FindingSummary[] = [];
    try {
      topFailedFindings = await getTopFailedFindingsSummary(scanIds, 10);
    } catch (error) {
      console.error("Error fetching top failed findings:", error);
    }

    // Combine all findings for check details enrichment
    const newFindings = Object.values(newFindingsByScan).flat();
    const allFindings = [...newFindings, ...topFailedFindings];

    // If there are no findings at all, return an empty string
    if (allFindings.length === 0) {
      return "";
    }

    // Enrich all findings with check details
    const checkDetailsMap = await enrichFindingsWithCheckDetails(allFindings);

    // Build the summary
    let summaryText = "";

    // Header
    if (scanIds.length === 1) {
      summaryText += `# Scan ID: ${scanIds[0]}\n\n`;
    } else {
      summaryText += `# Completed scans (${scanIds.length} across providers)\n`;
      summaryText += `**Scan IDs:** ${scanIds.join(", ")}\n\n`;
    }

    // New findings section (if any)
    if (newFindings.length > 0) {
      summaryText += "## New Failed Findings by Scan\n";
      summaryText += `${newFindings.length} new findings detected.\n\n`;

      Object.entries(newFindingsByScan).forEach(([scanId, scanFindings]) => {
        summaryText += `### Scan ID: ${scanId}\n`;
        const sortedScanFindings = sortFindingsBySeverity(scanFindings);

        for (const finding of sortedScanFindings) {
          summaryText += buildSingleFindingDetails(finding, checkDetailsMap);
        }
        summaryText += "\n";
      });
    }

    // Top findings section
    if (topFailedFindings.length > 0) {
      summaryText += "## Top Failed Findings Across All Scans\n";
      summaryText += `Showing top ${topFailedFindings.length} critical findings.\n\n`;

      const sortedTopFindings = sortFindingsBySeverity(topFailedFindings);
      for (const finding of sortedTopFindings) {
        summaryText += buildSingleFindingDetails(finding, checkDetailsMap);
      }
    }

    return summaryText;
  } catch (error) {
    console.error("Error generating security scan summary:", error);
    return "";
  }
};
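
For context, a minimal usage sketch of the helper introduced above. Nothing below is part of the changeset: the call site and the scan IDs are illustrative assumptions.

// Illustrative only — not part of this diff. Shows how the summary helper
// might be invoked once one or more scans finish; the IDs are placeholders.
const completedScanIds = ["scan-uuid-1", "scan-uuid-2"];
const summaryMarkdown = await generateSecurityScanSummary(completedScanIds);
if (summaryMarkdown !== "") {
  // The helper returns Markdown: a header per scan, a "New Failed Findings by
  // Scan" section, and a "Top Failed Findings Across All Scans" section.
  // eslint-disable-next-line no-console
  console.log(summaryMarkdown);
}
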
@@ -1,11 +1,23 @@
import { tool } from "@langchain/core/tools";
import { z } from "zod";

import { getFindings, getMetadataInfo } from "@/actions/findings";
import { getMetadataInfo } from "@/actions/findings";
import {
  getLighthouseFindings,
  getLighthouseLatestFindings,
} from "@/actions/lighthouse/findings";
import { getFindingsSchema, getMetadataInfoSchema } from "@/types/lighthouse";

export const getFindingsTool = tool(
  async ({ page, pageSize, query, sort, filters }) => {
    return await getFindings({ page, pageSize, query, sort, filters });
  async ({ page, pageSize, query, sort, filters, fields }) => {
    return await getLighthouseFindings({
      page,
      pageSize,
      query,
      sort,
      filters,
      fields,
    });
  },
  {
    name: "getFindings",
@@ -15,6 +27,104 @@ export const getFindingsTool = tool(
  },
);

export const getLatestFindingsTool = tool(
  async ({ page, pageSize, query, sort, filters }) => {
    return await getLighthouseLatestFindings({
      page,
      pageSize,
      query,
      sort,
      filters,
    });
  },
  {
    name: "getLatestFindings",
    description:
      "Retrieves a list of the latest findings from the latest scans of all providers with options for filtering by various criteria.",
    // getLatestFindings uses the same schema as getFindings
    schema: getFindingsSchema,
  },
);

// Function to get a summary of new and changed failed findings that appeared in a particular scan
export const getNewFailedFindingsSummary = async (scanId: string) => {
  let allFindings: any[] = [];
  let currentPage = 1;
  let totalPages = 1;
  const pageSize = 100;

  do {
    const response = await getLighthouseFindings({
      page: currentPage,
      pageSize: pageSize,
      sort: "severity",
      filters: {
        "fields[findings]": "check_id,severity",
        "filter[scan]": scanId,
        "filter[status]": "FAIL",
        "filter[muted]": "false",
        "filter[delta__in]": "new,changed",
      },
    });

    if (response?.data) {
      allFindings = allFindings.concat(response.data);
    }

    if (currentPage === 1 && response?.meta?.pagination) {
      totalPages = response.meta.pagination.pages;
    }

    currentPage++;
  } while (currentPage <= totalPages);

  const summary: Record<
    string,
    Record<string, { count: number; finding_ids: string[] }>
  > = {};

  allFindings.forEach((finding) => {
    const severity = finding.attributes.severity;
    const checkId = finding.attributes.check_id;
    const findingId = finding.id;

    // Initialize severity group if it doesn't exist
    if (!summary[severity]) {
      summary[severity] = {};
    }

    // Initialize check_id group if it doesn't exist
    if (!summary[severity][checkId]) {
      summary[severity][checkId] = {
        count: 0,
        finding_ids: [],
      };
    }

    // Add finding to the appropriate group
    summary[severity][checkId].count++;
    summary[severity][checkId].finding_ids.push(findingId);
  });

  return summary;
};

export const getNewFailedFindingsSummaryTool = tool(
  async ({ scanId }) => {
    return await getNewFailedFindingsSummary(scanId);
  },
  {
    name: "getNewFailedFindingsSummary",
    description:
      "Fetches summary of new and changed failed findings that appeared in a particular scan. Summary includes count of findings by severity, check_id and finding_ids.",
    schema: z.object({
      scanId: z
        .string()
        .describe("The UUID of the scan to fetch failed findings summary for."),
    }),
  },
);

export const getMetadataInfoTool = tool(
  async ({ query, sort, filters }) => {
    return await getMetadataInfo({ query, sort, filters });
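
To make the aggregation above concrete, here is a sketch of the object shape getNewFailedFindingsSummary returns: severities map to check IDs, which map to a count and the matching finding IDs. The severities, check names, and UUIDs below are placeholders, not data from the diff.

// Illustrative return value only; keys and IDs are placeholders.
const exampleSummary: Record<
  string,
  Record<string, { count: number; finding_ids: string[] }>
> = {
  critical: {
    example_check_one: {
      count: 2,
      finding_ids: ["finding-uuid-1", "finding-uuid-2"],
    },
  },
  high: {
    example_check_two: { count: 1, finding_ids: ["finding-uuid-3"] },
  },
};
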
@@ -42,7 +42,7 @@ export const getFindingsBySeverityTool = tool(
  {
    name: "getFindingsBySeverity",
    description:
      "Retrieves an aggregated summary of findings grouped by severity levels, such as low, medium, high, and critical. The response includes the total count of findings for each severity, considering only the latest scans for each provider. Additional filters can be applied to narrow down results by region, provider type, or other attributes.",
      "Retrieves an aggregated summary of all findings (including passed and failed) grouped by severity levels, such as low, medium, high, and critical. The response includes the total count of findings for each severity, considering only the latest scans for each provider. Additional filters can be applied to narrow down results by region, provider type, or other attributes. Note: This endpoint doesn't allow filtering or ordering by status, it only provides a summary of all detected findings including passed, failed and muted.",
    schema: getFindingsBySeveritySchema,
  },
);

@@ -24,7 +24,9 @@ import {
} from "@/lib/lighthouse/tools/compliances";
import {
  getFindingsTool,
  getLatestFindingsTool,
  getMetadataInfoTool,
  getNewFailedFindingsSummaryTool,
} from "@/lib/lighthouse/tools/findings";
import {
  getFindingsBySeverityTool,
@@ -35,6 +37,10 @@ import {
  getProvidersTool,
  getProviderTool,
} from "@/lib/lighthouse/tools/providers";
import {
  getResourcesTool,
  getResourceTool,
} from "@/lib/lighthouse/tools/resources";
import { getRolesTool, getRoleTool } from "@/lib/lighthouse/tools/roles";
import { getScansTool, getScanTool } from "@/lib/lighthouse/tools/scans";
import {
@@ -101,7 +107,9 @@ export async function initLighthouseWorkflow() {
    llm: llm,
    tools: [
      getFindingsTool,
      getLatestFindingsTool,
      getMetadataInfoTool,
      getNewFailedFindingsSummaryTool,
      getProviderChecksTool,
      getProviderCheckDetailsTool,
    ],
@@ -127,6 +135,13 @@ export async function initLighthouseWorkflow() {
    prompt: rolesAgentPrompt,
  });

  const resourcesAgent = createReactAgent({
    llm: llm,
    tools: [getResourceTool, getResourcesTool],
    name: "resources_agent",
    prompt: rolesAgentPrompt,
  });

  const agents = [
    userInfoAgent,
    providerAgent,
@@ -135,6 +150,7 @@ export async function initLighthouseWorkflow() {
    complianceAgent,
    findingsAgent,
    rolesAgent,
    resourcesAgent,
  ];

  // Create supervisor workflow

84
ui/package-lock.json
generated
@@ -39,6 +39,7 @@
        "framer-motion": "^11.16.0",
        "immer": "^10.1.1",
        "intl-messageformat": "^10.5.0",
        "iovalkey": "^0.3.3",
        "jose": "^5.9.3",
        "js-yaml": "^4.1.0",
        "jwt-decode": "^4.0.0",
@@ -971,6 +972,12 @@
        "@swc/helpers": "^0.5.0"
      }
    },
    "node_modules/@iovalkey/commands": {
      "version": "0.1.0",
      "resolved": "https://registry.npmjs.org/@iovalkey/commands/-/commands-0.1.0.tgz",
      "integrity": "sha512-/B9W4qKSSITDii5nkBCHyPkIkAi+ealUtr1oqBJsLxjSRLka4pxun2VvMNSmcwgAMxgXtQfl0qRv7TE+udPJzg==",
      "license": "MIT"
    },
    "node_modules/@isaacs/cliui": {
      "version": "8.0.2",
      "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
@@ -8191,6 +8198,15 @@
        "node": ">=6"
      }
    },
    "node_modules/cluster-key-slot": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz",
      "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==",
      "license": "Apache-2.0",
      "engines": {
        "node": ">=0.10.0"
      }
    },
    "node_modules/cmdk": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz",
@@ -8706,6 +8722,15 @@
        "node": ">=0.4.0"
      }
    },
    "node_modules/denque": {
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz",
      "integrity": "sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==",
      "license": "Apache-2.0",
      "engines": {
        "node": ">=0.10"
      }
    },
    "node_modules/dequal": {
      "version": "2.0.3",
      "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
@@ -10767,6 +10792,26 @@
        "tslib": "^2.8.0"
      }
    },
    "node_modules/iovalkey": {
      "version": "0.3.3",
      "resolved": "https://registry.npmjs.org/iovalkey/-/iovalkey-0.3.3.tgz",
      "integrity": "sha512-4rTJX6Q5wTYEvxboXi8DsEiUo+OvqJGtLYOSGm37KpdRXsG5XJjbVtYKGJpPSWP+QT7rWscA4vsrdmzbEbenpw==",
      "license": "MIT",
      "dependencies": {
        "@iovalkey/commands": "^0.1.0",
        "cluster-key-slot": "^1.1.0",
        "debug": "^4.3.4",
        "denque": "^2.1.0",
        "lodash.defaults": "^4.2.0",
        "lodash.isarguments": "^3.1.0",
        "redis-errors": "^1.2.0",
        "redis-parser": "^3.0.0",
        "standard-as-callback": "^2.1.0"
      },
      "engines": {
        "node": ">=18.12.0"
      }
    },
    "node_modules/is-alphabetical": {
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
@@ -11802,6 +11847,12 @@
      "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==",
      "license": "MIT"
    },
    "node_modules/lodash.defaults": {
      "version": "4.2.0",
      "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz",
      "integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==",
      "license": "MIT"
    },
    "node_modules/lodash.foreach": {
      "version": "4.5.0",
      "resolved": "https://registry.npmjs.org/lodash.foreach/-/lodash.foreach-4.5.0.tgz",
@@ -11815,6 +11866,12 @@
      "deprecated": "This package is deprecated. Use the optional chaining (?.) operator instead.",
      "license": "MIT"
    },
    "node_modules/lodash.isarguments": {
      "version": "3.1.0",
      "resolved": "https://registry.npmjs.org/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz",
      "integrity": "sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg==",
      "license": "MIT"
    },
    "node_modules/lodash.isplainobject": {
      "version": "4.0.6",
      "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
@@ -14461,6 +14518,27 @@
      "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
      "license": "MIT"
    },
    "node_modules/redis-errors": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/redis-errors/-/redis-errors-1.2.0.tgz",
      "integrity": "sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==",
      "license": "MIT",
      "engines": {
        "node": ">=4"
      }
    },
    "node_modules/redis-parser": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-3.0.0.tgz",
      "integrity": "sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==",
      "license": "MIT",
      "dependencies": {
        "redis-errors": "^1.0.0"
      },
      "engines": {
        "node": ">=4"
      }
    },
    "node_modules/reflect.getprototypeof": {
      "version": "1.0.10",
      "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz",
@@ -15304,6 +15382,12 @@
      "dev": true,
      "license": "MIT"
    },
    "node_modules/standard-as-callback": {
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/standard-as-callback/-/standard-as-callback-2.1.0.tgz",
      "integrity": "sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A==",
      "license": "MIT"
    },
    "node_modules/stdin-discarder": {
      "version": "0.1.0",
      "resolved": "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.1.0.tgz",

@@ -31,6 +31,7 @@
    "framer-motion": "^11.16.0",
    "immer": "^10.1.1",
    "intl-messageformat": "^10.5.0",
    "iovalkey": "^0.3.3",
    "jose": "^5.9.3",
    "js-yaml": "^4.1.0",
    "jwt-decode": "^4.0.0",

@@ -71,21 +71,12 @@ export const getFindingsSchema = z.object({
    .optional()
    .describe("Comma-separated list of UUID values"),

  // Impact and Severity filters
  // Impact filters
  "filter[impact]": impactEnum.optional(),
  "filter[impact__in]": z
    .string()
    .optional()
    .describe("Comma-separated list of impact values"),
  "filter[severity]": z
    .enum(["critical", "high", "medium", "low", "informational"])
    .optional(),
  "filter[severity__in]": z
    .string()
    .optional()
    .describe(
      "Comma-separated list of severity values. Do not use it with severity filter.",
    ),

  // Date filters
  "filter[inserted_at]": z
@@ -105,6 +96,9 @@ export const getFindingsSchema = z.object({
    .optional()
    .describe("Date in format YYYY-MM-DD"),

  // Muted filter
  "filter[muted]": z.boolean().optional(),

  // Provider filters
  "filter[provider]": z.string().optional().describe("Provider UUID"),
  "filter[provider__in]": z
@@ -176,6 +170,17 @@ export const getFindingsSchema = z.object({
    .optional()
    .describe("Comma-separated list of service values"),

  // Severity filters
  "filter[severity]": z
    .enum(["critical", "high", "medium", "low", "informational"])
    .optional(),
  "filter[severity__in]": z
    .string()
    .optional()
    .describe(
      "Comma-separated list of severity values. Do not use it with severity filter.",
    ),

  // Status filters
  "filter[status]": statusEnum.optional(),
  "filter[status__in]": z
@@ -208,6 +213,31 @@ export const getFindingsSchema = z.object({
    .describe(
      "The filters to apply. Default is {}. Only add necessary filters and ignore others. Generate the filters object **only** with non-empty values included.",
    ),
  fields: z
    .array(
      z.enum([
        "uid",
        "delta",
        "status",
        "status_extended",
        "severity",
        "check_id",
        "check_metadata",
        "raw_result",
        "inserted_at",
        "updated_at",
        "first_seen_at",
        "muted",
        "muted_reason",
        "url",
        "scan",
        "resources",
      ]),
    )
    .optional()
    .describe(
      "List of fields to include in the response. Use only available fields.",
    ),
});

// Get Metadata Info Schema

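
As a quick illustration of the new fields option in this schema, here is a sketch of a findings query issued through getLighthouseFindings with a slim field set. The filter values and the chosen fields are illustrative assumptions, not taken from the diff.

// Illustrative only — filter values and the field list are placeholders.
const slimFindings = await getLighthouseFindings({
  page: 1,
  pageSize: 25,
  sort: "severity",
  filters: {
    "filter[status]": "FAIL",
    "filter[severity__in]": "critical,high",
  },
  fields: ["check_id", "severity", "status"],
});
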
35
ui/types/lighthouse/summary.ts
Normal file
@@ -0,0 +1,35 @@
export interface CheckDetails {
  id: string;
  title: string;
  description: string;
  risk: string;
  remediation: {
    cli?: {
      description: string;
      reference: string;
    };
    terraform?: {
      description: string;
      reference: string;
    };
    nativeiac?: {
      description: string;
      reference: string;
    };
    other?: {
      description: string;
      reference: string;
    };
    wui?: {
      description: string;
      reference: string;
    };
  };
}

export interface FindingSummary {
  checkId: string;
  severity: string;
  count: number;
  findingIds: string[];
}
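
For reference, a minimal value satisfying the new FindingSummary interface, as consumed by buildSingleFindingDetails earlier in this changeset; every value below is a placeholder.

// Illustrative only — placeholder values.
const exampleFinding: FindingSummary = {
  checkId: "example_check_id",
  severity: "high",
  count: 3,
  findingIds: ["finding-uuid-1", "finding-uuid-2", "finding-uuid-3"],
};
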