"use client"; import { useChat } from "@ai-sdk/react"; import { DefaultChatTransport } from "ai"; import { Plus } from "lucide-react"; import { useEffect, useRef, useState } from "react"; import { getLighthouseModelIds } from "@/actions/lighthouse/lighthouse"; import { Conversation, ConversationContent, ConversationScrollButton, } from "@/components/ai-elements/conversation"; import { PromptInput, PromptInputBody, PromptInputSubmit, PromptInputTextarea, PromptInputToolbar, PromptInputTools, } from "@/components/lighthouse/ai-elements/prompt-input"; import { ERROR_PREFIX, MESSAGE_ROLES, MESSAGE_STATUS, } from "@/components/lighthouse/chat-utils"; import { Loader } from "@/components/lighthouse/loader"; import { MessageItem } from "@/components/lighthouse/message-item"; import { Button, Card, CardContent, CardDescription, CardHeader, CardTitle, Combobox, } from "@/components/shadcn"; import { useToast } from "@/components/ui"; import { CustomLink } from "@/components/ui/custom/custom-link"; import type { LighthouseProvider } from "@/types/lighthouse"; interface Model { id: string; name: string; } interface Provider { id: LighthouseProvider; name: string; models: Model[]; } interface SuggestedAction { title: string; label: string; action: string; } interface ChatProps { hasConfig: boolean; providers: Provider[]; defaultProviderId?: LighthouseProvider; defaultModelId?: string; } interface SelectedModel { providerType: LighthouseProvider | ""; modelId: string; modelName: string; } interface ExtendedError extends Error { status?: number; body?: Record; } const SUGGESTED_ACTIONS: SuggestedAction[] = [ { title: "Are there any exposed S3", label: "buckets in my AWS accounts?", action: "List exposed S3 buckets in my AWS accounts", }, { title: "What is the risk of having", label: "RDS databases unencrypted?", action: "What is the risk of having RDS databases unencrypted?", }, { title: "What is the CIS 1.10 compliance status", label: "of my Kubernetes cluster?", action: "What is the CIS 1.10 compliance status of my Kubernetes cluster?", }, { title: "List my highest privileged", label: "AWS IAM users with full admin access?", action: "List my highest privileged AWS IAM users with full admin access", }, ]; export const Chat = ({ hasConfig, providers: initialProviders, defaultProviderId, defaultModelId, }: ChatProps) => { const { toast } = useToast(); // Consolidated UI state const [uiState, setUiState] = useState<{ inputValue: string; }>({ inputValue: "", }); // Error handling const [errorMessage, setErrorMessage] = useState(null); // Provider and model management const [providers, setProviders] = useState(initialProviders); const loadedProvidersRef = useRef>(new Set()); const [loadingProviders, setLoadingProviders] = useState< Set >(new Set()); // Initialize selectedModel with defaults from props const [selectedModel, setSelectedModel] = useState(() => { const defaultProvider = initialProviders.find((p) => p.id === defaultProviderId) || initialProviders[0]; const defaultModel = defaultProvider?.models.find((m) => m.id === defaultModelId) || defaultProvider?.models[0]; return { providerType: defaultProvider?.id || "", modelId: defaultModel?.id || "", modelName: defaultModel?.name || "", }; }); // Keep ref in sync with selectedModel for stable access in callbacks const selectedModelRef = useRef(selectedModel); selectedModelRef.current = selectedModel; // Load models for all providers on mount useEffect(() => { initialProviders.forEach((provider) => { loadModelsForProvider(provider.id); }); // 
  // Load all models for a specific provider
  const loadModelsForProvider = async (providerType: LighthouseProvider) => {
    // Skip if already loaded
    if (loadedProvidersRef.current.has(providerType)) {
      return;
    }

    // Mark as loaded
    loadedProvidersRef.current.add(providerType);
    setLoadingProviders((prev) => new Set(prev).add(providerType));

    try {
      const response = await getLighthouseModelIds(providerType);

      if (response.errors) {
        console.error(
          `Error loading models for ${providerType}:`,
          response.errors,
        );
        return;
      }

      if (response.data && Array.isArray(response.data)) {
        // Use the model data directly from the API
        const models: Model[] = response.data;

        // Update the provider's models
        setProviders((prev) =>
          prev.map((p) => (p.id === providerType ? { ...p, models } : p)),
        );
      }
    } catch (error) {
      console.error(`Error loading models for ${providerType}:`, error);
      // Remove from loaded on error so it can be retried
      loadedProvidersRef.current.delete(providerType);
    } finally {
      setLoadingProviders((prev) => {
        const next = new Set(prev);
        next.delete(providerType);
        return next;
      });
    }
  };

  const {
    messages,
    sendMessage,
    status,
    error,
    setMessages,
    regenerate,
    stop,
  } = useChat({
    transport: new DefaultChatTransport({
      api: "/api/lighthouse/analyst",
      credentials: "same-origin",
      body: () => ({
        model: selectedModelRef.current.modelId,
        provider: selectedModelRef.current.providerType,
      }),
    }),
    experimental_throttle: 100,
    onFinish: ({ message }) => {
      // There is no specific way to output the error message from langgraph supervisor
      // Hence, all error messages are sent as normal messages with the prefix [LIGHTHOUSE_ANALYST_ERROR]:
      // Detect error messages sent from backend using specific prefix and display the error
      // Use includes() instead of startsWith() to catch errors that occur mid-stream (after text has been sent)
      const firstTextPart = message.parts.find((p) => p.type === "text");

      if (
        firstTextPart &&
        "text" in firstTextPart &&
        firstTextPart.text.includes(ERROR_PREFIX)
      ) {
        // Extract error text - handle both start-of-message and mid-stream errors
        const fullText = firstTextPart.text;
        const errorIndex = fullText.indexOf(ERROR_PREFIX);
        const errorText = fullText
          .substring(errorIndex + ERROR_PREFIX.length)
          .trim();

        setErrorMessage(errorText);

        // Remove error message from chat history
        setMessages((prev) =>
          prev.filter((m) => {
            const textPart = m.parts.find((p) => p.type === "text");
            return !(
              textPart &&
              "text" in textPart &&
              textPart.text.includes(ERROR_PREFIX)
            );
          }),
        );

        restoreLastUserMessage();
      }
    },
    onError: (error) => {
      console.error("Chat error:", error);

      if (
        error?.message?.includes("") &&
        error?.message?.includes("403 Forbidden")
      ) {
        restoreLastUserMessage();
        setErrorMessage("403 Forbidden");
        return;
      }

      restoreLastUserMessage();
      setErrorMessage(
        error?.message || "An error occurred. Please retry your message.",
      );
    },
  });
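  // restoreLastUserMessage removes the most recent user message from the chat
  // history and puts its text back into the input box, so that after an error
  // the user can edit and resend the prompt instead of losing it.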
Please retry your message.", ); }, }); const restoreLastUserMessage = () => { let restoredText = ""; setMessages((currentMessages) => { const nextMessages = [...currentMessages]; for (let index = nextMessages.length - 1; index >= 0; index -= 1) { const current = nextMessages[index]; if (current.role !== "user") { continue; } const textPart = current.parts.find( (part): part is { type: "text"; text: string } => part.type === "text" && "text" in part, ); if (textPart) { restoredText = textPart.text; } nextMessages.splice(index, 1); break; } return nextMessages; }); if (restoredText) { setUiState((prev) => ({ ...prev, inputValue: restoredText })); } }; const stopGeneration = () => { if ( status === MESSAGE_STATUS.STREAMING || status === MESSAGE_STATUS.SUBMITTED ) { stop(); } }; // Handlers const handleNewChat = () => { setMessages([]); setErrorMessage(null); setUiState((prev) => ({ ...prev, inputValue: "" })); }; const handleModelSelect = ( providerType: LighthouseProvider, modelId: string, modelName: string, ) => { setSelectedModel({ providerType, modelId, modelName }); }; return (
    <div>
      {/* Header with New Chat button */}
      {messages.length > 0 && (
        <div>
          <Button onClick={handleNewChat}>
            <Plus />
            New Chat
          </Button>
        </div>
      )}

      {!hasConfig && (
        <Card>
          <CardHeader>
            <CardTitle>LLM Provider Configuration Required</CardTitle>
            <CardDescription>
              Please configure an LLM provider to use Lighthouse AI.
            </CardDescription>
          </CardHeader>
          <CardContent>
            {/* Placeholder link target; point this at the LLM provider configuration page */}
            <CustomLink href="/lighthouse/config">
              Configure Provider
            </CustomLink>
          </CardContent>
        </Card>
      )}

      {/* Error Banner */}
      {(error || errorMessage) && (
        <div role="alert">
          <p>Error</p>
          <p>
            {errorMessage ||
              error?.message ||
              "An error occurred. Please retry your message."}
          </p>

          {/* Original error details for native errors */}
          {error && (error as ExtendedError).status && (
            <p>Status: {(error as ExtendedError).status}</p>
          )}

          {error && (error as ExtendedError).body && (
            <details>
              <summary>Show details</summary>
              <pre>
                {JSON.stringify((error as ExtendedError).body, null, 2)}
              </pre>
            </details>
          )}
        </div>
      )}

      {messages.length === 0 && !errorMessage && !error ? (
        <div>
          <p>Suggestions</p>
          {/* Assumed behavior: clicking a suggestion sends its action text as a new message */}
          {SUGGESTED_ACTIONS.map((action, index) => (
            <Button
              key={index}
              onClick={() => {
                setErrorMessage(null);
                sendMessage({ text: action.action });
              }}
            >
              <span>{action.title}</span>
              <span>{action.label}</span>
            </Button>
          ))}
        </div>
      ) : (
        <Conversation>
          <ConversationContent>
            {/* Note: MessageItem prop names other than onCopy / onRegenerate are assumed */}
            {messages.map((message, idx) => (
              <MessageItem
                key={idx}
                message={message}
                onCopy={(text) => {
                  navigator.clipboard.writeText(text);
                  toast({
                    title: "Copied",
                    description: "Message copied to clipboard",
                  });
                }}
                onRegenerate={regenerate}
              />
            ))}

            {/* Show loader only if no assistant message exists yet */}
            {(status === MESSAGE_STATUS.SUBMITTED ||
              status === MESSAGE_STATUS.STREAMING) &&
              messages.length > 0 &&
              messages[messages.length - 1].role === MESSAGE_ROLES.USER && (
                <Loader />
              )}
          </ConversationContent>
          <ConversationScrollButton />
        </Conversation>
      )}
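      {/* Prompt input: message textarea, model selector combobox, and a combined
          send/stop control. The nesting of the PromptInput* wrappers below follows
          the usual ai-elements layout and is an assumption. */}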
      <PromptInput
        onSubmit={(message) => {
          if (
            status === MESSAGE_STATUS.STREAMING ||
            status === MESSAGE_STATUS.SUBMITTED
          ) {
            return;
          }

          if (message.text?.trim()) {
            setErrorMessage(null);
            sendMessage({
              text: message.text,
            });
            setUiState((prev) => ({ ...prev, inputValue: "" }));
          }
        }}
      >
        <PromptInputBody>
          {/* Controlled value prop assumed so restoreLastUserMessage can refill the input */}
          <PromptInputTextarea
            value={uiState.inputValue}
            onChange={(e) =>
              setUiState((prev) => ({ ...prev, inputValue: e.target.value }))
            }
          />
        </PromptInputBody>
        <PromptInputToolbar>
          <PromptInputTools>
            {/* Model Selector - Combobox */}
            {/* The Combobox change-handler prop name (onValueChange) is assumed */}
            <Combobox
              onValueChange={(value) => {
                const separatorIndex = value.indexOf(":");
                if (separatorIndex === -1) return;

                const providerType = value.slice(
                  0,
                  separatorIndex,
                ) as LighthouseProvider;
                const modelId = value.slice(separatorIndex + 1);
                const provider = providers.find((p) => p.id === providerType);
                const model = provider?.models.find((m) => m.id === modelId);

                if (provider && model) {
                  handleModelSelect(providerType, modelId, model.name);
                }
              }}
              groups={providers.map((provider) => ({
                heading: provider.name,
                options: provider.models.map((model) => ({
                  value: `${provider.id}:${model.id}`,
                  label: model.name,
                })),
              }))}
              loading={loadingProviders.size > 0}
              loadingMessage="Loading models..."
              placeholder={selectedModel.modelName || "Select model..."}
              searchPlaceholder="Search models..."
              emptyMessage="No model found."
              showSelectedFirst={true}
            />
          </PromptInputTools>
          {/* Submit Button */}
          <PromptInputSubmit
            onClick={(event) => {
              if (
                status === MESSAGE_STATUS.STREAMING ||
                status === MESSAGE_STATUS.SUBMITTED
              ) {
                event.preventDefault();
                stopGeneration();
              }
            }}
            disabled={
              !uiState.inputValue?.trim() &&
              status !== MESSAGE_STATUS.STREAMING &&
              status !== MESSAGE_STATUS.SUBMITTED
            }
          />
        </PromptInputToolbar>
      </PromptInput>
    </div>
  );
};

export default Chat;
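/*
 * Example usage (hypothetical): how a parent page might render this component.
 * The provider id, model ids, and display names below are illustrative
 * placeholders, not values defined by this file or by the LighthouseProvider type.
 *
 * <Chat
 *   hasConfig={true}
 *   providers={[
 *     {
 *       id: "openai" as LighthouseProvider,
 *       name: "OpenAI",
 *       models: [{ id: "gpt-4o", name: "GPT-4o" }],
 *     },
 *   ]}
 *   defaultProviderId={"openai" as LighthouseProvider}
 *   defaultModelId="gpt-4o"
 * />
 */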