Files
prowler/ui/lib/lighthouse/utils.ts
Chandrapal Badshah 031548ca7e feat: Update Lighthouse UI to support multi LLM (#8925)
Co-authored-by: Chandrapal Badshah <12944530+Chan9390@users.noreply.github.com>
Co-authored-by: Alan Buscaglia <gentlemanprogramming@gmail.com>
Co-authored-by: alejandrobailo <alejandrobailo94@gmail.com>
2025-11-14 11:46:38 +01:00

58 lines
1.4 KiB
TypeScript

import {
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
} from "@langchain/core/messages";
import type { UIMessage } from "ai";
import type { ModelParams } from "@/types/lighthouse";
// https://stackoverflow.com/questions/79081298/how-to-stream-langchain-langgraphs-final-generation
/**
 * Translates a Vercel AI SDK UI message into its LangChain counterpart.
 *
 * Only the `"text"` parts of the message are used; their text is concatenated
 * into the LangChain message content. Non-text parts are ignored.
 * @param message - The Vercel UI message to translate.
 * @returns A `HumanMessage` for `"user"`, an `AIMessage` for `"assistant"`,
 * or a generic `ChatMessage` (tagged with the original role) otherwise.
 */
export const convertVercelMessageToLangChainMessage = (
  message: UIMessage,
): BaseMessage => {
  // Gather the text of every text part; `message.parts` may be absent.
  const textFragments: string[] = [];
  for (const part of message.parts ?? []) {
    if (part.type === "text") {
      textFragments.push("text" in part ? part.text : "");
    }
  }
  const content = textFragments.join("");

  if (message.role === "user") {
    return new HumanMessage({ content });
  }
  if (message.role === "assistant") {
    return new AIMessage({ content });
  }
  // Any other role is carried through on a generic chat message.
  return new ChatMessage({ content, role: message.role });
};
/**
 * Derives LangChain-style model parameters from a raw model configuration.
 *
 * For models whose id starts with `"gpt-5"`, the token limit and temperature
 * are cleared and a `"minimal"` reasoning effort is requested — presumably
 * because those models do not accept the sampling/limit knobs (NOTE(review):
 * confirm against the provider's API).
 * @param config - Raw configuration: model id plus optional limits.
 * @returns Normalized parameters for constructing the chat model.
 */
export const getModelParams = (config: {
  model: string;
  max_tokens?: number;
  temperature?: number;
}): ModelParams => {
  const isGpt5Family = config.model.startsWith("gpt-5");

  if (isGpt5Family) {
    // GPT-5 family: drop limits, ask for minimal reasoning effort.
    return {
      maxTokens: undefined,
      temperature: undefined,
      reasoningEffort: "minimal",
    };
  }

  // Everything else: pass the configured limits straight through.
  return {
    maxTokens: config.max_tokens,
    temperature: config.temperature,
    reasoningEffort: undefined,
  };
};