chore: add Biome for formatting and linting (#116)

- Add Biome as formatter and linter (replaces Prettier)
- Configure Husky + lint-staged for pre-commit hooks
- Add VS Code settings for format on save
- Ignore components/ui/ (shadcn generated code)
- Remove semicolons, use 4-space indent
- Reformat all files to new style
Dayuan Jiang
2025-12-06 12:46:40 +09:00
committed by GitHub
parent 215a101f54
commit 150eb1ff63
41 changed files with 3992 additions and 2401 deletions
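
For reference, the bullet points in the commit message translate into three small pieces of configuration. The snippets below are a plausible sketch of what this commit adds, not its verbatim files; exact key names vary across Biome versions. A biome.json covering the formatter and linter settings:

{
    "formatter": {
        "enabled": true,
        "indentStyle": "space",
        "indentWidth": 4
    },
    "javascript": {
        "formatter": {
            "quoteStyle": "double",
            "semicolons": "asNeeded"
        }
    },
    "linter": {
        "enabled": true
    },
    "files": {
        "ignore": ["components/ui/**"]
    }
}

A .vscode/settings.json enabling format on save with Biome as the default formatter:

{
    "editor.defaultFormatter": "biomejs.biome",
    "editor.formatOnSave": true
}

And a lint-staged block in package.json so the Husky pre-commit hook only checks staged files (the glob and command are assumptions):

{
    "lint-staged": {
        "*.{ts,tsx,js,jsx,json}": ["biome check --write"]
    }
}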

View File

@@ -1,247 +1,299 @@
import {
convertToModelMessages,
createUIMessageStream,
createUIMessageStreamResponse,
streamText,
} from "ai"
import { z } from "zod"
import { getAIModel } from "@/lib/ai-providers"
import { findCachedResponse } from "@/lib/cached-responses"
import {
getTelemetryConfig,
setTraceInput,
setTraceOutput,
wrapWithObserve,
} from "@/lib/langfuse"
import { getSystemPrompt } from "@/lib/system-prompts"
export const maxDuration = 300
// File upload limits (must match client-side)
const MAX_FILE_SIZE = 2 * 1024 * 1024 // 2MB
const MAX_FILES = 5
// Helper function to validate file parts in messages
function validateFileParts(messages: any[]): {
valid: boolean
error?: string
} {
const lastMessage = messages[messages.length - 1]
const fileParts =
lastMessage?.parts?.filter((p: any) => p.type === "file") || []
if (fileParts.length > MAX_FILES) {
return {
valid: false,
error: `Too many files. Maximum ${MAX_FILES} allowed.`,
}
}
for (const filePart of fileParts) {
// Data URLs format: data:image/png;base64,<data>
// Base64 increases size by ~33%, so we check the decoded size
if (filePart.url && filePart.url.startsWith("data:")) {
const base64Data = filePart.url.split(",")[1]
if (base64Data) {
const sizeInBytes = Math.ceil((base64Data.length * 3) / 4)
if (sizeInBytes > MAX_FILE_SIZE) {
return {
valid: false,
error: `File exceeds ${MAX_FILE_SIZE / 1024 / 1024}MB limit.`,
}
}
}
}
}
return { valid: true }
}
// Helper function to check if diagram is minimal/empty
function isMinimalDiagram(xml: string): boolean {
const stripped = xml.replace(/\s/g, "")
return !stripped.includes('id="2"')
}
// Helper function to create cached stream response
function createCachedStreamResponse(xml: string): Response {
const toolCallId = `cached-${Date.now()}`
const stream = createUIMessageStream({
execute: async ({ writer }) => {
writer.write({ type: "start" })
writer.write({
type: "tool-input-start",
toolCallId,
toolName: "display_diagram",
})
writer.write({
type: "tool-input-delta",
toolCallId,
inputTextDelta: xml,
})
writer.write({
type: "tool-input-available",
toolCallId,
toolName: "display_diagram",
input: { xml },
})
writer.write({ type: "finish" })
},
})
return createUIMessageStreamResponse({ stream })
}
// Inner handler function
async function handleChatRequest(req: Request): Promise<Response> {
// Check for access code
const accessCodes =
process.env.ACCESS_CODE_LIST?.split(",")
.map((code) => code.trim())
.filter(Boolean) || []
if (accessCodes.length > 0) {
const accessCodeHeader = req.headers.get("x-access-code")
if (!accessCodeHeader || !accessCodes.includes(accessCodeHeader)) {
return Response.json(
{
error: "Invalid or missing access code. Please configure it in Settings.",
},
{ status: 401 },
)
}
}
const { messages, xml, sessionId } = await req.json()
// Get user IP for Langfuse tracking
const forwardedFor = req.headers.get("x-forwarded-for")
const userId = forwardedFor?.split(",")[0]?.trim() || "anonymous"
// Validate sessionId for Langfuse (must be string, max 200 chars)
const validSessionId =
sessionId && typeof sessionId === "string" && sessionId.length <= 200
? sessionId
: undefined
// Extract user input text for Langfuse trace
const currentMessage = messages[messages.length - 1]
const userInputText =
currentMessage?.parts?.find((p: any) => p.type === "text")?.text || ""
// Update Langfuse trace with input, session, and user
setTraceInput({
input: userInputText,
sessionId: validSessionId,
userId: userId,
})
// === FILE VALIDATION START ===
const fileValidation = validateFileParts(messages)
if (!fileValidation.valid) {
return Response.json({ error: fileValidation.error }, { status: 400 })
}
// === FILE VALIDATION END ===
// === CACHE CHECK START ===
const isFirstMessage = messages.length === 1
const isEmptyDiagram = !xml || xml.trim() === "" || isMinimalDiagram(xml)
if (isFirstMessage && isEmptyDiagram) {
const lastMessage = messages[0]
const textPart = lastMessage.parts?.find((p: any) => p.type === "text")
const filePart = lastMessage.parts?.find((p: any) => p.type === "file")
const cached = findCachedResponse(textPart?.text || "", !!filePart)
if (cached) {
console.log(
"[Cache] Returning cached response for:",
textPart?.text,
)
return createCachedStreamResponse(cached.xml)
}
}
// === CACHE CHECK END ===
// Get AI model from environment configuration
const { model, providerOptions, headers, modelId } = getAIModel()
// Get the appropriate system prompt based on model (extended for Opus/Haiku 4.5)
const systemMessage = getSystemPrompt(modelId)
const lastMessage = messages[messages.length - 1]
// Extract text from the last message parts
const lastMessageText =
lastMessage.parts?.find((part: any) => part.type === "text")?.text || ""
// Extract file parts (images) from the last message
const fileParts =
lastMessage.parts?.filter((part: any) => part.type === "file") || []
// User input only - XML is now in a separate cached system message
const formattedUserInput = `User input:
"""md
${lastMessageText}
"""`
// Convert UIMessages to ModelMessages and add system message
const modelMessages = convertToModelMessages(messages)
// Filter out messages with empty content arrays (Bedrock API rejects these)
// This is a safety measure - ideally convertToModelMessages should handle all cases
let enhancedMessages = modelMessages.filter(
(msg: any) =>
msg.content && Array.isArray(msg.content) && msg.content.length > 0,
)
// Update the last message with user input only (XML moved to separate cached system message)
if (enhancedMessages.length >= 1) {
const lastModelMessage = enhancedMessages[enhancedMessages.length - 1]
if (lastModelMessage.role === "user") {
// Build content array with user input text and file parts
const contentParts: any[] = [
{ type: "text", text: formattedUserInput },
]
// Add image parts back
for (const filePart of fileParts) {
contentParts.push({
type: "image",
image: filePart.url,
mimeType: filePart.mediaType,
})
}
enhancedMessages = [
...enhancedMessages.slice(0, -1),
{ ...lastModelMessage, content: contentParts },
]
}
}
// Add cache point to the last assistant message in conversation history
// This caches the entire conversation prefix for subsequent requests
// Strategy: system (cached) + history with last assistant (cached) + new user message
if (enhancedMessages.length >= 2) {
// Find the last assistant message (should be second-to-last, before current user message)
for (let i = enhancedMessages.length - 2; i >= 0; i--) {
if (enhancedMessages[i].role === "assistant") {
enhancedMessages[i] = {
...enhancedMessages[i],
providerOptions: {
bedrock: { cachePoint: { type: "default" } },
},
}
break // Only cache the last assistant message
}
}
}
// System messages with multiple cache breakpoints for optimal caching:
// - Breakpoint 1: Static instructions (~1500 tokens) - rarely changes
// - Breakpoint 2: Current XML context - changes per diagram, but constant within a conversation turn
// This allows: if only user message changes, both system caches are reused
// if XML changes, instruction cache is still reused
const systemMessages = [
// Cache breakpoint 1: Instructions (rarely change)
{
role: "system" as const,
content: systemMessage,
providerOptions: {
bedrock: { cachePoint: { type: "default" } },
},
},
// Cache breakpoint 2: Current diagram XML context
{
role: "system" as const,
content: `Current diagram XML:\n"""xml\n${xml || ""}\n"""\nWhen using edit_diagram, COPY search patterns exactly from this XML - attribute order matters!`,
providerOptions: {
bedrock: { cachePoint: { type: "default" } },
},
},
]
const allMessages = [...systemMessages, ...enhancedMessages]
const result = streamText({
model,
messages: allMessages,
...(providerOptions && { providerOptions }),
...(headers && { headers }),
// Langfuse telemetry config (returns undefined if not configured)
...(getTelemetryConfig({ sessionId: validSessionId, userId }) && {
experimental_telemetry: getTelemetryConfig({
sessionId: validSessionId,
userId,
}),
}),
onFinish: ({ text, usage, providerMetadata }) => {
console.log(
"[Cache] Full providerMetadata:",
JSON.stringify(providerMetadata, null, 2),
)
console.log("[Cache] Usage:", JSON.stringify(usage, null, 2))
// Pass usage to Langfuse (Bedrock streaming doesn't auto-report tokens to telemetry)
// AI SDK uses inputTokens/outputTokens, Langfuse expects promptTokens/completionTokens
setTraceOutput(text, {
promptTokens: usage?.inputTokens,
completionTokens: usage?.outputTokens,
})
},
tools: {
// Client-side tool that will be executed on the client
display_diagram: {
description: `Display a diagram on draw.io. Pass the XML content inside <root> tags.
VALIDATION RULES (XML will be rejected if violated):
1. All mxCell elements must be DIRECT children of <root> - never nested
@@ -276,12 +328,14 @@ Notes:
- For AWS diagrams, use **AWS 2025 icons**.
- For animated connectors, add "flowAnimation=1" to edge style.
`,
inputSchema: z.object({
xml: z
.string()
.describe("XML string to be displayed on draw.io"),
}),
},
edit_diagram: {
description: `Edit specific parts of the current diagram by replacing exact line matches. Use this tool to make targeted fixes without regenerating the entire XML.
CRITICAL: Copy-paste the EXACT search pattern from the "Current diagram XML" in system context. Do NOT reorder attributes or reformat - the attribute order in draw.io XML varies and you MUST match it exactly.
IMPORTANT: Keep edits concise:
- COPY the exact mxCell line from the current XML (attribute order matters!)
@@ -289,33 +343,48 @@ IMPORTANT: Keep edits concise:
- Break large changes into multiple smaller edits
- Each search must contain complete lines (never truncate mid-line)
- First match only - be specific enough to target the right element`,
inputSchema: z.object({
edits: z
.array(
z.object({
search: z
.string()
.describe(
"EXACT lines copied from current XML (preserve attribute order!)",
),
replace: z
.string()
.describe("Replacement lines"),
}),
)
.describe(
"Array of search/replace pairs to apply sequentially",
),
}),
},
},
temperature: 0,
})
return result.toUIMessageStreamResponse()
}
// Wrap handler with error handling
async function safeHandler(req: Request): Promise<Response> {
try {
return await handleChatRequest(req)
} catch (error) {
console.error("Error in chat route:", error)
return Response.json(
{ error: "Internal server error" },
{ status: 500 },
)
}
}
// Wrap with Langfuse observe (if configured)
const observedHandler = wrapWithObserve(safeHandler)
export async function POST(req: Request) {
return observedHandler(req)
}
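
To make the two tool contracts above concrete, here are hypothetical inputs that satisfy their zod schemas; the XML values are invented for illustration. A display_diagram input:

{
    "xml": "<root><mxCell id=\"0\" /><mxCell id=\"1\" parent=\"0\" /><mxCell id=\"2\" value=\"Start\" style=\"rounded=1;\" vertex=\"1\" parent=\"1\"><mxGeometry x=\"40\" y=\"40\" width=\"120\" height=\"60\" as=\"geometry\" /></mxCell></root>"
}

Note the cell with id="2": isMinimalDiagram treats any XML without it as effectively empty. An edit_diagram input, where the search string is copied byte-for-byte from the current XML so the first-match replacement can succeed:

{
    "edits": [
        {
            "search": "<mxCell id=\"2\" value=\"Start\" style=\"rounded=1;\" vertex=\"1\" parent=\"1\">",
            "replace": "<mxCell id=\"2\" value=\"Begin\" style=\"rounded=1;\" vertex=\"1\" parent=\"1\">"
        }
    ]
}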

View File

@@ -1,9 +1,12 @@
import { NextResponse } from "next/server"
export async function GET() {
const accessCodes =
process.env.ACCESS_CODE_LIST?.split(",")
.map((code) => code.trim())
.filter(Boolean) || []
return NextResponse.json({
accessCodeRequired: accessCodes.length > 0,
})
}
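
This endpoint and the chat route parse the same comma-separated environment variable, so a single hypothetical .env entry such as ACCESS_CODE_LIST=alpha-1234,beta-5678 (values invented) both flips accessCodeRequired here and becomes the allowlist the chat route checks against the x-access-code header.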

View File

@@ -1,103 +1,112 @@
import { randomUUID } from "crypto"
import { z } from "zod"
import { getLangfuseClient } from "@/lib/langfuse"
const feedbackSchema = z.object({
messageId: z.string().min(1).max(200),
feedback: z.enum(["good", "bad"]),
sessionId: z.string().min(1).max(200).optional(),
})
export async function POST(req: Request) {
const langfuse = getLangfuseClient()
if (!langfuse) {
return Response.json({ success: true, logged: false })
}
// Validate input
let data
try {
data = feedbackSchema.parse(await req.json())
} catch {
return Response.json(
{ success: false, error: "Invalid input" },
{ status: 400 },
)
}
const { messageId, feedback, sessionId } = data
// Get user IP for tracking
const forwardedFor = req.headers.get("x-forwarded-for")
const userId = forwardedFor?.split(",")[0]?.trim() || "anonymous"
try {
// Find the most recent chat trace for this session to attach the score to
const tracesResponse = await langfuse.api.trace.list({
sessionId,
limit: 1,
})
const traces = tracesResponse.data || []
const latestTrace = traces[0]
if (!latestTrace) {
// No trace found for this session - create a standalone feedback trace
const traceId = randomUUID()
const timestamp = new Date().toISOString()
await langfuse.api.ingestion.batch({
batch: [
{
type: "trace-create",
id: randomUUID(),
timestamp,
body: {
id: traceId,
name: "user-feedback",
sessionId,
userId,
input: { messageId, feedback },
metadata: {
source: "feedback-button",
note: "standalone - no chat trace found",
},
timestamp,
},
},
{
type: "score-create",
id: randomUUID(),
timestamp,
body: {
id: randomUUID(),
traceId,
name: "user-feedback",
value: feedback === "good" ? 1 : 0,
comment: `User gave ${feedback} feedback`,
},
},
],
})
} else {
// Attach score to the existing chat trace
const timestamp = new Date().toISOString()
await langfuse.api.ingestion.batch({
batch: [
{
type: "score-create",
id: randomUUID(),
timestamp,
body: {
id: randomUUID(),
traceId: latestTrace.id,
name: "user-feedback",
value: feedback === "good" ? 1 : 0,
comment: `User gave ${feedback} feedback`,
},
},
],
})
}
return Response.json({ success: true, logged: true })
} catch (error) {
console.error("Langfuse feedback error:", error)
return Response.json(
{ success: false, error: "Failed to log feedback" },
{ status: 500 },
)
}
}
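
For illustration, a request body that passes feedbackSchema above (all values invented):

{
    "messageId": "msg_abc123",
    "feedback": "good",
    "sessionId": "sess_xyz789"
}

A feedback value outside the "good"/"bad" enum, or a missing messageId, is rejected with the 400 Invalid input response before anything is sent to Langfuse.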

View File

@@ -1,65 +1,71 @@
import { randomUUID } from "crypto"
import { z } from "zod"
import { getLangfuseClient } from "@/lib/langfuse"
const saveSchema = z.object({
filename: z.string().min(1).max(255),
format: z.enum(["drawio", "png", "svg"]),
sessionId: z.string().min(1).max(200).optional(),
})
export async function POST(req: Request) {
const langfuse = getLangfuseClient()
if (!langfuse) {
return Response.json({ success: true, logged: false })
}
// Validate input
let data
try {
data = saveSchema.parse(await req.json())
} catch {
return Response.json(
{ success: false, error: "Invalid input" },
{ status: 400 },
)
}
const { filename, format, sessionId } = data
try {
const timestamp = new Date().toISOString()
// Find the most recent chat trace for this session to attach the save flag
const tracesResponse = await langfuse.api.trace.list({
sessionId,
limit: 1,
})
const traces = tracesResponse.data || []
const latestTrace = traces[0]
if (latestTrace) {
// Add a score to the existing trace to flag that user saved
await langfuse.api.ingestion.batch({
batch: [
{
type: "score-create",
id: randomUUID(),
timestamp,
body: {
id: randomUUID(),
traceId: latestTrace.id,
name: "diagram-saved",
value: 1,
comment: `User saved diagram as ${filename}.${format}`,
},
},
],
})
}
// If no trace found, skip logging (user hasn't chatted yet)
return Response.json({ success: true, logged: !!latestTrace })
} catch (error) {
console.error("Langfuse save error:", error)
return Response.json(
{ success: false, error: "Failed to log save" },
{ status: 500 },
)
}
}
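
Similarly, a body that passes saveSchema above (values invented):

{
    "filename": "aws-architecture",
    "format": "png",
    "sessionId": "sess_xyz789"
}

As in the feedback route, sessionId is optional; when no matching trace exists, the route still returns success but with logged: false.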