Mirror of https://github.com/DayuanJiang/next-ai-draw-io.git, synced 2026-01-03 14:52:28 +08:00
chore: add Biome for formatting and linting (#116)
- Add Biome as formatter and linter (replaces Prettier)
- Configure Husky + lint-staged for pre-commit hooks
- Add VS Code settings for format on save
- Ignore components/ui/ (shadcn generated code)
- Remove semicolons, use 4-space indent
- Reformat all files to new style
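The matching biome.json would live at the repo root. A minimal sketch consistent with the bullets above, assuming Biome 1.x configuration keys and an illustrative schema version (the committed file may differ):

{
    "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json",
    "formatter": {
        "enabled": true,
        "indentStyle": "space",
        "indentWidth": 4
    },
    "linter": {
        "enabled": true,
        "rules": { "recommended": true }
    },
    "javascript": {
        "formatter": {
            "quoteStyle": "double",
            "semicolons": "asNeeded"
        }
    },
    "files": {
        "ignore": ["components/ui/**"]
    }
}

"semicolons": "asNeeded" and "quoteStyle": "double" produce exactly the style visible in the diffs below: semicolons dropped, single quotes replaced by double quotes.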
@@ -1,89 +1,89 @@
-import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
-import { fromNodeProviderChain } from '@aws-sdk/credential-providers';
-import { openai, createOpenAI } from '@ai-sdk/openai';
-import { createAnthropic } from '@ai-sdk/anthropic';
-import { google, createGoogleGenerativeAI } from '@ai-sdk/google';
-import { azure, createAzure } from '@ai-sdk/azure';
-import { ollama, createOllama } from 'ollama-ai-provider-v2';
-import { createOpenRouter } from '@openrouter/ai-sdk-provider';
-import { deepseek, createDeepSeek } from '@ai-sdk/deepseek';
+import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock"
+import { createAnthropic } from "@ai-sdk/anthropic"
+import { azure, createAzure } from "@ai-sdk/azure"
+import { createDeepSeek, deepseek } from "@ai-sdk/deepseek"
+import { createGoogleGenerativeAI, google } from "@ai-sdk/google"
+import { createOpenAI, openai } from "@ai-sdk/openai"
+import { fromNodeProviderChain } from "@aws-sdk/credential-providers"
+import { createOpenRouter } from "@openrouter/ai-sdk-provider"
+import { createOllama, ollama } from "ollama-ai-provider-v2"
 
 export type ProviderName =
-    | 'bedrock'
-    | 'openai'
-    | 'anthropic'
-    | 'google'
-    | 'azure'
-    | 'ollama'
-    | 'openrouter'
-    | 'deepseek';
+    | "bedrock"
+    | "openai"
+    | "anthropic"
+    | "google"
+    | "azure"
+    | "ollama"
+    | "openrouter"
+    | "deepseek"
 
 interface ModelConfig {
-    model: any;
-    providerOptions?: any;
-    headers?: Record<string, string>;
-    modelId: string;
+    model: any
+    providerOptions?: any
+    headers?: Record<string, string>
+    modelId: string
 }
 
 // Bedrock provider options for Anthropic beta features
 const BEDROCK_ANTHROPIC_BETA = {
     bedrock: {
-        anthropicBeta: ['fine-grained-tool-streaming-2025-05-14'],
+        anthropicBeta: ["fine-grained-tool-streaming-2025-05-14"],
     },
-};
+}
 
 // Direct Anthropic API headers for beta features
 const ANTHROPIC_BETA_HEADERS = {
-    'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
-};
+    "anthropic-beta": "fine-grained-tool-streaming-2025-05-14",
+}
 
 // Map of provider to required environment variable
 const PROVIDER_ENV_VARS: Record<ProviderName, string | null> = {
     bedrock: null, // AWS SDK auto-uses IAM role on AWS, or env vars locally
-    openai: 'OPENAI_API_KEY',
-    anthropic: 'ANTHROPIC_API_KEY',
-    google: 'GOOGLE_GENERATIVE_AI_API_KEY',
-    azure: 'AZURE_API_KEY',
+    openai: "OPENAI_API_KEY",
+    anthropic: "ANTHROPIC_API_KEY",
+    google: "GOOGLE_GENERATIVE_AI_API_KEY",
+    azure: "AZURE_API_KEY",
     ollama: null, // No credentials needed for local Ollama
-    openrouter: 'OPENROUTER_API_KEY',
-    deepseek: 'DEEPSEEK_API_KEY',
-};
+    openrouter: "OPENROUTER_API_KEY",
+    deepseek: "DEEPSEEK_API_KEY",
+}
 
 /**
  * Auto-detect provider based on available API keys
  * Returns the provider if exactly one is configured, otherwise null
  */
 function detectProvider(): ProviderName | null {
-    const configuredProviders: ProviderName[] = [];
+    const configuredProviders: ProviderName[] = []
 
     for (const [provider, envVar] of Object.entries(PROVIDER_ENV_VARS)) {
         if (envVar === null) {
             // Skip ollama - it doesn't require credentials
-            continue;
+            continue
         }
         if (process.env[envVar]) {
-            configuredProviders.push(provider as ProviderName);
+            configuredProviders.push(provider as ProviderName)
         }
     }
 
     if (configuredProviders.length === 1) {
-        return configuredProviders[0];
+        return configuredProviders[0]
     }
 
-    return null;
+    return null
 }
 
 /**
  * Validate that required API keys are present for the selected provider
 */
 function validateProviderCredentials(provider: ProviderName): void {
-    const requiredVar = PROVIDER_ENV_VARS[provider];
+    const requiredVar = PROVIDER_ENV_VARS[provider]
     if (requiredVar && !process.env[requiredVar]) {
         throw new Error(
             `${requiredVar} environment variable is required for ${provider} provider. ` +
-                `Please set it in your .env.local file.`
-        );
+                `Please set it in your .env.local file.`,
+        )
     }
 }
@@ -106,158 +106,164 @@ function validateProviderCredentials(provider: ProviderName): void {
  * - DEEPSEEK_BASE_URL: DeepSeek endpoint (optional)
  */
 export function getAIModel(): ModelConfig {
-    const modelId = process.env.AI_MODEL;
+    const modelId = process.env.AI_MODEL
 
     if (!modelId) {
         throw new Error(
-            `AI_MODEL environment variable is required. Example: AI_MODEL=claude-sonnet-4-5`
-        );
+            `AI_MODEL environment variable is required. Example: AI_MODEL=claude-sonnet-4-5`,
+        )
     }
 
     // Determine provider: explicit config > auto-detect > error
-    let provider: ProviderName;
+    let provider: ProviderName
     if (process.env.AI_PROVIDER) {
-        provider = process.env.AI_PROVIDER as ProviderName;
+        provider = process.env.AI_PROVIDER as ProviderName
     } else {
-        const detected = detectProvider();
+        const detected = detectProvider()
         if (detected) {
-            provider = detected;
-            console.log(`[AI Provider] Auto-detected provider: ${provider}`);
+            provider = detected
+            console.log(`[AI Provider] Auto-detected provider: ${provider}`)
         } else {
             // List configured providers for better error message
             const configured = Object.entries(PROVIDER_ENV_VARS)
                 .filter(([, envVar]) => envVar && process.env[envVar as string])
-                .map(([p]) => p);
+                .map(([p]) => p)
 
             if (configured.length === 0) {
                 throw new Error(
                     `No AI provider configured. Please set one of the following API keys in your .env.local file:\n` +
                         `- DEEPSEEK_API_KEY for DeepSeek\n` +
                         `- OPENAI_API_KEY for OpenAI\n` +
                         `- ANTHROPIC_API_KEY for Anthropic\n` +
                         `- GOOGLE_GENERATIVE_AI_API_KEY for Google\n` +
                         `- AWS_ACCESS_KEY_ID for Bedrock\n` +
                         `- OPENROUTER_API_KEY for OpenRouter\n` +
                         `- AZURE_API_KEY for Azure\n` +
-                        `Or set AI_PROVIDER=ollama for local Ollama.`
-                );
+                        `Or set AI_PROVIDER=ollama for local Ollama.`,
+                )
             } else {
                 throw new Error(
-                    `Multiple AI providers configured (${configured.join(', ')}). ` +
-                        `Please set AI_PROVIDER to specify which one to use.`
-                );
+                    `Multiple AI providers configured (${configured.join(", ")}). ` +
+                        `Please set AI_PROVIDER to specify which one to use.`,
+                )
             }
         }
     }
 
     // Validate provider credentials
-    validateProviderCredentials(provider);
+    validateProviderCredentials(provider)
 
-    console.log(`[AI Provider] Initializing ${provider} with model: ${modelId}`);
+    console.log(`[AI Provider] Initializing ${provider} with model: ${modelId}`)
 
-    let model: any;
-    let providerOptions: any = undefined;
-    let headers: Record<string, string> | undefined = undefined;
+    let model: any
+    let providerOptions: any
+    let headers: Record<string, string> | undefined
 
     switch (provider) {
-        case 'bedrock': {
+        case "bedrock": {
             // Use credential provider chain for IAM role support (Amplify, Lambda, etc.)
             // Falls back to env vars (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) for local dev
             const bedrockProvider = createAmazonBedrock({
-                region: process.env.AWS_REGION || 'us-west-2',
+                region: process.env.AWS_REGION || "us-west-2",
                 credentialProvider: fromNodeProviderChain(),
-            });
-            model = bedrockProvider(modelId);
+            })
+            model = bedrockProvider(modelId)
             // Add Anthropic beta options if using Claude models via Bedrock
-            if (modelId.includes('anthropic.claude')) {
-                providerOptions = BEDROCK_ANTHROPIC_BETA;
+            if (modelId.includes("anthropic.claude")) {
+                providerOptions = BEDROCK_ANTHROPIC_BETA
             }
-            break;
+            break
         }
 
-        case 'openai':
+        case "openai":
             if (process.env.OPENAI_BASE_URL) {
                 const customOpenAI = createOpenAI({
                     apiKey: process.env.OPENAI_API_KEY,
                     baseURL: process.env.OPENAI_BASE_URL,
-                });
-                model = customOpenAI.chat(modelId);
+                })
+                model = customOpenAI.chat(modelId)
             } else {
-                model = openai(modelId);
+                model = openai(modelId)
             }
-            break;
+            break
 
-        case 'anthropic':
+        case "anthropic": {
             const customProvider = createAnthropic({
                 apiKey: process.env.ANTHROPIC_API_KEY,
-                baseURL: process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com/v1',
+                baseURL:
+                    process.env.ANTHROPIC_BASE_URL ||
+                    "https://api.anthropic.com/v1",
                 headers: ANTHROPIC_BETA_HEADERS,
-            });
-            model = customProvider(modelId);
+            })
+            model = customProvider(modelId)
             // Add beta headers for fine-grained tool streaming
-            headers = ANTHROPIC_BETA_HEADERS;
-            break;
+            headers = ANTHROPIC_BETA_HEADERS
+            break
+        }
 
-        case 'google':
+        case "google":
             if (process.env.GOOGLE_BASE_URL) {
                 const customGoogle = createGoogleGenerativeAI({
                     apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
                     baseURL: process.env.GOOGLE_BASE_URL,
-                });
-                model = customGoogle(modelId);
+                })
+                model = customGoogle(modelId)
             } else {
-                model = google(modelId);
+                model = google(modelId)
             }
-            break;
+            break
 
-        case 'azure':
+        case "azure":
             if (process.env.AZURE_BASE_URL) {
                 const customAzure = createAzure({
                     apiKey: process.env.AZURE_API_KEY,
                     baseURL: process.env.AZURE_BASE_URL,
-                });
-                model = customAzure(modelId);
+                })
+                model = customAzure(modelId)
             } else {
-                model = azure(modelId);
+                model = azure(modelId)
             }
-            break;
+            break
 
-        case 'ollama':
+        case "ollama":
             if (process.env.OLLAMA_BASE_URL) {
                 const customOllama = createOllama({
                     baseURL: process.env.OLLAMA_BASE_URL,
-                });
-                model = customOllama(modelId);
+                })
+                model = customOllama(modelId)
             } else {
-                model = ollama(modelId);
+                model = ollama(modelId)
             }
-            break;
+            break
 
-        case 'openrouter':
+        case "openrouter": {
             const openrouter = createOpenRouter({
                 apiKey: process.env.OPENROUTER_API_KEY,
-                ...(process.env.OPENROUTER_BASE_URL && { baseURL: process.env.OPENROUTER_BASE_URL }),
-            });
-            model = openrouter(modelId);
-            break;
+                ...(process.env.OPENROUTER_BASE_URL && {
+                    baseURL: process.env.OPENROUTER_BASE_URL,
+                }),
+            })
+            model = openrouter(modelId)
+            break
+        }
 
-        case 'deepseek':
+        case "deepseek":
             if (process.env.DEEPSEEK_BASE_URL) {
                 const customDeepSeek = createDeepSeek({
                     apiKey: process.env.DEEPSEEK_API_KEY,
                     baseURL: process.env.DEEPSEEK_BASE_URL,
-                });
-                model = customDeepSeek(modelId);
+                })
+                model = customDeepSeek(modelId)
             } else {
-                model = deepseek(modelId);
+                model = deepseek(modelId)
             }
-            break;
+            break
 
         default:
             throw new Error(
-                `Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek`
-            );
+                `Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek`,
+            )
     }
 
-    return { model, providerOptions, headers, modelId };
+    return { model, providerOptions, headers, modelId }
 }
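Note: getAIModel() is consumed by the chat route roughly as sketched below. The route itself is outside this diff, so the import path and handler shape are assumptions, not the project's actual code.

import { streamText } from "ai"

import { getAIModel } from "@/lib/ai-providers" // path assumed

export async function POST(req: Request) {
    const { prompt } = await req.json()
    // model, providerOptions and headers are threaded straight into streamText
    const { model, providerOptions, headers } = getAIModel()

    const result = streamText({
        model,
        prompt,
        ...(providerOptions && { providerOptions }),
        ...(headers && { headers }),
    })
    return result.toTextStreamResponse()
}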
@@ -1,14 +1,15 @@
 export interface CachedResponse {
-    promptText: string;
-    hasImage: boolean;
-    xml: string;
+    promptText: string
+    hasImage: boolean
+    xml: string
 }
 
 export const CACHED_EXAMPLE_RESPONSES: CachedResponse[] = [
     {
-        promptText: "Give me a **animated connector** diagram of transformer's architecture",
+        promptText:
+            "Give me a **animated connector** diagram of transformer's architecture",
         hasImage: false,
         xml: `<root>
 <mxCell id="0"/>
 <mxCell id="1" parent="0"/>
@@ -255,11 +256,11 @@ export const CACHED_EXAMPLE_RESPONSES: CachedResponse[] = [
 <mxGeometry x="660" y="530" width="100" height="30" as="geometry"/>
 </mxCell>
 </root>`,
     },
     {
         promptText: "Replicate this in aws style",
         hasImage: true,
         xml: `<root>
 <mxCell id="0"/>
 <mxCell id="1" parent="0"/>
@@ -325,11 +326,11 @@ export const CACHED_EXAMPLE_RESPONSES: CachedResponse[] = [
 </mxGeometry>
 </mxCell>
 </root>`,
     },
     {
         promptText: "Replicate this flowchart.",
         hasImage: true,
         xml: `<root>
 <mxCell id="0"/>
 <mxCell id="1" parent="0"/>
@@ -392,11 +393,11 @@ export const CACHED_EXAMPLE_RESPONSES: CachedResponse[] = [
 <mxGeometry x="130" y="650" width="200" height="60" as="geometry"/>
 </mxCell>
 </root>`,
     },
     {
         promptText: "Draw a cat for me",
         hasImage: false,
         xml: `<root>
 <mxCell id="0"/>
 <mxCell id="1" parent="0"/>
@@ -544,14 +545,17 @@ export const CACHED_EXAMPLE_RESPONSES: CachedResponse[] = [
 </mxCell>
 
 </root>`,
     },
-];
+]
 
 export function findCachedResponse(
     promptText: string,
-    hasImage: boolean
+    hasImage: boolean,
 ): CachedResponse | undefined {
     return CACHED_EXAMPLE_RESPONSES.find(
-        (c) => c.promptText === promptText && c.hasImage === hasImage && c.xml !== ''
-    );
+        (c) =>
+            c.promptText === promptText &&
+            c.hasImage === hasImage &&
+            c.xml !== "",
+    )
 }
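The lookup above is an exact-match cache: a sketch of how a route could short-circuit the model call for the demo prompts (wiring assumed, not shown in this diff):

import { findCachedResponse } from "@/lib/cached-responses" // path assumed

const cached = findCachedResponse("Draw a cat for me", false)
if (cached) {
    // Exact promptText + hasImage match with non-empty XML:
    // serve the pre-generated draw.io XML without hitting the model
    respondWithXml(cached.xml) // respondWithXml is hypothetical
}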
lib/langfuse.ts (132 lines changed)
@@ -1,95 +1,107 @@
-import { observe, updateActiveTrace } from '@langfuse/tracing';
-import { LangfuseClient } from '@langfuse/client';
-import * as api from '@opentelemetry/api';
+import { LangfuseClient } from "@langfuse/client"
+import { observe, updateActiveTrace } from "@langfuse/tracing"
+import * as api from "@opentelemetry/api"
 
 // Singleton LangfuseClient instance for direct API calls
-let langfuseClient: LangfuseClient | null = null;
+let langfuseClient: LangfuseClient | null = null
 
 export function getLangfuseClient(): LangfuseClient | null {
     if (!process.env.LANGFUSE_PUBLIC_KEY || !process.env.LANGFUSE_SECRET_KEY) {
-        return null;
+        return null
     }
 
     if (!langfuseClient) {
         langfuseClient = new LangfuseClient({
             publicKey: process.env.LANGFUSE_PUBLIC_KEY,
             secretKey: process.env.LANGFUSE_SECRET_KEY,
             baseUrl: process.env.LANGFUSE_BASEURL,
-        });
+        })
     }
 
-    return langfuseClient;
+    return langfuseClient
 }
 
 // Check if Langfuse is configured
 export function isLangfuseEnabled(): boolean {
-    return !!process.env.LANGFUSE_PUBLIC_KEY;
+    return !!process.env.LANGFUSE_PUBLIC_KEY
 }
 
 // Update trace with input data at the start of request
 export function setTraceInput(params: {
-    input: string;
-    sessionId?: string;
-    userId?: string;
+    input: string
+    sessionId?: string
+    userId?: string
 }) {
-    if (!isLangfuseEnabled()) return;
+    if (!isLangfuseEnabled()) return
 
     updateActiveTrace({
-        name: 'chat',
+        name: "chat",
         input: params.input,
         sessionId: params.sessionId,
         userId: params.userId,
-    });
+    })
 }
 
 // Update trace with output and end the span
-export function setTraceOutput(output: string, usage?: { promptTokens?: number; completionTokens?: number }) {
-    if (!isLangfuseEnabled()) return;
+export function setTraceOutput(
+    output: string,
+    usage?: { promptTokens?: number; completionTokens?: number },
+) {
+    if (!isLangfuseEnabled()) return
 
-    updateActiveTrace({ output });
+    updateActiveTrace({ output })
 
-    const activeSpan = api.trace.getActiveSpan();
+    const activeSpan = api.trace.getActiveSpan()
     if (activeSpan) {
         // Manually set usage attributes since AI SDK Bedrock streaming doesn't provide them
         if (usage?.promptTokens) {
-            activeSpan.setAttribute('ai.usage.promptTokens', usage.promptTokens);
-            activeSpan.setAttribute('gen_ai.usage.input_tokens', usage.promptTokens);
+            activeSpan.setAttribute("ai.usage.promptTokens", usage.promptTokens)
+            activeSpan.setAttribute(
+                "gen_ai.usage.input_tokens",
+                usage.promptTokens,
+            )
         }
         if (usage?.completionTokens) {
-            activeSpan.setAttribute('ai.usage.completionTokens', usage.completionTokens);
-            activeSpan.setAttribute('gen_ai.usage.output_tokens', usage.completionTokens);
+            activeSpan.setAttribute(
+                "ai.usage.completionTokens",
+                usage.completionTokens,
+            )
+            activeSpan.setAttribute(
+                "gen_ai.usage.output_tokens",
+                usage.completionTokens,
+            )
         }
-        activeSpan.end();
+        activeSpan.end()
     }
 }
 
 // Get telemetry config for streamText
 export function getTelemetryConfig(params: {
-    sessionId?: string;
-    userId?: string;
+    sessionId?: string
+    userId?: string
 }) {
-    if (!isLangfuseEnabled()) return undefined;
+    if (!isLangfuseEnabled()) return undefined
 
     return {
         isEnabled: true,
         // Disable automatic input recording to avoid uploading large base64 images to Langfuse media
         // User text input is recorded manually via setTraceInput
         recordInputs: false,
         recordOutputs: true,
         metadata: {
             sessionId: params.sessionId,
             userId: params.userId,
         },
-    };
+    }
 }
 
 // Wrap a handler with Langfuse observe
 export function wrapWithObserve<T>(
-    handler: (req: Request) => Promise<T>
+    handler: (req: Request) => Promise<T>,
 ): (req: Request) => Promise<T> {
     if (!isLangfuseEnabled()) {
-        return handler;
+        return handler
     }
 
-    return observe(handler, { name: 'chat', endOnExit: false });
+    return observe(handler, { name: "chat", endOnExit: false })
 }
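A sketch of how these helpers compose in a chat handler (route code is not part of this diff; import paths and request shape are assumptions):

import { streamText } from "ai"

import { getAIModel } from "@/lib/ai-providers" // path assumed
import {
    getTelemetryConfig,
    setTraceInput,
    wrapWithObserve,
} from "@/lib/langfuse" // path assumed

export const POST = wrapWithObserve(async (req: Request) => {
    const { prompt, sessionId } = await req.json()
    const { model } = getAIModel()

    // recordInputs is false in getTelemetryConfig, so record the text input manually
    setTraceInput({ input: prompt, sessionId })

    const result = streamText({
        model,
        prompt,
        experimental_telemetry: getTelemetryConfig({ sessionId }),
    })
    return result.toTextStreamResponse()
})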
@@ -119,7 +119,7 @@ Common styles:
 - Shapes: rounded=1 (rounded corners), fillColor=#hex, strokeColor=#hex
 - Edges: endArrow=classic/block/open/none, startArrow=none/classic, curved=1, edgeStyle=orthogonalEdgeStyle
 - Text: fontSize=14, fontStyle=1 (bold), align=center/left/right
-`;
+`
 
 // Extended system prompt (~4000+ tokens) - for models with 4000 token cache minimum
 export const EXTENDED_SYSTEM_PROMPT = `
@@ -519,14 +519,14 @@ The XML will be validated before rendering. Ensure:
 \`\`\`
 
 Remember: Quality diagrams communicate clearly. Choose appropriate shapes, use consistent styling, and maintain proper spacing for professional results.
-`;
+`
 
 // Model patterns that require extended prompt (4000 token cache minimum)
 // These patterns match Opus 4.5 and Haiku 4.5 model IDs
 const EXTENDED_PROMPT_MODEL_PATTERNS = [
-    'claude-opus-4-5', // Matches any Opus 4.5 variant
-    'claude-haiku-4-5', // Matches any Haiku 4.5 variant
-];
+    "claude-opus-4-5", // Matches any Opus 4.5 variant
+    "claude-haiku-4-5", // Matches any Haiku 4.5 variant
+]
 
 /**
  * Get the appropriate system prompt based on the model ID
@@ -535,16 +535,25 @@ const EXTENDED_PROMPT_MODEL_PATTERNS = [
  * @returns The system prompt string
  */
 export function getSystemPrompt(modelId?: string): string {
-    const modelName = modelId || "AI";
+    const modelName = modelId || "AI"
 
-    let prompt: string;
-    if (modelId && EXTENDED_PROMPT_MODEL_PATTERNS.some(pattern => modelId.includes(pattern))) {
-        console.log(`[System Prompt] Using EXTENDED prompt for model: ${modelId}`);
-        prompt = EXTENDED_SYSTEM_PROMPT;
-    } else {
-        console.log(`[System Prompt] Using DEFAULT prompt for model: ${modelId || 'unknown'}`);
-        prompt = DEFAULT_SYSTEM_PROMPT;
-    }
+    let prompt: string
+    if (
+        modelId &&
+        EXTENDED_PROMPT_MODEL_PATTERNS.some((pattern) =>
+            modelId.includes(pattern),
+        )
+    ) {
+        console.log(
+            `[System Prompt] Using EXTENDED prompt for model: ${modelId}`,
+        )
+        prompt = EXTENDED_SYSTEM_PROMPT
+    } else {
+        console.log(
+            `[System Prompt] Using DEFAULT prompt for model: ${modelId || "unknown"}`,
+        )
+        prompt = DEFAULT_SYSTEM_PROMPT
+    }
 
-    return prompt.replace("{{MODEL_NAME}}", modelName);
+    return prompt.replace("{{MODEL_NAME}}", modelName)
 }
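Selecting a prompt is a pure string operation; for example (model IDs below are hypothetical, matched by substring against the patterns above):

import { getSystemPrompt } from "@/lib/system-prompt" // path assumed

// Contains "claude-haiku-4-5" -> EXTENDED_SYSTEM_PROMPT,
// with {{MODEL_NAME}} replaced by the full model ID
const extended = getSystemPrompt("claude-haiku-4-5-v1")

// No model ID -> DEFAULT_SYSTEM_PROMPT with {{MODEL_NAME}} replaced by "AI"
const fallback = getSystemPrompt()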
lib/utils.ts (959 lines changed)
File diff suppressed because it is too large