mirror of
https://github.com/DayuanJiang/next-ai-draw-io.git
synced 2026-01-03 06:42:27 +08:00
feat: add multi-provider model configuration
- Add model config dialog for managing multiple AI providers
- Support for OpenAI, Anthropic, Google, Azure, Bedrock, OpenRouter, DeepSeek, SiliconFlow, Ollama, and AI Gateway
- Add model selector dropdown in chat panel header
- Add API key validation endpoint
- Add custom model ID input with keyboard navigation
- Fix hover highlight in Command component
- Add suggested models for each provider, including the latest Claude 4.5 series
- Store configuration locally in the browser
This commit is contained in:
180
app/api/validate-model/route.ts
Normal file
180
app/api/validate-model/route.ts
Normal file
@@ -0,0 +1,180 @@
|
||||
import { createAnthropic } from "@ai-sdk/anthropic"
|
||||
import { createDeepSeek, deepseek } from "@ai-sdk/deepseek"
|
||||
import { createGateway } from "@ai-sdk/gateway"
|
||||
import { createGoogleGenerativeAI } from "@ai-sdk/google"
|
||||
import { createOpenAI } from "@ai-sdk/openai"
|
||||
import { createOpenRouter } from "@openrouter/ai-sdk-provider"
|
||||
import { generateText } from "ai"
|
||||
import { NextResponse } from "next/server"
|
||||
import { createOllama } from "ollama-ai-provider-v2"
|
||||
|
||||
// Force the Node.js runtime (not Edge) — presumably because the provider
// SDKs used below depend on Node APIs; confirm before changing.
export const runtime = "nodejs"

/** Request body accepted by POST /api/validate-model. */
interface ValidateRequest {
    // Provider key; one of the cases handled in POST's switch, e.g.
    // "openai", "anthropic", "google", "azure", "openrouter", "deepseek",
    // "siliconflow", "ollama", "gateway".
    provider: string
    // Provider secret key. May be empty only for "ollama" (local runtime).
    apiKey: string
    // Optional override of the provider's API endpoint.
    baseUrl?: string
    // Provider-specific model identifier to validate.
    modelId: string
}
|
||||
|
||||
export async function POST(req: Request) {
|
||||
try {
|
||||
const body: ValidateRequest = await req.json()
|
||||
const { provider, apiKey, baseUrl, modelId } = body
|
||||
|
||||
if (!provider || !modelId) {
|
||||
return NextResponse.json(
|
||||
{ valid: false, error: "Provider and model ID are required" },
|
||||
{ status: 400 },
|
||||
)
|
||||
}
|
||||
|
||||
// Ollama doesn't require API key
|
||||
if (provider !== "ollama" && !apiKey) {
|
||||
return NextResponse.json(
|
||||
{ valid: false, error: "API key is required" },
|
||||
{ status: 400 },
|
||||
)
|
||||
}
|
||||
|
||||
let model: any
|
||||
|
||||
switch (provider) {
|
||||
case "openai": {
|
||||
const openai = createOpenAI({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
})
|
||||
model = openai.chat(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "anthropic": {
|
||||
const anthropic = createAnthropic({
|
||||
apiKey,
|
||||
baseURL: baseUrl || "https://api.anthropic.com/v1",
|
||||
})
|
||||
model = anthropic(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "google": {
|
||||
const google = createGoogleGenerativeAI({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
})
|
||||
model = google(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "azure": {
|
||||
const azure = createOpenAI({
|
||||
apiKey,
|
||||
baseURL: baseUrl,
|
||||
})
|
||||
model = azure.chat(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "openrouter": {
|
||||
const openrouter = createOpenRouter({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
})
|
||||
model = openrouter(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "deepseek": {
|
||||
if (baseUrl || apiKey) {
|
||||
const ds = createDeepSeek({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
})
|
||||
model = ds(modelId)
|
||||
} else {
|
||||
model = deepseek(modelId)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
case "siliconflow": {
|
||||
const sf = createOpenAI({
|
||||
apiKey,
|
||||
baseURL: baseUrl || "https://api.siliconflow.com/v1",
|
||||
})
|
||||
model = sf.chat(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "ollama": {
|
||||
const ollama = createOllama({
|
||||
baseURL: baseUrl || "http://localhost:11434",
|
||||
})
|
||||
model = ollama(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
case "gateway": {
|
||||
const gw = createGateway({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
})
|
||||
model = gw(modelId)
|
||||
break
|
||||
}
|
||||
|
||||
default:
|
||||
return NextResponse.json(
|
||||
{ valid: false, error: `Unknown provider: ${provider}` },
|
||||
{ status: 400 },
|
||||
)
|
||||
}
|
||||
|
||||
// Make a minimal test request
|
||||
const startTime = Date.now()
|
||||
await generateText({
|
||||
model,
|
||||
prompt: "Say 'OK'",
|
||||
maxOutputTokens: 20,
|
||||
})
|
||||
const responseTime = Date.now() - startTime
|
||||
|
||||
return NextResponse.json({
|
||||
valid: true,
|
||||
responseTime,
|
||||
})
|
||||
} catch (error) {
|
||||
console.error("[validate-model] Error:", error)
|
||||
|
||||
let errorMessage = "Validation failed"
|
||||
if (error instanceof Error) {
|
||||
// Extract meaningful error message
|
||||
if (
|
||||
error.message.includes("401") ||
|
||||
error.message.includes("Unauthorized")
|
||||
) {
|
||||
errorMessage = "Invalid API key"
|
||||
} else if (
|
||||
error.message.includes("404") ||
|
||||
error.message.includes("not found")
|
||||
) {
|
||||
errorMessage = "Model not found"
|
||||
} else if (
|
||||
error.message.includes("429") ||
|
||||
error.message.includes("rate limit")
|
||||
) {
|
||||
errorMessage = "Rate limited - try again later"
|
||||
} else if (error.message.includes("ECONNREFUSED")) {
|
||||
errorMessage = "Cannot connect to server"
|
||||
} else {
|
||||
errorMessage = error.message.slice(0, 100)
|
||||
}
|
||||
}
|
||||
|
||||
return NextResponse.json(
|
||||
{ valid: false, error: errorMessage },
|
||||
{ status: 200 }, // Return 200 so client can read error message
|
||||
)
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user