feat: multi-provider model configuration with UI/UX improvements (#355)

* feat: add multi-provider model configuration

- Add model config dialog for managing multiple AI providers
- Support for OpenAI, Anthropic, Google, Azure, Bedrock, OpenRouter, DeepSeek, SiliconFlow, Ollama, and AI Gateway
- Add model selector dropdown in chat panel header
- Add API key validation endpoint
- Add custom model ID input with keyboard navigation
- Fix hover highlight in Command component
- Add suggested models for each provider including latest Claude 4.5 series
- Store configuration locally in the browser (see the sketch below)
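A minimal sketch of what the browser-stored configuration could look like; the interface name, field names, and storage key below are assumptions, not the actual schema:

```ts
// Hypothetical shape of the locally stored provider configuration.
interface StoredProviderConfig {
  provider:
    | "openai" | "anthropic" | "google" | "azure" | "bedrock"
    | "openrouter" | "deepseek" | "siliconflow" | "ollama" | "gateway"
  apiKey?: string
  baseUrl?: string
  models: { id: string; verified: boolean }[]
}

// Configuration never leaves the browser; "ai-provider-config" is an assumed key.
const configs: StoredProviderConfig[] = [
  { provider: "openai", apiKey: "sk-...", models: [{ id: "o3-mini", verified: true }] },
]
localStorage.setItem("ai-provider-config", JSON.stringify(configs))
```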

* feat: improve model config UI and move selector to chat input

- Move model selector from header to chat input (left of send button)
- Add per-model validation status (queued, running, valid, invalid); sketched after this list
- Filter model selector to only show verified models
- Add editable model IDs in config dialog
- Add custom model input field alongside suggested models dropdown
- Fix hover states on provider buttons and select triggers
- Update OpenAI suggested models with GPT-5 series
- Add alert-dialog component for delete confirmation
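A sketch of how the per-model validation status above might be modeled; the status values come from the commit, while the type and helper names are assumptions:

```ts
// Per-model validation states listed in the commit above.
type ValidationStatus = "queued" | "running" | "valid" | "invalid"

// Hypothetical entry kept per model in the config dialog.
interface ModelEntry {
  id: string               // editable model ID
  status: ValidationStatus // per-model validation status
  error?: string           // populated when status is "invalid"
}

// The selector in the chat input only offers models that validated successfully.
const selectableModels = (models: ModelEntry[]) =>
  models.filter((m) => m.status === "valid")
```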

* refactor: revert shadcn component changes, apply hover fix at usage site

* feat: add AWS credentials support for Bedrock provider

- Add AWS Access Key ID, Secret Access Key, Region fields for Bedrock
- Show different credential fields based on provider type
- Update validation API to handle Bedrock with AWS credentials
- Add region selector with common AWS regions

* fix: reset Test button after validation completes

* fix: reset validation button to Test after success

* fix: complete bedrock support and UI/UX improvements

- Add bedrock to ALLOWED_CLIENT_PROVIDERS for client credentials
- Pass AWS credentials through the full chain (headers → API → provider); see the sketch after this list
- Replace non-existent GPT-5 models with real ones (o1, o3-mini)
- Add accessibility: aria-labels, focus-visible rings, inline errors
- Add more AWS regions (Ohio, London, Paris, Mumbai, Seoul, São Paulo)
- Fix setTimeout cleanup on component unmount by tracking the timer in a useRef
- Fix TypeScript type consistency in getSelectedAIConfig fallback
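A rough sketch of the credential chain described above; the route and header names are assumptions and may differ from what the PR actually uses:

```ts
// Hypothetical client-side request carrying AWS credentials as headers.
declare const awsAccessKeyId: string
declare const awsSecretAccessKey: string
declare const awsRegion: string
declare const messages: unknown[]

await fetch("/api/chat", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "x-aws-access-key-id": awsAccessKeyId,
    "x-aws-secret-access-key": awsSecretAccessKey,
    "x-aws-region": awsRegion,
  },
  body: JSON.stringify({ messages }),
})
// On the server, the handler reads these headers and passes the values to
// createAmazonBedrock, mirroring the validation route shown further below.
```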

* chore: remove unused code

- Remove unused setAccessCodeRequired state in chat-panel.tsx
- Remove unused getSelectedModel export in model-config.ts

* fix: UI/UX improvements for model configuration dialog

- Add gradient header styling with icon badge
- Change Configuration section icon from Key to Settings2
- Add duplicate model detection with warning banner and inline removal
- Filter out already-added models from the suggestions dropdown (see the sketch after this list)
- Add type-to-confirm for deleting providers with 3+ models
- Enhance delete confirmation dialog with warning icon
- Improve model selector discoverability (show model name + chevron)
- Add truncation for long model names with title tooltip
- Remove AI provider settings from Settings dialog (now in Model Config)
- Extract ValidationButton into reusable component
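The suggestion filtering mentioned above amounts to something like the following (variable names are illustrative only):

```ts
// Suggested models minus the ones already added for this provider.
const suggestedModels = ["claude-sonnet-4-5", "claude-haiku-4-5"]
const addedModels = [{ id: "claude-sonnet-4-5" }]

const availableSuggestions = suggestedModels.filter(
  (id) => !addedModels.some((m) => m.id === id),
) // -> ["claude-haiku-4-5"]
```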

* fix: prevent duplicate model IDs within same provider

- Block adding model if ID already exists in provider
- Block editing model ID to match existing model in provider

* fix: improve duplicate model ID notifications

- Add toast notification when trying to add duplicate model
- Allow free typing when editing model ID, validate on blur
- Show warning toast instead of blocking input

* fix: improve duplicate model validation UX in config dialog

- Add inline error display for duplicate model IDs (see the sketch after this list)
- Show red border on input when error exists
- Validate on blur with shake animation for edit errors
- Prevent saving empty model names
- Clear errors when user starts typing
- Simplify error styling (small red text, no heavy chips)
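A minimal sketch combining the duplicate and empty-name checks from the last few commits (the helper name and messages are assumptions):

```ts
// Hypothetical helper: returns an inline error string, or null when the ID is fine.
function duplicateModelError(
  models: { id: string }[],
  candidateId: string,
  editingIndex?: number, // skip the entry currently being edited
): string | null {
  const id = candidateId.trim()
  if (!id) return "Model name cannot be empty"
  const clash = models.some((m, i) => i !== editingIndex && m.id === id)
  return clash ? `"${id}" is already added for this provider` : null
}
```

Typical use per the commits above: run the check on blur, render the message inline with a red border, and clear it as soon as the user types again.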

Author: Dayuan Jiang (committed by GitHub)
Date:   2025-12-22 22:36:36 +09:00
Parent: b088a0653e · Commit: 85cb441e26
20 changed files with 4090 additions and 314 deletions


@@ -0,0 +1,213 @@
import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock"
import { createAnthropic } from "@ai-sdk/anthropic"
import { createDeepSeek, deepseek } from "@ai-sdk/deepseek"
import { createGateway } from "@ai-sdk/gateway"
import { createGoogleGenerativeAI } from "@ai-sdk/google"
import { createOpenAI } from "@ai-sdk/openai"
import { createOpenRouter } from "@openrouter/ai-sdk-provider"
import { generateText } from "ai"
import { NextResponse } from "next/server"
import { createOllama } from "ollama-ai-provider-v2"

export const runtime = "nodejs"

interface ValidateRequest {
  provider: string
  apiKey: string
  baseUrl?: string
  modelId: string
  // AWS Bedrock specific
  awsAccessKeyId?: string
  awsSecretAccessKey?: string
  awsRegion?: string
}

export async function POST(req: Request) {
  try {
    const body: ValidateRequest = await req.json()
    const {
      provider,
      apiKey,
      baseUrl,
      modelId,
      awsAccessKeyId,
      awsSecretAccessKey,
      awsRegion,
    } = body

    if (!provider || !modelId) {
      return NextResponse.json(
        { valid: false, error: "Provider and model ID are required" },
        { status: 400 },
      )
    }

    // Validate credentials based on provider
    if (provider === "bedrock") {
      if (!awsAccessKeyId || !awsSecretAccessKey || !awsRegion) {
        return NextResponse.json(
          {
            valid: false,
            error: "AWS credentials (Access Key ID, Secret Access Key, Region) are required",
          },
          { status: 400 },
        )
      }
    } else if (provider !== "ollama" && !apiKey) {
      return NextResponse.json(
        { valid: false, error: "API key is required" },
        { status: 400 },
      )
    }

    let model: any
    switch (provider) {
      case "openai": {
        const openai = createOpenAI({
          apiKey,
          ...(baseUrl && { baseURL: baseUrl }),
        })
        model = openai.chat(modelId)
        break
      }
      case "anthropic": {
        const anthropic = createAnthropic({
          apiKey,
          baseURL: baseUrl || "https://api.anthropic.com/v1",
        })
        model = anthropic(modelId)
        break
      }
      case "google": {
        const google = createGoogleGenerativeAI({
          apiKey,
          ...(baseUrl && { baseURL: baseUrl }),
        })
        model = google(modelId)
        break
      }
      case "azure": {
        const azure = createOpenAI({
          apiKey,
          baseURL: baseUrl,
        })
        model = azure.chat(modelId)
        break
      }
      case "bedrock": {
        const bedrock = createAmazonBedrock({
          accessKeyId: awsAccessKeyId,
          secretAccessKey: awsSecretAccessKey,
          region: awsRegion,
        })
        model = bedrock(modelId)
        break
      }
      case "openrouter": {
        const openrouter = createOpenRouter({
          apiKey,
          ...(baseUrl && { baseURL: baseUrl }),
        })
        model = openrouter(modelId)
        break
      }
      case "deepseek": {
        if (baseUrl || apiKey) {
          const ds = createDeepSeek({
            apiKey,
            ...(baseUrl && { baseURL: baseUrl }),
          })
          model = ds(modelId)
        } else {
          model = deepseek(modelId)
        }
        break
      }
      case "siliconflow": {
        const sf = createOpenAI({
          apiKey,
          baseURL: baseUrl || "https://api.siliconflow.com/v1",
        })
        model = sf.chat(modelId)
        break
      }
      case "ollama": {
        const ollama = createOllama({
          baseURL: baseUrl || "http://localhost:11434",
        })
        model = ollama(modelId)
        break
      }
      case "gateway": {
        const gw = createGateway({
          apiKey,
          ...(baseUrl && { baseURL: baseUrl }),
        })
        model = gw(modelId)
        break
      }
      default:
        return NextResponse.json(
          { valid: false, error: `Unknown provider: ${provider}` },
          { status: 400 },
        )
    }

    // Make a minimal test request
    const startTime = Date.now()
    await generateText({
      model,
      prompt: "Say 'OK'",
      maxOutputTokens: 20,
    })
    const responseTime = Date.now() - startTime

    return NextResponse.json({
      valid: true,
      responseTime,
    })
  } catch (error) {
    console.error("[validate-model] Error:", error)

    let errorMessage = "Validation failed"
    if (error instanceof Error) {
      // Extract meaningful error message
      if (
        error.message.includes("401") ||
        error.message.includes("Unauthorized")
      ) {
        errorMessage = "Invalid API key"
      } else if (
        error.message.includes("404") ||
        error.message.includes("not found")
      ) {
        errorMessage = "Model not found"
      } else if (
        error.message.includes("429") ||
        error.message.includes("rate limit")
      ) {
        errorMessage = "Rate limited - try again later"
      } else if (error.message.includes("ECONNREFUSED")) {
        errorMessage = "Cannot connect to server"
      } else {
        errorMessage = error.message.slice(0, 100)
      }
    }

    return NextResponse.json(
      { valid: false, error: errorMessage },
      { status: 200 }, // Return 200 so client can read error message
    )
  }
}
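
For reference, a client-side call to this endpoint might look like the sketch below. The route path is an assumption (the new file's path is not shown here); the request body follows ValidateRequest and the response matches the handler's NextResponse bodies:

```ts
// Hypothetical route path; request/response shapes mirror the handler above.
const res = await fetch("/api/validate-model", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    provider: "bedrock",
    modelId: "anthropic.claude-3-5-sonnet-20240620-v1:0", // example model ID
    awsAccessKeyId: "AKIA...",
    awsSecretAccessKey: "...",
    awsRegion: "us-east-1",
  }),
})

// Validation failures also come back as HTTP 200 with { valid: false, error }.
const result: { valid: boolean; responseTime?: number; error?: string } =
  await res.json()
```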