mirror of
https://github.com/DayuanJiang/next-ai-draw-io.git
synced 2026-01-02 22:32:27 +08:00
feat: multi-provider model configuration with UI/UX improvements (#355)
* feat: add multi-provider model configuration
  - Add model config dialog for managing multiple AI providers
  - Support for OpenAI, Anthropic, Google, Azure, Bedrock, OpenRouter, DeepSeek, SiliconFlow, Ollama, and AI Gateway
  - Add model selector dropdown in chat panel header
  - Add API key validation endpoint
  - Add custom model ID input with keyboard navigation
  - Fix hover highlight in Command component
  - Add suggested models for each provider including latest Claude 4.5 series
  - Store configuration locally in browser

* feat: improve model config UI and move selector to chat input
  - Move model selector from header to chat input (left of send button)
  - Add per-model validation status (queued, running, valid, invalid)
  - Filter model selector to only show verified models
  - Add editable model IDs in config dialog
  - Add custom model input field alongside suggested models dropdown
  - Fix hover states on provider buttons and select triggers
  - Update OpenAI suggested models with GPT-5 series
  - Add alert-dialog component for delete confirmation

* refactor: revert shadcn component changes, apply hover fix at usage site

* feat: add AWS credentials support for Bedrock provider
  - Add AWS Access Key ID, Secret Access Key, Region fields for Bedrock
  - Show different credential fields based on provider type
  - Update validation API to handle Bedrock with AWS credentials
  - Add region selector with common AWS regions

* fix: reset Test button after validation completes

* fix: reset validation button to Test after success

* fix: complete bedrock support and UI/UX improvements
  - Add bedrock to ALLOWED_CLIENT_PROVIDERS for client credentials
  - Pass AWS credentials through full chain (headers → API → provider)
  - Replace non-existent GPT-5 models with real ones (o1, o3-mini)
  - Add accessibility: aria-labels, focus-visible rings, inline errors
  - Add more AWS regions (Ohio, London, Paris, Mumbai, Seoul, São Paulo)
  - Fix setTimeout cleanup with useRef on component unmount
  - Fix TypeScript type consistency in getSelectedAIConfig fallback

* chore: remove unused code
  - Remove unused setAccessCodeRequired state in chat-panel.tsx
  - Remove unused getSelectedModel export in model-config.ts

* fix: UI/UX improvements for model configuration dialog
  - Add gradient header styling with icon badge
  - Change Configuration section icon from Key to Settings2
  - Add duplicate model detection with warning banner and inline removal
  - Filter out already-added models from suggestions dropdown
  - Add type-to-confirm for deleting providers with 3+ models
  - Enhance delete confirmation dialog with warning icon
  - Improve model selector discoverability (show model name + chevron)
  - Add truncation for long model names with title tooltip
  - Remove AI provider settings from Settings dialog (now in Model Config)
  - Extract ValidationButton into reusable component

* fix: prevent duplicate model IDs within same provider
  - Block adding model if ID already exists in provider
  - Block editing model ID to match existing model in provider

* fix: improve duplicate model ID notifications
  - Add toast notification when trying to add duplicate model
  - Allow free typing when editing model ID, validate on blur
  - Show warning toast instead of blocking input

* fix: improve duplicate model validation UX in config dialog
  - Add inline error display for duplicate model IDs
  - Show red border on input when error exists
  - Validate on blur with shake animation for edit errors
  - Prevent saving empty model names
  - Clear errors when user starts typing
  - Simplify error styling (small red text, no heavy chips)
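
For context on the "Fix setTimeout cleanup with useRef on component unmount" item above, a minimal sketch of that pattern; the component, label, and timing below are placeholders, not code from this commit:

// Illustrative sketch of "setTimeout cleanup with useRef on unmount";
// the component and the 2s delay are placeholders, not from this commit.
import { useEffect, useRef } from "react"

function ValidationStatus() {
    const resetTimer = useRef<ReturnType<typeof setTimeout> | null>(null)

    const scheduleReset = () => {
        // Keep the handle in a ref so unmount can cancel a pending reset.
        resetTimer.current = setTimeout(() => {
            /* reset the Test button label here */
        }, 2000)
    }

    useEffect(() => {
        return () => {
            // Cleanup on unmount prevents setState on an unmounted component.
            if (resetTimer.current) clearTimeout(resetTimer.current)
        }
    }, [])

    return <button onClick={scheduleReset}>Test</button>
}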
@@ -33,6 +33,11 @@ export interface ClientOverrides {
     baseUrl?: string | null
     apiKey?: string | null
     modelId?: string | null
+    // AWS Bedrock credentials
+    awsAccessKeyId?: string | null
+    awsSecretAccessKey?: string | null
+    awsRegion?: string | null
+    awsSessionToken?: string | null
 }

 // Providers that can be used with client-provided API keys
@@ -41,6 +46,7 @@ const ALLOWED_CLIENT_PROVIDERS: ProviderName[] = [
     "anthropic",
     "google",
     "azure",
+    "bedrock",
     "openrouter",
     "deepseek",
     "siliconflow",
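
The commit message says AWS credentials travel the full chain (headers → API → provider). A minimal sketch of the server-side step of that chain, assuming hypothetical x-ai-*/x-aws-* header names and an "@/lib/ai-providers" module path, neither of which is confirmed by this diff:

// Sketch only: header names and the module path are assumptions, not from this diff.
import type { ClientOverrides } from "@/lib/ai-providers"

export function parseClientOverrides(headers: Headers): Partial<ClientOverrides> {
    const read = (name: string): string | null => headers.get(name)

    return {
        baseUrl: read("x-ai-base-url"),
        apiKey: read("x-ai-api-key"),
        modelId: read("x-ai-model-id"),
        // AWS Bedrock credentials added by this commit
        awsAccessKeyId: read("x-aws-access-key-id"),
        awsSecretAccessKey: read("x-aws-secret-access-key"),
        awsRegion: read("x-aws-region"),
        awsSessionToken: read("x-aws-session-token"),
    }
}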
@@ -537,12 +543,25 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {

     switch (provider) {
         case "bedrock": {
-            // Use credential provider chain for IAM role support (Lambda, EC2, etc.)
-            // Falls back to env vars (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) for local dev
-            const bedrockProvider = createAmazonBedrock({
-                region: process.env.AWS_REGION || "us-west-2",
-                credentialProvider: fromNodeProviderChain(),
-            })
+            // Use client-provided credentials if available, otherwise fall back to IAM/env vars
+            const hasClientCredentials =
+                overrides?.awsAccessKeyId && overrides?.awsSecretAccessKey
+            const bedrockRegion =
+                overrides?.awsRegion || process.env.AWS_REGION || "us-west-2"
+
+            const bedrockProvider = hasClientCredentials
+                ? createAmazonBedrock({
+                      region: bedrockRegion,
+                      accessKeyId: overrides.awsAccessKeyId!,
+                      secretAccessKey: overrides.awsSecretAccessKey!,
+                      ...(overrides?.awsSessionToken && {
+                          sessionToken: overrides.awsSessionToken,
+                      }),
+                  })
+                : createAmazonBedrock({
+                      region: bedrockRegion,
+                      credentialProvider: fromNodeProviderChain(),
+                  })
             model = bedrockProvider(modelId)
             // Add Anthropic beta options if using Claude models via Bedrock
             if (modelId.includes("anthropic.claude")) {
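
A hedged sketch of how the Bedrock branch above might be exercised with client-supplied credentials, assuming the remaining ClientOverrides fields are optional like the ones shown; the values are placeholders:

// Illustrative values only; nothing below is taken from the diff.
const overrides: ClientOverrides = {
    modelId: "anthropic.claude-sonnet-4-5-20250514-v1:0",
    awsAccessKeyId: "AKIA...", // client-provided key (placeholder)
    awsSecretAccessKey: "...", // client-provided secret (placeholder)
    awsRegion: "us-east-1", // takes precedence over process.env.AWS_REGION
    // awsSessionToken omitted: the conditional spread then adds nothing
}

// hasClientCredentials is truthy here, so the explicit-credentials branch of
// createAmazonBedrock() runs; without these fields getAIModel() falls back to
// fromNodeProviderChain() (IAM role or AWS_* env vars).
const config = getAIModel(overrides)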
@@ -180,5 +180,24 @@
         "seekingSponsorship": "Call for Sponsorship",
         "contactMe": "Contact Me",
         "usageNotice": "Due to high usage, I have changed the model from Claude to minimax-m2 and added some usage limits. See About page for details."
-    }
+    },
+    "modelConfig": {
+        "title": "AI Model Configuration",
+        "description": "Configure multiple AI providers and models",
+        "configure": "Configure",
+        "addProvider": "Add Provider",
+        "addModel": "Add Model",
+        "modelId": "Model ID",
+        "modelLabel": "Display Label",
+        "streaming": "Enable Streaming",
+        "deleteProvider": "Delete Provider",
+        "deleteModel": "Delete Model",
+        "noModels": "No models configured. Add a model to get started.",
+        "selectProvider": "Select a provider or add a new one",
+        "configureMultiple": "Configure multiple AI providers and switch between them easily",
+        "apiKeyStored": "API keys are stored locally in your browser",
+        "test": "Test",
+        "validationError": "Validation failed",
+        "addModelFirst": "Add at least one model to validate"
+    }
 }
@@ -180,5 +180,24 @@
         "seekingSponsorship": "スポンサー募集",
         "contactMe": "お問い合わせ",
         "usageNotice": "利用量の増加に伴い、コスト削減のためモデルを Claude から minimax-m2 に変更し、いくつかの利用制限を設けました。詳細は概要ページをご覧ください。"
-    }
+    },
+    "modelConfig": {
+        "title": "AIモデル設定",
+        "description": "複数のAIプロバイダーとモデルを設定",
+        "configure": "設定",
+        "addProvider": "プロバイダーを追加",
+        "addModel": "モデルを追加",
+        "modelId": "モデルID",
+        "modelLabel": "表示名",
+        "streaming": "ストリーミングを有効",
+        "deleteProvider": "プロバイダーを削除",
+        "deleteModel": "モデルを削除",
+        "noModels": "モデルが設定されていません。モデルを追加してください。",
+        "selectProvider": "プロバイダーを選択または追加してください",
+        "configureMultiple": "複数のAIプロバイダーを設定して簡単に切り替え",
+        "apiKeyStored": "APIキーはブラウザにローカル保存されます",
+        "test": "テスト",
+        "validationError": "検証に失敗しました",
+        "addModelFirst": "検証するには少なくとも1つのモデルを追加してください"
+    }
 }
@@ -180,5 +180,24 @@
         "seekingSponsorship": "寻求赞助(求大佬捞一把)",
         "contactMe": "联系我",
         "usageNotice": "由于使用量过高,我已将模型从 Claude 更换为 minimax-m2,并设置了一些用量限制。详情请查看关于页面。"
-    }
+    },
+    "modelConfig": {
+        "title": "AI 模型配置",
+        "description": "配置多个 AI 提供商和模型",
+        "configure": "配置",
+        "addProvider": "添加提供商",
+        "addModel": "添加模型",
+        "modelId": "模型 ID",
+        "modelLabel": "显示名称",
+        "streaming": "启用流式输出",
+        "deleteProvider": "删除提供商",
+        "deleteModel": "删除模型",
+        "noModels": "尚未配置模型。添加模型以开始使用。",
+        "selectProvider": "选择一个提供商或添加新的",
+        "configureMultiple": "配置多个 AI 提供商并轻松切换",
+        "apiKeyStored": "API 密钥存储在您的浏览器本地",
+        "test": "测试",
+        "validationError": "验证失败",
+        "addModelFirst": "请先添加至少一个模型以进行验证"
+    }
 }
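
The three hunks above add the same modelConfig namespace to the English, Japanese, and Chinese message catalogs. If the app resolves these keys with next-intl (an assumption; the diff only shows the catalogs), the dialog would read them roughly like this:

// Assumes next-intl; this component and its markup are illustrative only.
import { useTranslations } from "next-intl"

export function ModelConfigHeader() {
    const t = useTranslations("modelConfig")
    return (
        <header>
            <h2>{t("title")}</h2> {/* "AI Model Configuration" */}
            <p>{t("description")}</p> {/* "Configure multiple AI providers and models" */}
        </header>
    )
}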
@@ -24,4 +24,8 @@ export const STORAGE_KEYS = {
     aiBaseUrl: "next-ai-draw-io-ai-base-url",
     aiApiKey: "next-ai-draw-io-ai-api-key",
     aiModel: "next-ai-draw-io-ai-model",
+
+    // Multi-model configuration
+    modelConfigs: "next-ai-draw-io-model-configs",
+    selectedModelId: "next-ai-draw-io-selected-model-id",
 } as const
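
Per the commit message, provider and model configuration (including API keys) is stored locally in the browser. A minimal sketch of reading and writing it under the new keys, using the MultiModelConfig helpers from lib/types/model-config.ts shown below; the function names and import paths are illustrative, not from the diff:

// Illustrative helpers, not part of the diff; import paths are assumptions.
import { STORAGE_KEYS } from "@/lib/constants"
import {
    createEmptyConfig,
    type MultiModelConfig,
} from "@/lib/types/model-config"

export function loadModelConfigs(): MultiModelConfig {
    const raw = localStorage.getItem(STORAGE_KEYS.modelConfigs)
    if (!raw) return createEmptyConfig()
    try {
        return JSON.parse(raw) as MultiModelConfig
    } catch {
        // Corrupt or legacy data: start fresh rather than break the dialog.
        return createEmptyConfig()
    }
}

export function saveModelConfigs(config: MultiModelConfig): void {
    localStorage.setItem(STORAGE_KEYS.modelConfigs, JSON.stringify(config))
    if (config.selectedModelId) {
        localStorage.setItem(STORAGE_KEYS.selectedModelId, config.selectedModelId)
    }
}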
lib/types/model-config.ts (new file, 277 lines)
@@ -0,0 +1,277 @@
// Types for multi-provider model configuration

export type ProviderName =
    | "openai"
    | "anthropic"
    | "google"
    | "azure"
    | "bedrock"
    | "openrouter"
    | "deepseek"
    | "siliconflow"
    | "gateway"

// Individual model configuration
export interface ModelConfig {
    id: string // UUID for this model
    modelId: string // e.g., "gpt-4o", "claude-sonnet-4-5"
    validated?: boolean // Has this model been validated
    validationError?: string // Error message if validation failed
}

// Provider configuration
export interface ProviderConfig {
    id: string // UUID for this provider config
    provider: ProviderName
    name?: string // Custom display name (e.g., "OpenAI Production")
    apiKey: string
    baseUrl?: string
    // AWS Bedrock specific fields
    awsAccessKeyId?: string
    awsSecretAccessKey?: string
    awsRegion?: string
    awsSessionToken?: string // Optional, for temporary credentials
    models: ModelConfig[]
    validated?: boolean // Has API key been validated
}

// The complete multi-model configuration
export interface MultiModelConfig {
    version: 1
    providers: ProviderConfig[]
    selectedModelId?: string // Currently selected model's UUID
}

// Flattened model for dropdown display
export interface FlattenedModel {
    id: string // Model config UUID
    modelId: string // Actual model ID
    provider: ProviderName
    providerLabel: string // Provider display name
    apiKey: string
    baseUrl?: string
    // AWS Bedrock specific fields
    awsAccessKeyId?: string
    awsSecretAccessKey?: string
    awsRegion?: string
    awsSessionToken?: string
    validated?: boolean // Has this model been validated
}

// Provider metadata
export const PROVIDER_INFO: Record<
    ProviderName,
    { label: string; defaultBaseUrl?: string }
> = {
    openai: { label: "OpenAI" },
    anthropic: {
        label: "Anthropic",
        defaultBaseUrl: "https://api.anthropic.com/v1",
    },
    google: { label: "Google" },
    azure: { label: "Azure OpenAI" },
    bedrock: { label: "Amazon Bedrock" },
    openrouter: { label: "OpenRouter" },
    deepseek: { label: "DeepSeek" },
    siliconflow: {
        label: "SiliconFlow",
        defaultBaseUrl: "https://api.siliconflow.com/v1",
    },
    gateway: { label: "AI Gateway" },
}

// Suggested models per provider for quick add
export const SUGGESTED_MODELS: Record<ProviderName, string[]> = {
    openai: [
        // GPT-4o series (latest)
        "gpt-4o",
        "gpt-4o-mini",
        "gpt-4o-2024-11-20",
        // GPT-4 Turbo
        "gpt-4-turbo",
        "gpt-4-turbo-preview",
        // o1/o3 reasoning models
        "o1",
        "o1-mini",
        "o1-preview",
        "o3-mini",
        // GPT-4
        "gpt-4",
        // GPT-3.5
        "gpt-3.5-turbo",
    ],
    anthropic: [
        // Claude 4.5 series (latest)
        "claude-opus-4-5-20250514",
        "claude-sonnet-4-5-20250514",
        // Claude 4 series
        "claude-opus-4-20250514",
        "claude-sonnet-4-20250514",
        // Claude 3.7 series
        "claude-3-7-sonnet-20250219",
        // Claude 3.5 series
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
        // Claude 3 series
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
    ],
    google: [
        // Gemini 2.5 series
        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-2.5-flash-preview-05-20",
        // Gemini 2.0 series
        "gemini-2.0-flash",
        "gemini-2.0-flash-exp",
        "gemini-2.0-flash-lite",
        // Gemini 1.5 series
        "gemini-1.5-pro",
        "gemini-1.5-flash",
        // Legacy
        "gemini-pro",
    ],
    azure: ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4", "gpt-35-turbo"],
    bedrock: [
        // Anthropic Claude
        "anthropic.claude-opus-4-5-20250514-v1:0",
        "anthropic.claude-sonnet-4-5-20250514-v1:0",
        "anthropic.claude-opus-4-20250514-v1:0",
        "anthropic.claude-sonnet-4-20250514-v1:0",
        "anthropic.claude-3-7-sonnet-20250219-v1:0",
        "anthropic.claude-3-5-sonnet-20241022-v2:0",
        "anthropic.claude-3-5-haiku-20241022-v1:0",
        "anthropic.claude-3-opus-20240229-v1:0",
        "anthropic.claude-3-sonnet-20240229-v1:0",
        "anthropic.claude-3-haiku-20240307-v1:0",
        // Amazon Nova
        "amazon.nova-pro-v1:0",
        "amazon.nova-lite-v1:0",
        "amazon.nova-micro-v1:0",
        // Meta Llama
        "meta.llama3-3-70b-instruct-v1:0",
        "meta.llama3-1-405b-instruct-v1:0",
        "meta.llama3-1-70b-instruct-v1:0",
        // Mistral
        "mistral.mistral-large-2411-v1:0",
        "mistral.mistral-small-2503-v1:0",
    ],
    openrouter: [
        // Anthropic
        "anthropic/claude-sonnet-4",
        "anthropic/claude-opus-4",
        "anthropic/claude-3.5-sonnet",
        "anthropic/claude-3.5-haiku",
        // OpenAI
        "openai/gpt-4o",
        "openai/gpt-4o-mini",
        "openai/o1",
        "openai/o3-mini",
        // Google
        "google/gemini-2.5-pro",
        "google/gemini-2.5-flash",
        "google/gemini-2.0-flash-exp:free",
        // Meta Llama
        "meta-llama/llama-3.3-70b-instruct",
        "meta-llama/llama-3.1-405b-instruct",
        "meta-llama/llama-3.1-70b-instruct",
        // DeepSeek
        "deepseek/deepseek-chat",
        "deepseek/deepseek-r1",
        // Qwen
        "qwen/qwen-2.5-72b-instruct",
    ],
    deepseek: ["deepseek-chat", "deepseek-reasoner", "deepseek-coder"],
    siliconflow: [
        // DeepSeek
        "deepseek-ai/DeepSeek-V3",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-V2.5",
        // Qwen
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-32B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "Qwen/Qwen2.5-7B-Instruct",
        "Qwen/Qwen2-VL-72B-Instruct",
    ],
    gateway: [
        "openai/gpt-4o",
        "openai/gpt-4o-mini",
        "anthropic/claude-sonnet-4-5",
        "anthropic/claude-3-5-sonnet",
        "google/gemini-2.0-flash",
    ],
}

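// Illustrative aside (not part of model-config.ts): per the commit message,
// models already added to a provider are filtered out of the suggestions
// dropdown. A minimal sketch of that filter over SUGGESTED_MODELS; the helper
// name is hypothetical.
function availableSuggestions(provider: ProviderConfig): string[] {
    const existing = new Set(provider.models.map((m) => m.modelId))
    return SUGGESTED_MODELS[provider.provider].filter(
        (modelId) => !existing.has(modelId),
    )
}
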
// Helper to generate UUID
export function generateId(): string {
    return `${Date.now()}-${Math.random().toString(36).slice(2, 9)}`
}

// Create empty config
export function createEmptyConfig(): MultiModelConfig {
    return {
        version: 1,
        providers: [],
        selectedModelId: undefined,
    }
}

// Create new provider config
export function createProviderConfig(provider: ProviderName): ProviderConfig {
    return {
        id: generateId(),
        provider,
        apiKey: "",
        baseUrl: PROVIDER_INFO[provider].defaultBaseUrl,
        models: [],
        validated: false,
    }
}

// Create new model config
export function createModelConfig(modelId: string): ModelConfig {
    return {
        id: generateId(),
        modelId,
    }
}

// Get all models as flattened list for dropdown
export function flattenModels(config: MultiModelConfig): FlattenedModel[] {
    const models: FlattenedModel[] = []

    for (const provider of config.providers) {
        // Use custom name if provided, otherwise use default provider label
        const providerLabel =
            provider.name || PROVIDER_INFO[provider.provider].label

        for (const model of provider.models) {
            models.push({
                id: model.id,
                modelId: model.modelId,
                provider: provider.provider,
                providerLabel,
                apiKey: provider.apiKey,
                baseUrl: provider.baseUrl,
                // AWS Bedrock fields
                awsAccessKeyId: provider.awsAccessKeyId,
                awsSecretAccessKey: provider.awsSecretAccessKey,
                awsRegion: provider.awsRegion,
                awsSessionToken: provider.awsSessionToken,
                validated: model.validated,
            })
        }
    }

    return models
}

// Find model by ID
export function findModelById(
    config: MultiModelConfig,
    modelId: string,
): FlattenedModel | undefined {
    return flattenModels(config).find((m) => m.id === modelId)
}
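
To show how the exported helpers above fit together, a short usage sketch built only from this file; the concrete IDs and key are placeholders, and the path alias is an assumption:

// Usage sketch; IDs, key, and import path are placeholders.
import {
    createEmptyConfig,
    createModelConfig,
    createProviderConfig,
    findModelById,
    flattenModels,
} from "@/lib/types/model-config"

// Build a config with one Anthropic provider and one model.
const config = createEmptyConfig()
const anthropic = createProviderConfig("anthropic")
anthropic.apiKey = "sk-ant-..." // placeholder
anthropic.models.push(createModelConfig("claude-sonnet-4-5-20250514"))
config.providers.push(anthropic)

// Flatten for the dropdown and remember the selection.
const [first] = flattenModels(config)
config.selectedModelId = first.id

// Later, resolve the selection back to its provider credentials.
const selected = findModelById(config, first.id)
console.log(selected?.providerLabel, selected?.modelId) // Anthropic claude-sonnet-4-5-20250514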