Mirror of https://github.com/DayuanJiang/next-ai-draw-io.git (synced 2026-01-02 22:32:27 +08:00)
feat: add bring-your-own-API-key support (#186)
- Add AI provider settings to config panel (provider, model, API key, base URL)
- Support 7 providers: OpenAI, Anthropic, Google, Azure, OpenRouter, DeepSeek, SiliconFlow
- Client API keys stored in localStorage, never stored on server
- Client settings override server env vars when provided
- Skip server credential validation when client provides API key
- Bypass usage limits (request/token/TPM) when using own API key
- Add /api/config endpoint for fetching usage limits
- Add privacy notices to settings dialog, about pages, and quota toast
- Add clear settings button to reset saved API keys
- Update README files (EN/CN/JA) with BYOK documentation

Co-authored-by: dayuan.jiang <jiangdy@amazon.co.jp>
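For context, the x-ai-* headers consumed by the server change below are the transport for these client-side settings. A minimal sketch of how a client might attach them to its request, assuming a localStorage key, settings shape, and endpoint path that are not shown in this commit:

```ts
// Hypothetical client helper (not from this commit): forward saved BYOK
// settings to the chat route as headers. The storage key, settings shape,
// and "/api/chat" path are assumptions for illustration.
interface AIProviderSettings {
    provider?: string
    model?: string
    apiKey?: string
    baseUrl?: string
}

function readSavedSettings(): AIProviderSettings {
    try {
        return JSON.parse(localStorage.getItem("ai-provider-settings") ?? "{}")
    } catch {
        return {}
    }
}

async function sendChat(body: unknown): Promise<Response> {
    const settings = readSavedSettings()
    const headers: Record<string, string> = { "Content-Type": "application/json" }
    // Only forward values the user actually saved; the server falls back to
    // its own env vars for anything missing.
    if (settings.provider) headers["x-ai-provider"] = settings.provider
    if (settings.model) headers["x-ai-model"] = settings.model
    if (settings.apiKey) headers["x-ai-api-key"] = settings.apiKey
    if (settings.baseUrl) headers["x-ai-base-url"] = settings.baseUrl
    return fetch("/api/chat", {
        method: "POST",
        headers,
        body: JSON.stringify(body),
    })
}
```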
```diff
@@ -199,8 +199,17 @@ async function handleChatRequest(req: Request): Promise<Response> {
     }
     // === CACHE CHECK END ===
 
-    // Get AI model from environment configuration
-    const { model, providerOptions, headers, modelId } = getAIModel()
+    // Read client AI provider overrides from headers
+    const clientOverrides = {
+        provider: req.headers.get("x-ai-provider"),
+        baseUrl: req.headers.get("x-ai-base-url"),
+        apiKey: req.headers.get("x-ai-api-key"),
+        modelId: req.headers.get("x-ai-model"),
+    }
+
+    // Get AI model with optional client overrides
+    const { model, providerOptions, headers, modelId } =
+        getAIModel(clientOverrides)
 
     // Check if model supports prompt caching
     const shouldCache = supportsPromptCaching(modelId)
```
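The hunk shows getAIModel now accepting the client overrides, but its body is outside this hunk. A rough sketch of the fallback behaviour the commit message describes (client values win, otherwise server env vars); the env var names and helper name below are assumptions, not the repository's actual getAIModel:

```ts
// Hypothetical sketch only: how an override-aware config resolver could look.
// Env var names and the return shape are assumptions; the real getAIModel
// also builds the provider-specific model instance and providerOptions.
interface ClientOverrides {
    provider?: string | null
    baseUrl?: string | null
    apiKey?: string | null
    modelId?: string | null
}

function resolveAIConfig(overrides: ClientOverrides = {}) {
    // Client-supplied values take precedence; otherwise fall back to env vars.
    const provider = overrides.provider ?? process.env.AI_PROVIDER
    const apiKey = overrides.apiKey ?? process.env.AI_API_KEY
    const baseUrl = overrides.baseUrl ?? process.env.AI_BASE_URL
    const modelId = overrides.modelId ?? process.env.AI_MODEL_ID

    // Per the commit message, a client-provided key skips server credential
    // validation and bypasses request/token/TPM usage limits.
    const usingOwnKey = Boolean(overrides.apiKey)

    return { provider, apiKey, baseUrl, modelId, usingOwnKey }
}
```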