From 72d647de7aae0e533d5467714c3075979c363883 Mon Sep 17 00:00:00 2001
From: Dayuan Jiang <34411969+DayuanJiang@users.noreply.github.com>
Date: Tue, 23 Dec 2025 20:29:48 +0900
Subject: [PATCH] fix: use Chat Completions API for OpenAI-compatible proxies
 (#382)

Third-party OpenAI-compatible proxies typically don't support the
/responses endpoint. Use .chat() for custom baseURLs while keeping
Responses API for official OpenAI to preserve reasoning model support.

Fixes #377
---
 lib/ai-providers.ts | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/lib/ai-providers.ts b/lib/ai-providers.ts
index da93069..eddfe7a 100644
--- a/lib/ai-providers.ts
+++ b/lib/ai-providers.ts
@@ -588,13 +588,15 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
     case "openai": {
       const apiKey = overrides?.apiKey || process.env.OPENAI_API_KEY
       const baseURL = overrides?.baseUrl || process.env.OPENAI_BASE_URL
-      if (baseURL || overrides?.apiKey) {
-        const customOpenAI = createOpenAI({
-          apiKey,
-          ...(baseURL && { baseURL }),
-        })
-        // Use Responses API (default) instead of .chat() to support reasoning
-        // for gpt-5, o1, o3, o4 models. Chat Completions API does not emit reasoning events.
+      if (baseURL) {
+        // Custom base URL = third-party proxy, use Chat Completions API
+        // for compatibility (most proxies don't support /responses endpoint)
+        const customOpenAI = createOpenAI({ apiKey, baseURL })
+        model = customOpenAI.chat(modelId)
+      } else if (overrides?.apiKey) {
+        // Custom API key but official OpenAI endpoint, use Responses API
+        // to support reasoning for gpt-5, o1, o3, o4 models
+        const customOpenAI = createOpenAI({ apiKey })
         model = customOpenAI(modelId)
       } else {
         model = openai(modelId)
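
Note: for context, below is a minimal standalone sketch of the selection
logic this patch produces. It assumes the @ai-sdk/openai provider, where
calling the provider directly targets the Responses API and .chat()
targets Chat Completions. The helper name pickOpenAIModel and its
standalone shape are illustrative only, not the actual getAIModel code.

  import { createOpenAI, openai } from "@ai-sdk/openai"

  // Illustrative helper (hypothetical name); mirrors the post-patch
  // branching rather than reproducing the full getAIModel function.
  function pickOpenAIModel(
    modelId: string,
    overrides?: { apiKey?: string; baseUrl?: string }
  ) {
    const apiKey = overrides?.apiKey || process.env.OPENAI_API_KEY
    const baseURL = overrides?.baseUrl || process.env.OPENAI_BASE_URL

    if (baseURL) {
      // Third-party proxy: use Chat Completions, since most proxies
      // don't implement the /responses endpoint
      return createOpenAI({ apiKey, baseURL }).chat(modelId)
    }
    if (overrides?.apiKey) {
      // Official endpoint with a custom key: keep the Responses API
      // so reasoning models (gpt-5, o1, o3, o4) still emit reasoning events
      return createOpenAI({ apiKey })(modelId)
    }
    // No overrides: default provider instance configured from the environment
    return openai(modelId)
  }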