fix: use Chat Completions API for OpenAI-compatible proxies (#382)

Third-party OpenAI-compatible proxies typically don't support the
/responses endpoint. Use .chat() for custom baseURLs while keeping
Responses API for official OpenAI to preserve reasoning model support.

Fixes #377
This commit is contained in:
Dayuan Jiang
2025-12-23 20:29:48 +09:00
committed by GitHub
parent c6b0e5ac62
commit 72d647de7a

View File

@@ -588,13 +588,15 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
    case "openai": {
      const apiKey = overrides?.apiKey || process.env.OPENAI_API_KEY
      const baseURL = overrides?.baseUrl || process.env.OPENAI_BASE_URL
-      if (baseURL || overrides?.apiKey) {
-        const customOpenAI = createOpenAI({
-          apiKey,
-          ...(baseURL && { baseURL }),
-        })
-        // Use Responses API (default) instead of .chat() to support reasoning
-        // for gpt-5, o1, o3, o4 models. Chat Completions API does not emit reasoning events.
+      if (baseURL) {
+        // Custom base URL = third-party proxy, use Chat Completions API
+        // for compatibility (most proxies don't support /responses endpoint)
+        const customOpenAI = createOpenAI({ apiKey, baseURL })
+        model = customOpenAI.chat(modelId)
+      } else if (overrides?.apiKey) {
+        // Custom API key but official OpenAI endpoint, use Responses API
+        // to support reasoning for gpt-5, o1, o3, o4 models
+        const customOpenAI = createOpenAI({ apiKey })
        model = customOpenAI(modelId)
      } else {
        model = openai(modelId)