mirror of
https://github.com/DayuanJiang/next-ai-draw-io.git
synced 2026-01-02 14:22:28 +08:00
fix: detect models that don't support image input and return clear error
Some models (Kimi K2, DeepSeek, and Qwen text models) don't support image/vision input. The AI SDK silently drops unsupported image parts, causing confusing responses in which the model acts as if no image was uploaded. Added a supportsImageInput() function that detects unsupported models by name, and the route now returns a 400 error with clear guidance when users try to upload images to these models. Closes #469
This commit is contained in:
@@ -12,7 +12,11 @@ import fs from "fs/promises"
|
|||||||
import { jsonrepair } from "jsonrepair"
|
import { jsonrepair } from "jsonrepair"
|
||||||
import path from "path"
|
import path from "path"
|
||||||
import { z } from "zod"
|
import { z } from "zod"
|
||||||
import { getAIModel, supportsPromptCaching } from "@/lib/ai-providers"
|
import {
|
||||||
|
getAIModel,
|
||||||
|
supportsImageInput,
|
||||||
|
supportsPromptCaching,
|
||||||
|
} from "@/lib/ai-providers"
|
||||||
import { findCachedResponse } from "@/lib/cached-responses"
|
import { findCachedResponse } from "@/lib/cached-responses"
|
||||||
import {
|
import {
|
||||||
checkAndIncrementRequest,
|
checkAndIncrementRequest,
|
||||||
@@ -295,6 +299,17 @@ async function handleChatRequest(req: Request): Promise<Response> {
|
|||||||
lastUserMessage?.parts?.filter((part: any) => part.type === "file") ||
|
lastUserMessage?.parts?.filter((part: any) => part.type === "file") ||
|
||||||
[]
|
[]
|
||||||
|
|
||||||
|
// Check if user is sending images to a model that doesn't support them
|
||||||
|
// AI SDK silently drops unsupported parts, so we need to catch this early
|
||||||
|
if (fileParts.length > 0 && !supportsImageInput(modelId)) {
|
||||||
|
return Response.json(
|
||||||
|
{
|
||||||
|
error: `The model "${modelId}" does not support image input. Please use a vision-capable model (e.g., GPT-4o, Claude, Gemini) or remove the image.`,
|
||||||
|
},
|
||||||
|
{ status: 400 },
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
// User input only - XML is now in a separate cached system message
|
// User input only - XML is now in a separate cached system message
|
||||||
const formattedUserInput = `User input:
|
const formattedUserInput = `User input:
|
||||||
"""md
|
"""md
|
||||||
|
|||||||
@@ -906,3 +906,34 @@ export function supportsPromptCaching(modelId: string): boolean {
|
|||||||
modelId.startsWith("eu.anthropic")
|
modelId.startsWith("eu.anthropic")
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a model supports image/vision input.
|
||||||
|
* Some models silently drop image parts without error (AI SDK warning only).
|
||||||
|
*/
|
||||||
|
export function supportsImageInput(modelId: string): boolean {
|
||||||
|
const lowerModelId = modelId.toLowerCase()
|
||||||
|
|
||||||
|
// Helper to check if model has vision capability indicator
|
||||||
|
const hasVisionIndicator =
|
||||||
|
lowerModelId.includes("vision") || lowerModelId.includes("vl")
|
||||||
|
|
||||||
|
// Models that DON'T support image/vision input (unless vision variant)
|
||||||
|
// Kimi K2 models don't support images
|
||||||
|
if (lowerModelId.includes("kimi") && !hasVisionIndicator) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepSeek text models (not vision variants)
|
||||||
|
if (lowerModelId.includes("deepseek") && !hasVisionIndicator) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Qwen text models (not vision variants like qwen-vl)
|
||||||
|
if (lowerModelId.includes("qwen") && !hasVisionIndicator) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default: assume model supports images
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user