From 4cd78dc5615f51ca4a2856f359fb16901d7f460c Mon Sep 17 00:00:00 2001
From: Dayuan Jiang <34411969+DayuanJiang@users.noreply.github.com>
Date: Fri, 5 Dec 2025 20:18:19 +0900
Subject: [PATCH] chore: remove complex 503 error handling code (#102)

- Remove 15s streaming timeout detection (too slow, added complexity)
- Remove status indicator (issue resolved by switching model)
- Remove streamingError state and related refs
- Simplify onFinish callback (remove 503 detection logging)
- Remove errorHandler function (use default AI SDK errors)

The real fix was switching from the global.* to the us.* Bedrock model
prefix (a sketch of that change follows the diff). This removes ~134 lines
of unnecessary complexity.
---
 app/api/chat/route.ts     | 68 ++------------------------------
 components/chat-panel.tsx | 82 ++-------------------------------------
 2 files changed, 8 insertions(+), 142 deletions(-)

diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts
index 374c984..c9d01de 100644
--- a/app/api/chat/route.ts
+++ b/app/api/chat/route.ts
@@ -191,28 +191,9 @@ ${lastMessageText}
         messages: allMessages,
         ...(providerOptions && { providerOptions }),
         ...(headers && { headers }),
-        onFinish: ({ usage, providerMetadata, finishReason, text, toolCalls }) => {
-            // Detect potential mid-stream failures (e.g., Bedrock 503 ServiceUnavailableException)
-            // When this happens, usage is empty and providerMetadata is undefined
-            const hasUsage = usage && Object.keys(usage).length > 0;
-            if (!hasUsage) {
-                console.error('[Stream Error] Empty usage detected - possible Bedrock 503 or mid-stream failure');
-                console.error('[Stream Error] finishReason:', finishReason);
-                console.error('[Stream Error] text received:', text?.substring(0, 200) || '(none)');
-                console.error('[Stream Error] toolCalls:', toolCalls?.length || 0);
-                // Log the user's last message for debugging
-                const lastUserMsg = enhancedMessages.filter(m => m.role === 'user').pop();
-                if (lastUserMsg) {
-                    const content = lastUserMsg.content;
-                    const preview = Array.isArray(content)
-                        ? (content.find((c) => c.type === 'text') as { type: 'text'; text: string } | undefined)?.text?.substring(0, 100)
-                        : String(content).substring(0, 100);
-                    console.error('[Stream Error] Last user message preview:', preview);
-                }
-            } else {
-                console.log('[Cache] Full providerMetadata:', JSON.stringify(providerMetadata, null, 2));
-                console.log('[Cache] Usage:', JSON.stringify(usage, null, 2));
-            }
+        onFinish: ({ usage, providerMetadata }) => {
+            console.log('[Cache] providerMetadata:', JSON.stringify(providerMetadata, null, 2));
+            console.log('[Cache] Usage:', JSON.stringify(usage, null, 2));
         },
         tools: {
             // Client-side tool that will be executed on the client
@@ -276,48 +257,7 @@ IMPORTANT: Keep edits concise:
         temperature: 0,
     });

-    // Error handler function to provide detailed error messages
-    function errorHandler(error: unknown) {
-        if (error == null) {
-            return 'unknown error';
-        }
-
-        const errorString = typeof error === 'string'
-            ? error
-            : error instanceof Error
-                ? error.message
-                : JSON.stringify(error);
-
-        // Check for Bedrock service errors (503, throttling, etc.)
-        if (errorString.includes('ServiceUnavailable') ||
-            errorString.includes('503') ||
-            errorString.includes('temporarily unavailable')) {
-            console.error('[Bedrock Error] ServiceUnavailableException:', errorString);
-            return 'The AI service is temporarily unavailable. Please try again in a few seconds.';
-        }
-
-        // Check for throttling errors
-        if (errorString.includes('ThrottlingException') ||
-            errorString.includes('rate limit') ||
-            errorString.includes('too many requests') ||
-            errorString.includes('429')) {
-            console.error('[Bedrock Error] ThrottlingException:', errorString);
-            return 'Too many requests. Please wait a moment and try again.';
-        }
-
-        // Check for image not supported error (e.g., DeepSeek models)
-        if (errorString.includes('image_url') ||
-            errorString.includes('unknown variant') ||
-            (errorString.includes('image') && errorString.includes('not supported'))) {
-            return 'This model does not support image inputs. Please remove the image and try again, or switch to a vision-capable model.';
-        }
-
-        return errorString;
-    }
-
-    return result.toUIMessageStreamResponse({
-        onError: errorHandler,
-    });
+    return result.toUIMessageStreamResponse();
 }

 export async function POST(req: Request) {
diff --git a/components/chat-panel.tsx b/components/chat-panel.tsx
index 0bdbbfe..6c9b6f5 100644
--- a/components/chat-panel.tsx
+++ b/components/chat-panel.tsx
@@ -4,7 +4,7 @@ import type React from "react";
 import { useRef, useEffect, useState } from "react";
 import { flushSync } from "react-dom";
 import { FaGithub } from "react-icons/fa";
-import { PanelRightClose, PanelRightOpen, CheckCircle } from "lucide-react";
+import { PanelRightClose, PanelRightOpen } from "lucide-react";
 import Link from "next/link";
 import Image from "next/image";
@@ -62,7 +62,6 @@ export default function ChatPanel({
     const [files, setFiles] = useState<File[]>([]);
     const [showHistory, setShowHistory] = useState(false);
     const [input, setInput] = useState("");
-    const [streamingError, setStreamingError] = useState<Error | null>(null);

     // Store XML snapshots for each user message (keyed by message index)
     const xmlSnapshotsRef = useRef<Map<number, string>>(new Map());
@@ -80,7 +79,6 @@ export default function ChatPanel({
         status,
         error,
         setMessages,
-        stop,
     } = useChat({
         transport: new DefaultChatTransport({
             api: "/api/chat",
@@ -168,68 +166,9 @@ Please retry with an adjusted search pattern or use display_diagram if retries a
         },
         onError: (error) => {
             console.error("Chat error:", error);
-            setStreamingError(error);
         },
     });

-    // Streaming timeout detection - detects when stream stalls mid-response (e.g., Bedrock 503)
-    // This catches cases where onError doesn't fire because headers were already sent
-    const lastMessageCountRef = useRef(0);
-    const lastMessagePartsRef = useRef(0);
-
-    useEffect(() => {
-        // Clear streaming error when status changes to ready
-        if (status === "ready") {
-            setStreamingError(null);
-            lastMessageCountRef.current = 0;
-            lastMessagePartsRef.current = 0;
-            return;
-        }
-
-        if (status !== "streaming") return;
-
-        const STALL_TIMEOUT_MS = 15000; // 15 seconds without any update
-
-        // Capture current state BEFORE setting timeout
-        // This way we compare against values at the time timeout was set
-        const currentPartsCount = messages.reduce(
-            (acc, msg) => acc + (msg.parts?.length || 0),
-            0
-        );
-        const capturedMessageCount = messages.length;
-        const capturedPartsCount = currentPartsCount;
-
-        // Update refs immediately so next effect run has fresh values
-        lastMessageCountRef.current = messages.length;
-        lastMessagePartsRef.current = currentPartsCount;
-
-        const timeoutId = setTimeout(() => {
-            // Re-count parts at timeout time
-            const newPartsCount = messages.reduce(
-                (acc, msg) => acc + (msg.parts?.length || 0),
-                0
-            );
-
-            // If no change since timeout was set, stream has stalled
-            if (
-                messages.length === capturedMessageCount &&
-                newPartsCount === capturedPartsCount
-            ) {
-                console.error(
-                    "[Streaming Timeout] No activity for 15s - forcing error state"
-                );
-                setStreamingError(
-                    new Error(
-                        "Connection lost. The AI service may be temporarily unavailable. Please try again."
-                    )
-                );
-                stop(); // Allow user to retry by transitioning status to "ready"
-            }
-        }, STALL_TIMEOUT_MS);
-
-        return () => clearTimeout(timeoutId);
-    }, [status, messages, stop]);
-
     const messagesEndRef = useRef(null);

     useEffect(() => {
@@ -240,13 +179,8 @@ Please retry with an adjusted search pattern or use display_diagram if retries a
     const onFormSubmit = async (e: React.FormEvent) => {
         e.preventDefault();

-        // Allow retry if there's a streaming error (workaround for stop() not transitioning status)
-        const isProcessing =
-            (status === "streaming" || status === "submitted") &&
-            !streamingError;
+        const isProcessing = status === "streaming" || status === "submitted";
         if (input.trim() && !isProcessing) {
-            // Clear any previous streaming error before starting new request
-            setStreamingError(null);
             try {
                 let chartXml = await onFetchChart();
                 chartXml = formatXML(chartXml);
@@ -476,14 +410,6 @@ Please retry with an adjusted search pattern or use display_diagram if retries a
                     >
                         About
                     </Link>
-
-
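
Note: the model switch called out in the commit message landed outside this
diff. For context, here is a minimal sketch of what that change looks like
with the AI SDK's Amazon Bedrock provider. The @ai-sdk/amazon-bedrock import
and the streamText call are the real AI SDK API, but the exact model IDs
below are illustrative assumptions, not copied from this repo:

    import { bedrock } from '@ai-sdk/amazon-bedrock';
    import { streamText } from 'ai';

    // Before (illustrative ID): a global.* cross-region inference profile,
    // the configuration that was intermittently failing with 503s.
    // const model = bedrock('global.anthropic.claude-sonnet-4-20250514-v1:0');

    // After (illustrative ID): the US-geo us.* inference profile.
    const model = bedrock('us.anthropic.claude-sonnet-4-20250514-v1:0');

    const result = streamText({
        model,
        messages: [{ role: 'user', content: 'Draw a simple flowchart.' }],
    });

One side effect worth noting: with errorHandler removed,
result.toUIMessageStreamResponse() falls back to the AI SDK's default
onError, which by default masks server-side error details in the message
streamed to the client rather than forwarding the categorized messages this
patch deletes.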